NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/atomic.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* All arithmetic functions return the arithmetic result of the atomic
* operation. Some atomic operation APIs return the value prior to mutation, in
* which case the following functions must redundantly compute the result so
* that it can be returned. These functions are normally inlined, so the extra
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* *p = s;
* return (false);
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
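/*
 * Illustrative sketch only (not part of this header): a read-modify-write
 * retry loop built on the convention above, where atomic_cas_<t>() returns
 * false on success.  The name atomic_incr_sketch is hypothetical.
 */
#if 0
JEMALLOC_INLINE uint64_t
atomic_incr_sketch(uint64_t *p)
{
	uint64_t o;

	do {
		o = atomic_read_uint64(p);
		/* A true return means another thread won the race; retry. */
	} while (atomic_cas_uint64(p, o, o + 1));
	return (o + 1);
}
#endif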
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
uint64_t t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
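	/* xaddq left the previous value of *p in t, so the result is t + x. */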
return (t + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
uint64_t t;
x = (uint64_t)(-(int64_t)x);
t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgq %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory" /* Clobbers. */
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
uint64_t o;
	/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint64(p);
} while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
__sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
uint32_t t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
uint32_t t;
x = (uint32_t)(-(int32_t)x);
t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgl %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory"
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
uint32_t o;
	/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint32(p);
} while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
__sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
/* Pointer operations. */
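/*
 * LG_SIZEOF_PTR is lg(sizeof(void *)): 3 on 64-bit targets, 2 on 32-bit
 * targets, so exactly one branch below is compiled in.
 */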
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_DECLS_H */
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/mb.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
/* This is a true memory barrier. */
asm volatile ("pusha;"
"xor %%eax,%%eax;"
"cpuid;"
"popa;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
*/
asm volatile ("nop;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("sfence"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("eieio"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("membar #StoreStore"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
__sync_synchronize();
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void)
{
malloc_mutex_t mtx;
malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
malloc_mutex_lock(TSDN_NULL, &mtx);
malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
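/*
 * Usage sketch (hypothetical types and variables, kept inert under #if 0):
 * order a payload store before the pointer store that publishes it.
 */
#if 0
typedef struct node_s { int data; } node_t;
static node_t *head;
static void
publish_sketch(node_t *node, int v)
{
	node->data = v;
	mb_write();	/* Payload must be visible before the pointer. */
	head = node;
}
#endif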
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/quarantine.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
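/*
 * Sizing sketch (an assumption about how the one-element trailing array is
 * intended to be used): allocate the header plus (1 << lg_maxobjs) ring slots
 * in a single block, e.g.
 *
 *   size_t size = offsetof(quarantine_t, objs) +
 *       ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t));
 */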
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
tsd_t *tsd;
assert(config_fill && opt_quarantine);
tsd = tsd_fetch();
if (tsd_quarantine_get(tsd) == NULL)
quarantine_alloc_hook_work(tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/valgrind.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) { \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
zero); \
} \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
((ptr) != (old_ptr))
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
(ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
(old_ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(tsdn, ptr); \
\
if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
old_ptr)) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
old_ptr_null(old_ptr)) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
ptr_null(ptr)) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (unlikely(in_valgrind)) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void valgrind_make_mem_noaccess(void *ptr, size_t usize);
void valgrind_make_mem_undefined(void *ptr, size_t usize);
void valgrind_make_mem_defined(void *ptr, size_t usize);
void valgrind_freelike_block(void *ptr, size_t usize);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/extent.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Arena from which this extent came, if any. */
arena_t *en_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *en_addr;
/* Total region size. */
size_t en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
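/*
 * Sketch of the ordering rule described for en_sn (a hypothetical helper, not
 * the tree's actual comparator): order by serial number, then break ties by
 * address so that equal serial numbers still yield a stable total order.
 */
#if 0
JEMALLOC_INLINE int
extent_sn_ad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_sn = extent_node_sn_get(a);
	size_t b_sn = extent_node_sn_get(b);
	int ret = (a_sn > b_sn) - (a_sn < b_sn);

	if (ret != 0)
		return (ret);
	/* Fall back on address comparison for equal serial numbers. */
	{
		uintptr_t a_ad = (uintptr_t)extent_node_addr_get(a);
		uintptr_t b_ad = (uintptr_t)extent_node_addr_get(b);
		return ((a_ad > b_ad) - (a_ad < b_ad));
	}
}
#endif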
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
size_t sn, bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
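/*
 * Interpretation (an assumption based on jemalloc's documented "dss" option):
 * "primary" prefers sbrk()-based DSS allocation over mmap(), "secondary"
 * falls back on the DSS only when mmap() fails, and "disabled" never uses the
 * DSS.
 */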
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
extern const char *dss_prec_names[];
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
void chunk_dss_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
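/*
 * Example: KQU(0xff51afd7ed558ccd) expands to
 * ((uint64_t)0xff51afd7ed558ccdULL), token-pasting the ULL suffix so that
 * large 64-bit constants (as used in hash.h) remain well-formed even where an
 * unsuffixed literal would not fit in a native integer type.
 */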
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/pages.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/prof.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
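/*
 * Consumers must therefore treat pointers at or below PROF_TDATA_STATE_MAX as
 * non-usable, e.g. (mirroring prof_sample_accum_update() below):
 *
 *   if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
 *           tdata = NULL;
 */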
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
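/*
 * Worked example (illustrative numbers): with prof_interval = 2^30 bytes and
 * narenas = 4, dumps average one per GiB allocated process-wide, but two
 * consecutive dumps can be separated by up to roughly 4 GiB of allocation.
 */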
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (tdata);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (unlikely(tdata == NULL))
return (true);
if (likely(tdata->bytes_until_sample >= usize)) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{
bool sampled, old_sampled;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/hash.h
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
/* Handle unaligned read. */
	if (unlikely(((uintptr_t)p & (sizeof(uint32_t)-1)) != 0)) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
/* Handle unaligned read. */
	if (unlikely(((uintptr_t)p & (sizeof(uint64_t)-1)) != 0)) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
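/*
 * Dispatch note: on 64-bit little-endian targets (LG_SIZEOF_PTR == 3) the
 * x64 variant writes its two 64-bit lanes straight into r_hash; otherwise
 * the x86 variant is used and its lanes are narrowed to size_t.
 */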
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 8,394 | 22.449721 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/tsd.h
|
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
tsd_state_nominal,
tsd_state_purgatory,
tsd_state_reincarnated
} tsd_state_t;
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are five macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_types(example_, example_t)
* malloc_tsd_protos(, example_, example_t)
* malloc_tsd_externs(example_, example_t)
* In example.c:
* malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
* malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get(bool init) {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast the
* function argument to (a_type *), then dereference the resulting pointer to
* access fields, e.g.
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = (example_t *)arg;
*
* example->x = 42;
* [...]
* if ([want the cleanup function to be called again])
* example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
*/
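/*
 * Minimal usage sketch for the generated API, continuing the example_
 * declarations above:
 *
 *	if (example_tsd_boot())
 *		abort();
 *	example_t *ex = example_tsd_get(true);
 *	ex->x = 42;
 *	example_tsd_set(ex);
 */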
/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#endif
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(bool init); \
a_attr void \
a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
if (a_name##tsd_initialized) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
} \
return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
0) \
return (true); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)(&a_name##tsd_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (init && unlikely(wrapper == NULL)) { \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
a_name##tsd_tsd = TlsAlloc(); \
if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
return (NULL); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
return; \
} \
} \
malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
if (init && unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
tsd_init_finish(&a_name##tsd_init_head, &block); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (pthread_key_create(&a_name##tsd_tsd, \
a_name##tsd_cleanup_wrapper) != 0) \
return (true); \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
return (NULL); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
#endif
#define MALLOC_TSD \
/* O(name, type) */ \
O(tcache, tcache_t *) \
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
O(witnesses, witness_list_t) \
O(witness_fork, bool)
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
NULL, \
0, \
0, \
NULL, \
NULL, \
NULL, \
NULL, \
0, \
false, \
tcache_enabled_default, \
NULL, \
ql_head_initializer(witnesses), \
false \
}
struct tsd_s {
tsd_state_t state;
#define O(n, t) \
t n;
MALLOC_TSD
#undef O
};
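/*
 * MALLOC_TSD is an X-macro: each consumer defines O(name, type) for its own
 * purpose and then expands the list. struct tsd_s above turns every entry
 * into a field, and the tsd_*p_get()/tsd_*_get()/tsd_*_set() accessors near
 * the end of this header are stamped out the same way, so the field list is
 * maintained in exactly one place.
 */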
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
tsd_t tsd;
};
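/*
 * Conversion sketch: tsdn_fetch() may return NULL before TSD is booted, so
 * callers test with tsdn_null() before downcasting via tsdn_tsd():
 *
 *	tsdn_t *tsdn = tsdn_fetch();
 *	if (!tsdn_null(tsdn)) {
 *		tsd_t *tsd = tsdn_tsd(tsdn);
 *		...
 *	}
 */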
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
void tsd_cleanup(void *arg);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t *tsdn_fetch(void);
bool tsdn_null(const tsdn_t *tsdn);
tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL)
return (NULL);
assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
tsd->state = tsd_state_nominal;
/* Trigger cleanup handler registration. */
tsd_set(tsd);
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
} else
assert(tsd->state == tsd_state_reincarnated);
}
return (tsd);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
return (tsd_fetch_impl(true));
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
return (tsd->state == tsd_state_nominal);
}
#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
if (!tsd_booted_get())
return (NULL);
return (tsd_tsdn(tsd_fetch_impl(false)));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 21,743 | 26.593909 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/geohash-int/geohash.h
|
/*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GEOHASH_H_
#define GEOHASH_H_
#include <stddef.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif
#define HASHISZERO(r) (!(r).bits && !(r).step)
#define RANGEISZERO(r) (!(r).max && !(r).min)
#define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r))
#define GEO_STEP_MAX 26 /* 26*2 = 52 bits. */
/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */
#define GEO_LAT_MIN -85.05112878
#define GEO_LAT_MAX 85.05112878
#define GEO_LONG_MIN -180
#define GEO_LONG_MAX 180
typedef enum {
GEOHASH_NORTH = 0,
GEOHASH_EAST,
GEOHASH_WEST,
GEOHASH_SOUTH,
GEOHASH_SOUTH_WEST,
GEOHASH_SOUTH_EAST,
GEOHASH_NORT_WEST,
GEOHASH_NORT_EAST
} GeoDirection;
typedef struct {
uint64_t bits;
uint8_t step;
} GeoHashBits;
typedef struct {
double min;
double max;
} GeoHashRange;
typedef struct {
GeoHashBits hash;
GeoHashRange longitude;
GeoHashRange latitude;
} GeoHashArea;
typedef struct {
GeoHashBits north;
GeoHashBits east;
GeoHashBits west;
GeoHashBits south;
GeoHashBits north_east;
GeoHashBits south_east;
GeoHashBits north_west;
GeoHashBits south_west;
} GeoHashNeighbors;
/*
* 0:success
* -1:failed
*/
void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range);
int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range,
double longitude, double latitude, uint8_t step,
GeoHashBits *hash);
int geohashEncodeType(double longitude, double latitude,
uint8_t step, GeoHashBits *hash);
int geohashEncodeWGS84(double longitude, double latitude, uint8_t step,
GeoHashBits *hash);
int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range,
const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy);
int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy);
int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy);
int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy);
void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors);
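/*
 * Usage sketch (hypothetical coordinates, return codes omitted): encode a
 * WGS84 point at maximum precision, then decode the cell back to a
 * longitude/latitude pair:
 *
 *	GeoHashBits hash;
 *	double xy[2];
 *	geohashEncodeWGS84(15.087269, 37.502669, GEO_STEP_MAX, &hash);
 *	geohashDecodeToLongLatWGS84(hash, xy);
 */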
#if defined(__cplusplus)
}
#endif
#endif /* GEOHASH_H_ */
| 4,124 | 33.663866 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/geohash-int/geohash_helper.h
|
/*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GEOHASH_HELPER_HPP_
#define GEOHASH_HELPER_HPP_
#include <math.h>
#include "geohash.h"
#define GZERO(s) s.bits = s.step = 0;
#define GISZERO(s) (!s.bits && !s.step)
#define GISNOTZERO(s) (s.bits || s.step)
typedef uint64_t GeoHashFix52Bits;
typedef uint64_t GeoHashVarBits;
typedef struct {
GeoHashBits hash;
GeoHashArea area;
GeoHashNeighbors neighbors;
} GeoHashRadius;
int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b);
uint8_t geohashEstimateStepsByRadius(double range_meters, double lat);
int geohashBoundingBox(double longitude, double latitude, double radius_meters,
double *bounds);
GeoHashRadius geohashGetAreasByRadius(double longitude,
double latitude, double radius_meters);
GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude,
double radius_meters);
GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude,
double radius_meters);
GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash);
double geohashGetDistance(double lon1d, double lat1d,
double lon2d, double lat2d);
int geohashGetDistanceIfInRadius(double x1, double y1,
double x2, double y2, double radius,
double *distance);
int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
double y2, double radius,
double *distance);
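/*
 * Usage sketch (lon1/lat1/lon2/lat2 are hypothetical placeholders): check
 * whether two WGS84 points lie within 200 meters of each other, obtaining
 * their distance as a side effect when they do:
 *
 *	double dist;
 *	if (geohashGetDistanceIfInRadiusWGS84(lon1, lat1, lon2, lat2,
 *	    200.0, &dist)) {
 *		// dist holds the distance in meters.
 *	}
 */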
#endif /* GEOHASH_HELPER_HPP_ */
| 3,368 | 45.791667 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/install_server.sh
|
#!/bin/sh
# Copyright 2011 Dvir Volk <dvirsk at gmail dot com>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL Dvir Volk OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Interactive service installer for redis server
# this generates a redis config file and an /etc/init.d script, and installs them
# this scripts should be run as root
die () {
echo "ERROR: $1. Aborting!"
exit 1
}
#Absolute path to this script
SCRIPT=$(readlink -f $0)
#Absolute path this script is in
SCRIPTPATH=$(dirname $SCRIPT)
#Initial defaults
_REDIS_PORT=6379
echo "Welcome to the redis service installer"
echo "This script will help you easily set up a running redis server"
echo
#check for root user
if [ "$(id -u)" -ne 0 ] ; then
echo "You must run this script as root. Sorry!"
exit 1
fi
#Read the redis port
read -p "Please select the redis port for this instance: [$_REDIS_PORT] " REDIS_PORT
if ! echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then
echo "Selecting default: $_REDIS_PORT"
REDIS_PORT=$_REDIS_PORT
fi
#read the redis config file
_REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
read -p "Please select the redis config file name [$_REDIS_CONFIG_FILE] " REDIS_CONFIG_FILE
if [ -z "$REDIS_CONFIG_FILE" ] ; then
REDIS_CONFIG_FILE=$_REDIS_CONFIG_FILE
echo "Selected default - $REDIS_CONFIG_FILE"
fi
#read the redis log file path
_REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
read -p "Please select the redis log file name [$_REDIS_LOG_FILE] " REDIS_LOG_FILE
if [ -z "$REDIS_LOG_FILE" ] ; then
REDIS_LOG_FILE=$_REDIS_LOG_FILE
echo "Selected default - $REDIS_LOG_FILE"
fi
#get the redis data directory
_REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
read -p "Please select the data directory for this instance [$_REDIS_DATA_DIR] " REDIS_DATA_DIR
if [ -z "$REDIS_DATA_DIR" ] ; then
REDIS_DATA_DIR=$_REDIS_DATA_DIR
echo "Selected default - $REDIS_DATA_DIR"
fi
#get the redis executable path
_REDIS_EXECUTABLE=`command -v redis-server`
read -p "Please select the redis executable path [$_REDIS_EXECUTABLE] " REDIS_EXECUTABLE
if [ ! -x "$REDIS_EXECUTABLE" ] ; then
REDIS_EXECUTABLE=$_REDIS_EXECUTABLE
if [ ! -x "$REDIS_EXECUTABLE" ] ; then
echo "Mmmmm... it seems like you don't have a redis executable. Did you run make install yet?"
exit 1
fi
fi
#check the default for redis cli
CLI_EXEC=`command -v redis-cli`
if [ -z "$CLI_EXEC" ] ; then
CLI_EXEC=`dirname $REDIS_EXECUTABLE`"/redis-cli"
fi
echo "Selected config:"
echo "Port : $REDIS_PORT"
echo "Config file : $REDIS_CONFIG_FILE"
echo "Log file : $REDIS_LOG_FILE"
echo "Data dir : $REDIS_DATA_DIR"
echo "Executable : $REDIS_EXECUTABLE"
echo "Cli Executable : $CLI_EXEC"
read -p "Is this ok? Then press ENTER to go on or Ctrl-C to abort." _UNUSED_
mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
#render the templates
TMP_FILE="/tmp/${REDIS_PORT}.conf"
DEFAULT_CONFIG="${SCRIPTPATH}/../redis.conf"
INIT_TPL_FILE="${SCRIPTPATH}/redis_init_script.tpl"
INIT_SCRIPT_DEST="/etc/init.d/redis_${REDIS_PORT}"
PIDFILE="/var/run/redis_${REDIS_PORT}.pid"
if [ ! -f "$DEFAULT_CONFIG" ]; then
echo "Mmmmm... the default config is missing. Did you switch to the utils directory?"
exit 1
fi
#Generate config file from the default config file as template
#changing only the stuff we're controlling from this script
echo "## Generated by install_server.sh ##" > $TMP_FILE
read -r SED_EXPR <<-EOF
s#^port [0-9]{4}\$#port ${REDIS_PORT}#; \
s#^logfile .+\$#logfile ${REDIS_LOG_FILE}#; \
s#^dir .+\$#dir ${REDIS_DATA_DIR}#; \
s#^pidfile .+\$#pidfile ${PIDFILE}#; \
s#^daemonize no\$#daemonize yes#;
EOF
sed -r "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE
#cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done
cp $TMP_FILE $REDIS_CONFIG_FILE || die "Could not write redis config file $REDIS_CONFIG_FILE"
#Generate the init script from the template file
rm -f $TMP_FILE
###
# Generate sample script from template file
# - No need to check which system we are on. The init info are comments and
# do not interfere with update_rc.d systems. Additionally:
# Ubuntu/debian by default does not come with chkconfig, but does issue a
# warning if init info is not available.
cat > ${TMP_FILE} <<EOT
#!/bin/sh
#Configurations injected by install_server below....
EXEC=$REDIS_EXECUTABLE
CLIEXEC=$CLI_EXEC
PIDFILE=$PIDFILE
CONF="$REDIS_CONFIG_FILE"
REDISPORT="$REDIS_PORT"
###############
# SysV Init Information
# chkconfig: - 58 74
# description: redis_${REDIS_PORT} is the redis daemon.
### BEGIN INIT INFO
# Provides: redis_${REDIS_PORT}
# Required-Start: \$network \$local_fs \$remote_fs
# Required-Stop: \$network \$local_fs \$remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Should-Start: \$syslog \$named
# Should-Stop: \$syslog \$named
# Short-Description: start and stop redis_${REDIS_PORT}
# Description: Redis daemon
### END INIT INFO
EOT
cat ${INIT_TPL_FILE} >> ${TMP_FILE}
#copy to /etc/init.d
cp $TMP_FILE $INIT_SCRIPT_DEST && \
chmod +x $INIT_SCRIPT_DEST || die "Could not copy redis init script to $INIT_SCRIPT_DEST"
echo "Copied $TMP_FILE => $INIT_SCRIPT_DEST"
#Install the service
echo "Installing service..."
if command -v chkconfig >/dev/null 2>&1; then
# we're chkconfig, so lets add to chkconfig and put in runlevel 345
chkconfig --add redis_${REDIS_PORT} && echo "Successfully added to chkconfig!"
chkconfig --level 345 redis_${REDIS_PORT} on && echo "Successfully added to runlevels 345!"
elif command -v update-rc.d >/dev/null 2>&1; then
#if we're not a chkconfig box assume we're able to use update-rc.d
update-rc.d redis_${REDIS_PORT} defaults && echo "Success!"
else
echo "No supported init tool found."
fi
/etc/init.d/redis_$REDIS_PORT start || die "Failed starting service..."
#tada
echo "Installation successful!"
exit 0
| 8,545 | 33.739837 | 147 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/whatisdoing.sh
|
#!/bin/bash
# This script is from http://poormansprofiler.org/
#
# NOTE: Instead of using this script, you should use the Redis
# Software Watchdog, which provides a similar functionality but in
# a more reliable / easy to use way.
#
# Check http://redis.io/topics/latency for more information.
nsamples=1
sleeptime=0
pid=$(ps auxww | grep '[r]edis-server' | awk '{print $2}')
for x in $(seq 1 $nsamples)
do
gdb -ex "set pagination 0" -ex "thread apply all bt" -batch -p $pid
sleep $sleeptime
done | \
awk '
BEGIN { s = ""; }
/Thread/ { print s; s = ""; }
/^\#/ { if (s != "" ) { s = s "," $4} else { s = $4 } }
END { print s }' | \
sort | uniq -c | sort -r -n -k 1,1
| 693 | 26.76 | 71 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/releasetools/02_upload_tarball.sh
|
#!/bin/bash
echo "Uploading..."
scp /tmp/redis-${1}.tar.gz [email protected]:/var/virtual/download.redis.io/httpdocs/releases/
echo "Updating web site... (press any key if it is a stable release, or Ctrl+C)"
read x
ssh [email protected] "cd /var/virtual/download.redis.io/httpdocs; ./update.sh ${1}"
| 304 | 42.571429 | 96 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/releasetools/04_release_hash.sh
|
#!/bin/bash
SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum | cut -f 1 -d' ')
ENTRY="hash redis-${1}.tar.gz sha1 $SHA http://download.redis.io/releases/redis-${1}.tar.gz"
echo $ENTRY >> ~/hack/redis-hashes/README
vi ~/hack/redis-hashes/README
echo "Press any key to commit, Ctrl-C to abort)."
read yes
(cd ~/hack/redis-hashes; git commit -a -m "${1} hash."; git push)
| 395 | 43 | 92 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/releasetools/03_test_release.sh
|
#!/bin/sh
if [ $# != "1" ]
then
echo "Usage: ${0} <git-ref>"
exit 1
fi
TAG=$1
TARNAME="redis-${TAG}.tar.gz"
DOWNLOADURL="http://download.redis.io/releases/${TARNAME}"
ssh antirez@metal "export TERM=xterm;
cd /tmp;
                   rm -rf test_release_tmp_dir;
                   mkdir test_release_tmp_dir;
                   cd test_release_tmp_dir;
wget $DOWNLOADURL;
tar xvzf $TARNAME;
cd redis-${TAG};
make;
./runtest;
./runtest-sentinel;
if [ -x runtest-cluster ]; then
./runtest-cluster;
fi"
| 657 | 25.32 | 58 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/utils/releasetools/01_create_tarball.sh
|
#!/bin/sh
if [ $# != "1" ]
then
echo "Usage: ./mkrelease.sh <git-ref>"
exit 1
fi
TAG=$1
TARNAME="redis-${TAG}.tar"
echo "Generating /tmp/${TARNAME}"
cd ~/hack/redis
git archive $TAG --prefix redis-${TAG}/ > /tmp/$TARNAME || exit 1
echo "Gizipping the archive"
rm -f /tmp/$TARNAME.gz
gzip -9 /tmp/$TARNAME
| 314 | 18.6875 | 65 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/run.sh
|
sudo mount -o dax /dev/pmem0 /mnt/pmem
sudo rm -rf /mnt/pmem/*
sudo chown oem /mnt/pmem
#./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1
#sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillseq,fillrandom,overwrite > out
make bench
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillrandom > out
#grep "timecp" out > time
#awk '{sum+= $3;} END{print sum;}' time
grep "cp" out > time
log=$(awk '{sum+= $2;} END{print sum;}' time)
echo ""
echo $1'cp' $log
tottime=$(tail -1 out | awk '{print $NF}' | cut -d "," -f10|awk '{print $1/100}')
echo $1'tottime' $tottime
| 674 | 41.1875 | 135 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/runtest.sh
|
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillrandom
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,overwrite
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readseq
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandom
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readmissing
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleteseq
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleterandom
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readwhilewriting
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandomwriterandom
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,txfillrandom
| 1,176 | 89.538462 | 117 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/setup.sh
|
sudo mkfs.ext4 /dev/pmem0
sudo mount -o dax /dev/pmem0 /mnt/pmem
sudo chown oem /mnt/pmem
#cd Research/Research/pmdk/src/examples/libpmemobj/map/
#cat run.sh
| 159 | 25.666667 | 55 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/entrypoint.sh
|
#!/bin/sh -l
#
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021, Intel Corporation
set -e
set -x
echo "$1"
project_dir=${WORKDIR:-/pmemkv-bench}
echo "run basic test"
pytest-3 -v ${project_dir}/tests/test.py
| 219 | 12.75 | 40 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/db_bench.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the Apache 2.0 License
// (found in the LICENSE file in the root directory).
// SPDX-License-Identifier: Apache-2.0
// Copyright 2017-2021, Intel Corporation
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <inttypes.h>
#include <iomanip>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <sys/types.h>
#include <sys/stat.h>
#include <vector>
#include "csv.h"
#include "histogram.h"
#include "leveldb/env.h"
#include "libpmemkv.hpp"
#include "libpmempool.h"
#include "mutexlock.h"
#include "port/port_posix.h"
#include "random.h"
#include "testutil.h"
static const std::string USAGE =
"pmemkv_bench\n"
"--engine=<name> (storage engine name, default: cmap)\n"
"--db=<location> (path to persistent pool, default: /dev/shm/pmemkv)\n"
" (note: file on DAX filesystem, DAX device, or poolset file)\n"
"--db_size_in_gb=<integer> (size of persistent pool to create in GB, default: 0)\n"
" (note: for existing poolset or device DAX configs use 0 or leave default value)\n"
" (note: when pool path is non-existing, value should be > 0)\n"
"--histogram=<0|1> (show histograms when reporting latencies)\n"
"--num=<integer> (number of keys to place in database, default: 1000000)\n"
"--reads=<integer> (number of read operations, default: 1000000)\n"
"--threads=<integer> (number of concurrent threads, default: 1)\n"
"--key_size=<integer> (size of keys in bytes, default: 16)\n"
"--value_size=<integer> (size of values in bytes, default: 100)\n"
"--readwritepercent=<integer> (Ratio of reads to reads/writes (expressed "
"as percentage) for the ReadRandomWriteRandom workload. The default value "
"90 means 90% operations out of all reads and writes operations are reads. "
"In other words, 9 gets for every 1 put.) type: int32 default: 90\n"
"--tx_size=<integer> (number of elements to insert in a single tx, there will be"
"num/tx_size transactions per thread in total, the last tx might be smaller, default: 10)\n"
"--disjoint=<0|1> (specifies whether each thread works on disjoint set of keys. "
"0 means that all threads read/write to the db using any key between 0 and `num`, so that "
"number of ops is `threads` * `num`. 1 means that each thread performs reads/writes using "
"only [`thread_id` * `num` / `threads`, (`thread_id` + 1) * `num` / `threads`) subset of keys, "
"so that total number of ops is `num`. The default value is 0.)\n"
"--benchmarks=<name>, (comma-separated list of benchmarks to run)\n"
" fillseq (load N values in sequential key order)\n"
" fillrandom (load N values in random key order)\n"
" overwrite (replace N values in random key order)\n"
" readseq (read N values in sequential key order)\n"
" readrandom (read N values in random key order)\n"
" readmissing (read N missing values in random key order)\n"
" deleteseq (delete N values in sequential key order)\n"
" deleterandom (delete N values in random key order)\n"
" readwhilewriting (1 writer, N threads doing random reads)\n"
" readrandomwriterandom (N threads doing random-read, random-write)\n"
" txfillrandom (load N values in random key order transactionally)\n";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
static bool FLAGS_disjoint = false;
// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;
// Number of concurrent threads to run.
static int FLAGS_threads = 1;
static int FLAGS_key_size = 16;
// Size of each value
static int FLAGS_value_size = 100;
// Print histogram of operation timings
static bool FLAGS_histogram = false;
// Use the db with the following name.
static const char *FLAGS_db = "/dev/shm/pmemkv";
// Use following size when opening the database.
static int FLAGS_db_size_in_gb = 0;
static const double FLAGS_compression_ratio = 1.0;
static const int FLAGS_ops_between_duration_checks = 1000;
static const int FLAGS_duration = 0;
static int FLAGS_readwritepercent = 90;
static int FLAGS_tx_size = 10;
using namespace leveldb;
leveldb::Env *g_env = NULL;
#if defined(__linux)
static Slice TrimSpace(Slice s)
{
size_t start = 0;
while (start < s.size() && isspace(s[start])) {
start++;
}
size_t limit = s.size();
while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
}
#endif
// Helper for quickly generating random data.
class RandomGenerator {
private:
std::string data_;
unsigned int pos_;
public:
RandomGenerator()
{
// We use a limited amount of data over and over again and ensure
// that it is larger than the compression window (32KB), and also
// large enough to serve all typical value sizes we want to write.
Random rnd(301);
std::string piece;
while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) {
// Add a short fragment that is as compressible as specified
// by FLAGS_compression_ratio.
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
data_.append(piece);
}
pos_ = 0;
}
Slice Generate(unsigned int len)
{
assert(len <= data_.size());
if (pos_ + len > data_.size()) {
pos_ = 0;
}
pos_ += len;
return Slice(data_.data() + pos_ - len, len);
}
Slice GenerateWithTTL(unsigned int len)
{
assert(len <= data_.size());
if (pos_ + len > data_.size()) {
pos_ = 0;
}
pos_ += len;
return Slice(data_.data() + pos_ - len, len);
}
};
static void AppendWithSpace(std::string *str, Slice msg)
{
if (msg.empty())
return;
if (!str->empty()) {
str->push_back(' ');
}
str->append(msg.data(), msg.size());
}
enum OperationType : unsigned char {
kRead = 0,
kWrite,
kDelete,
kSeek,
kMerge,
kUpdate,
};
class BenchmarkLogger {
private:
struct hist {
int id;
std::string name;
std::string histogram;
};
int id = 0;
std::vector<hist> histograms;
CSV<int> csv = CSV<int>("id");
public:
void insert(std::string name, Histogram histogram)
{
histograms.push_back({id, name, histogram.ToString()});
std::vector<double> percentiles = {50, 75, 90, 99.9, 99.99};
for (double &percentile : percentiles) {
csv.insert(id, "Percentilie P" + std::to_string(percentile) + " [micros/op]",
histogram.Percentile(percentile));
}
csv.insert(id, "Median [micros/op]", histogram.Median());
}
template <typename T>
void insert(std::string column, T data)
{
csv.insert(id, column, data);
}
void insert(std::string column, std::time_t time)
{
std::ostringstream time_stream;
time_stream << std::put_time(std::localtime(&time), "%D %T");
insert(column, time_stream.str());
}
void print_histogram()
{
std::cout << "------------------------------------------------" << std::endl;
for (auto &histogram : histograms) {
std::cout << "benchmark: " << histogram.id << ", " << histogram.name << std::endl
<< histogram.histogram << std::endl;
}
}
void print()
{
csv.print();
}
void next_benchmark()
{
id++;
}
};
class Stats {
private:
double start_;
double finish_;
double seconds_;
int done_;
int next_report_;
int64_t bytes_;
double last_op_finish_;
Histogram hist_;
std::string message_;
bool exclude_from_merge_;
public:
Stats()
{
Start();
}
void Start()
{
next_report_ = 100;
		start_ = g_env->NowMicros();
		finish_ = start_;
		last_op_finish_ = start_;
		hist_.Clear();
		done_ = 0;
		bytes_ = 0;
		seconds_ = 0;
message_.clear();
// When set, stats from this thread won't be merged with others.
exclude_from_merge_ = false;
}
void Merge(const Stats &other)
{
if (other.exclude_from_merge_)
return;
hist_.Merge(other.hist_);
done_ += other.done_;
bytes_ += other.bytes_;
seconds_ += other.seconds_;
if (other.start_ < start_)
start_ = other.start_;
if (other.finish_ > finish_)
finish_ = other.finish_;
// Just keep the messages from one thread
if (message_.empty())
message_ = other.message_;
}
void Stop()
{
finish_ = g_env->NowMicros();
seconds_ = (finish_ - start_) * 1e-6;
}
void AddMessage(Slice msg)
{
AppendWithSpace(&message_, msg);
}
void SetExcludeFromMerge()
{
exclude_from_merge_ = true;
}
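	// Called after every completed operation: with --histogram enabled it
	// records the per-op latency (flagging ops slower than 20ms), and it
	// reports progress to stderr at geometrically widening intervals so the
	// reporting overhead stays negligible at large op counts.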
void FinishedSingleOp()
{
if (FLAGS_histogram) {
double now = g_env->NowMicros();
double micros = now - last_op_finish_;
hist_.Add(micros);
if (micros > 20000) {
fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
fflush(stderr);
}
last_op_finish_ = now;
}
done_++;
if (done_ >= next_report_) {
if (next_report_ < 1000)
next_report_ += 100;
else if (next_report_ < 5000)
next_report_ += 500;
else if (next_report_ < 10000)
next_report_ += 1000;
else if (next_report_ < 50000)
next_report_ += 5000;
else if (next_report_ < 100000)
next_report_ += 10000;
else if (next_report_ < 500000)
next_report_ += 50000;
else
next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
}
void AddBytes(int64_t n)
{
bytes_ += n;
}
float get_micros_per_op()
{
// Pretend at least one op was done in case we are running a benchmark
// that does not call FinishedSingleOp().
if (done_ < 1)
done_ = 1;
return seconds_ * 1e6 / done_;
}
float get_ops_per_sec()
{
// Pretend at least one op was done in case we are running a benchmark
// that does not call FinishedSingleOp().
if (done_ < 1)
done_ = 1;
double elapsed = (finish_ - start_) * 1e-6;
return done_ / elapsed;
}
float get_throughput()
{
// Rate and ops/sec is computed on actual elapsed time, not the sum of per-thread
// elapsed times.
double elapsed = (finish_ - start_) * 1e-6;
return (bytes_ / 1048576.0) / elapsed;
}
std::string get_extra_data()
{
return message_;
}
Histogram &get_histogram()
{
return hist_;
}
};
// State shared by all concurrent executions of the same benchmark.
struct SharedState {
port::Mutex mu;
port::CondVar cv;
int total;
// Each thread goes through the following states:
// (1) initializing
// (2) waiting for others to be initialized
// (3) running
// (4) done
int num_initialized;
int num_done;
bool start;
SharedState() : cv(&mu)
{
}
};
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
int tid; // 0..n-1 when running in n threads
Random rand; // Has different seeds for different threads
Stats stats;
SharedState *shared;
ThreadState(int index) : tid(index), rand(1000 + index)
{
}
};
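// Duration bounds a benchmark either by operation count or, when
// max_seconds_ is non-zero, by wall-clock time. Done() is fed the number of
// ops just completed and returns true once the budget is exhausted; the
// clock is only consulted about once per FLAGS_ops_between_duration_checks
// ops so the time check stays off the hot path.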
class Duration {
typedef std::chrono::high_resolution_clock::time_point time_point;
public:
Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0)
{
max_seconds_ = max_seconds;
max_ops_ = max_ops;
ops_per_stage_ = (ops_per_stage > 0) ? ops_per_stage : max_ops;
ops_ = 0;
start_at_ = std::chrono::high_resolution_clock::now();
}
int64_t GetStage()
{
return std::min(ops_, max_ops_ - 1) / ops_per_stage_;
}
bool Done(int64_t increment)
{
if (increment <= 0)
increment = 1; // avoid Done(0) and infinite loops
ops_ += increment;
if (max_seconds_) {
			// Recheck roughly every 1000 ops (exact iff increment is a factor of 1000)
			auto granularity = FLAGS_ops_between_duration_checks;
			if ((ops_ / granularity) != ((ops_ - increment) / granularity)) {
				time_point now = std::chrono::high_resolution_clock::now();
				return std::chrono::duration_cast<std::chrono::milliseconds>(now - start_at_)
					       .count() >= static_cast<int64_t>(max_seconds_) * 1000;
} else {
return false;
}
} else {
return ops_ > max_ops_;
}
}
private:
uint64_t max_seconds_;
int64_t max_ops_;
int64_t ops_per_stage_;
int64_t ops_;
time_point start_at_;
};
class Benchmark {
private:
pmem::kv::db *kv_;
int num_;
int tx_size_;
int value_size_;
int key_size_;
int reads_;
int64_t readwrites_;
BenchmarkLogger &logger;
Slice name;
int n;
const char *engine;
void (Benchmark::*method)(ThreadState *) = NULL;
void PrintHeader()
{
PrintEnvironment();
logger.insert("Path", FLAGS_db);
logger.insert("Engine", engine);
logger.insert("Keys [bytes each]", FLAGS_key_size);
logger.insert("Values [bytes each]", FLAGS_value_size);
logger.insert("Entries", num_);
logger.insert("RawSize [MB (estimated)]",
((static_cast<int64_t>(FLAGS_key_size + FLAGS_value_size) * num_) / 1048576.0));
PrintWarnings();
}
void PrintWarnings()
{
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
fprintf(stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
void PrintEnvironment()
{
#if defined(__linux)
auto now = std::time(NULL);
logger.insert("Date:", now);
FILE *cpuinfo = fopen("/proc/cpuinfo", "r");
if (cpuinfo != NULL) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
while (fgets(line, sizeof(line), cpuinfo) != NULL) {
const char *sep = strchr(line, ':');
if (sep == NULL) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
Slice val = TrimSpace(Slice(sep + 1));
if (key == "model name") {
++num_cpus;
cpu_type = val.ToString();
} else if (key == "cache size") {
cache_size = val.ToString();
}
}
fclose(cpuinfo);
logger.insert("CPU", std::to_string(num_cpus));
logger.insert("CPU model", cpu_type);
logger.insert("CPUCache", cache_size);
}
#endif
}
public:
Benchmark(Slice name, pmem::kv::db *&kv, int num_threads, const char *engine, BenchmarkLogger &logger)
: kv_(kv), num_(FLAGS_num), tx_size_(FLAGS_tx_size), value_size_(FLAGS_value_size),
key_size_(FLAGS_key_size), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
	  readwrites_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), logger(logger), name(name),
	  n(num_threads), engine(engine)
{
fprintf(stderr, "running %s \n", name.ToString().c_str());
bool fresh_db = false;
if (name == Slice("fillseq")) {
fresh_db = true;
method = &Benchmark::WriteSeq;
} else if (name == Slice("fillrandom")) {
fresh_db = true;
method = &Benchmark::WriteRandom;
} else if (name == Slice("txfillrandom")) {
fresh_db = true;
method = &Benchmark::TxFillRandom;
} else if (name == Slice("overwrite")) {
method = &Benchmark::WriteRandom;
} else if (name == Slice("readseq")) {
method = &Benchmark::ReadSeq;
} else if (name == Slice("readrandom")) {
method = &Benchmark::ReadRandom;
} else if (name == Slice("readmissing")) {
method = &Benchmark::ReadMissing;
} else if (name == Slice("deleteseq")) {
method = &Benchmark::DeleteSeq;
} else if (name == Slice("deleterandom")) {
method = &Benchmark::DeleteRandom;
} else if (name == Slice("readwhilewriting")) {
			++n; /* one extra thread: tid 0 becomes the dedicated writer (see BGWriter) */
method = &Benchmark::ReadWhileWriting;
} else if (name == Slice("readrandomwriterandom")) {
method = &Benchmark::ReadRandomWriteRandom;
} else {
throw std::runtime_error("unknown benchmark: " + name.ToString());
}
logger.next_benchmark();
logger.insert("Benchmark", name.ToString());
PrintHeader();
if (fresh_db) {
if (kv_ != nullptr) {
kv_->close();
delete kv_;
kv_ = nullptr;
}
Create(name.ToString());
kv = kv_;
}
}
Slice AllocateKey(std::unique_ptr<const char[]> &key_guard)
{
const char *tmp = new char[key_size_];
key_guard.reset(tmp);
return Slice(key_guard.get(), key_size_);
}
void GenerateKeyFromInt(uint64_t v, int64_t num_keys, Slice *key, bool missing = false)
{
char *start = const_cast<char *>(key->data());
char *pos = start;
int bytes_to_fill = std::min(key_size_, 8);
if (missing) {
int64_t v1 = -v;
memcpy(pos, static_cast<void *>(&v1), bytes_to_fill);
} else
memcpy(pos, static_cast<void *>(&v), bytes_to_fill);
pos += bytes_to_fill;
if (key_size_ > pos - start) {
memset(pos, '0', key_size_ - (pos - start));
}
}
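	/* Worked example (illustrative): with key_size_ = 16 and v = 5 on a
	 * little-endian machine, the first 8 bytes hold the raw integer
	 * (05 00 00 00 00 00 00 00) and the remaining 8 bytes are padded with
	 * the character '0', so keys are fixed-width and unique per v. With
	 * missing = true the negated value is stored instead, which cannot
	 * collide with any key written by the fill benchmarks. */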
void Run()
{
SharedState shared;
shared.total = n;
shared.num_initialized = 0;
shared.num_done = 0;
shared.start = false;
ThreadArg *arg = new ThreadArg[n];
for (int i = 0; i < n; i++) {
arg[i].bm = this;
arg[i].method = method;
arg[i].shared = &shared;
arg[i].thread = new ThreadState(i);
arg[i].thread->shared = &shared;
g_env->StartThread(ThreadBody, &arg[i]);
}
shared.mu.Lock();
while (shared.num_initialized < n) {
shared.cv.Wait();
}
shared.start = true;
shared.cv.SignalAll();
while (shared.num_done < n) {
shared.cv.Wait();
}
shared.mu.Unlock();
for (int i = 1; i < n; i++) {
arg[0].thread->stats.Merge(arg[i].thread->stats);
}
auto thread_stats = arg[0].thread->stats;
logger.insert("micros/op (avarage)", thread_stats.get_micros_per_op());
logger.insert("ops/sec", thread_stats.get_ops_per_sec());
logger.insert("throughput [MB/s]", thread_stats.get_throughput());
logger.insert("extra_data", thread_stats.get_extra_data());
if (FLAGS_histogram) {
logger.insert(name.ToString(), thread_stats.get_histogram());
}
for (int i = 0; i < n; i++) {
delete arg[i].thread;
}
delete[] arg;
}
private:
struct ThreadArg {
Benchmark *bm;
SharedState *shared;
ThreadState *thread;
void (Benchmark::*method)(ThreadState *);
};
struct DbInserter {
DbInserter(pmem::kv::db *db) : db(db)
{
}
pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value)
{
return db->put(key, value);
}
pmem::kv::status commit()
{
return pmem::kv::status::OK;
}
private:
pmem::kv::db *db;
};
struct TxInserter {
TxInserter(pmem::kv::db *db) : tx(db->tx_begin().get_value())
{
}
pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value)
{
return tx.put(key, value);
}
pmem::kv::status commit()
{
return tx.commit();
}
private:
pmem::kv::tx tx;
};
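	/* How DoWrite() below uses these two inserters: DbInserter's commit()
	 * is a no-op, so every put() is an independent write; TxInserter wraps
	 * tx_size_ puts in a single pmemkv transaction whose effects become
	 * visible only once commit() succeeds. */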
static void ThreadBody(void *v)
{
ThreadArg *arg = reinterpret_cast<ThreadArg *>(v);
SharedState *shared = arg->shared;
ThreadState *thread = arg->thread;
{
MutexLock l(&shared->mu);
shared->num_initialized++;
if (shared->num_initialized >= shared->total) {
shared->cv.SignalAll();
}
while (!shared->start) {
shared->cv.Wait();
}
}
thread->stats.Start();
(arg->bm->*(arg->method))(thread);
thread->stats.Stop();
{
MutexLock l(&shared->mu);
shared->num_done++;
if (shared->num_done >= shared->total) {
shared->cv.SignalAll();
}
}
}
void Create(std::string name)
{
assert(kv_ == nullptr);
auto start = g_env->NowMicros();
auto size = 512ULL * 1024ULL * 1024ULL * FLAGS_db_size_in_gb;
pmem::kv::config cfg;
auto cfg_s = cfg.put_string("path", FLAGS_db);
if (cfg_s != pmem::kv::status::OK)
throw std::runtime_error("putting 'path' to config failed");
cfg_s = cfg.put_uint64("force_create", 1);
if (cfg_s != pmem::kv::status::OK)
throw std::runtime_error("putting 'force_create' to config failed");
cfg_s = cfg.put_uint64("size", size);
if (cfg_s != pmem::kv::status::OK)
throw std::runtime_error("putting 'size' to config failed");
/* Check if the path is a directory or a file
* (we don't pass filename in case of memkind
* based engines, only dir). If it is a file,
* remove the previous file with the same name. */
struct stat info;
if (stat(FLAGS_db, &info) == 0 && !(info.st_mode & S_IFDIR))
{
auto start = g_env->NowMicros();
/* Remove pool file. This should be
* implemented using libpmempool for backward
* compatibility. */
if (pmempool_rm(FLAGS_db, PMEMPOOL_RM_FORCE) != 0) {
throw std::runtime_error(std::string("Cannot remove pool: ") + FLAGS_db);
}
logger.insert("Remove [millis/op]", ((g_env->NowMicros() - start) * 1e-3));
}
kv_ = new pmem::kv::db;
auto s = kv_->open(engine, std::move(cfg));
if (s != pmem::kv::status::OK) {
fprintf(stderr,
"Cannot start engine (%s) for path (%s) with %i GB capacity\n%s\n\nUSAGE: %s",
engine, FLAGS_db, FLAGS_db_size_in_gb, pmem::kv::errormsg().c_str(),
USAGE.c_str());
exit(-42);
}
logger.insert("Open [millis/op]", ((g_env->NowMicros() - start) * 1e-3));
}
template <typename Inserter = DbInserter>
void DoWrite(ThreadState *thread, bool seq)
{
if (num_ != FLAGS_num) {
char msg[100];
snprintf(msg, sizeof(msg), "(%d ops)", num_);
thread->stats.AddMessage(msg);
}
std::unique_ptr<const char[]> key_guard;
Slice key = AllocateKey(key_guard);
auto num = FLAGS_disjoint ? num_ / FLAGS_threads : num_;
auto start = FLAGS_disjoint ? thread->tid * num : 0;
auto end = FLAGS_disjoint ? (thread->tid + 1) * num : num_;
pmem::kv::status s;
int64_t bytes = 0;
auto batch_size = std::is_same<Inserter, TxInserter>::value ? tx_size_ : 1;
for (int n = start; n < end; n += batch_size) {
Inserter inserter(kv_);
			for (int i = n; i < n + batch_size && i < end; i++) { /* clamp the last batch */
const int k = seq ? i : (thread->rand.Next() % num) + start;
GenerateKeyFromInt(k, FLAGS_num, &key);
std::string value = std::string();
value.append(value_size_, 'X');
s = inserter.put(key.ToString(), value);
bytes += value_size_ + key.size();
if (s != pmem::kv::status::OK) {
fprintf(stdout, "Out of space at key %i\n", i);
exit(1);
}
}
s = inserter.commit();
thread->stats.FinishedSingleOp();
if (s != pmem::kv::status::OK) {
fprintf(stdout, "Commit failed at batch %i\n", n);
exit(1);
}
}
thread->stats.AddBytes(bytes);
}
void WriteSeq(ThreadState *thread)
{
DoWrite<DbInserter>(thread, true);
}
void WriteRandom(ThreadState *thread)
{
DoWrite<DbInserter>(thread, false);
}
void DoRead(ThreadState *thread, bool seq, bool missing)
{
pmem::kv::status s;
int64_t bytes = 0;
int found = 0;
std::unique_ptr<const char[]> key_guard;
Slice key = AllocateKey(key_guard);
auto num = FLAGS_disjoint ? reads_ / FLAGS_threads : reads_;
auto start = FLAGS_disjoint ? thread->tid * num : 0;
auto end = FLAGS_disjoint ? (thread->tid + 1) * num : reads_;
for (int i = start; i < end; i++) {
const int k = seq ? i : (thread->rand.Next() % num) + start;
GenerateKeyFromInt(k, FLAGS_num, &key, missing);
std::string value;
if (kv_->get(key.ToString(), &value) == pmem::kv::status::OK)
found++;
thread->stats.FinishedSingleOp();
bytes += value.length() + key.size();
}
thread->stats.AddBytes(bytes);
char msg[100];
snprintf(msg, sizeof(msg), "(%d of %d found by one thread)", found, reads_);
thread->stats.AddMessage(msg);
}
void ReadSeq(ThreadState *thread)
{
DoRead(thread, true, false);
}
void ReadRandom(ThreadState *thread)
{
DoRead(thread, false, false);
}
void ReadMissing(ThreadState *thread)
{
DoRead(thread, false, true);
}
void DoDelete(ThreadState *thread, bool seq)
{
std::unique_ptr<const char[]> key_guard;
Slice key = AllocateKey(key_guard);
for (int i = 0; i < num_; i++) {
const int k = seq ? i : (thread->rand.Next() % FLAGS_num);
GenerateKeyFromInt(k, FLAGS_num, &key);
kv_->remove(key.ToString());
thread->stats.FinishedSingleOp();
}
}
void DeleteSeq(ThreadState *thread)
{
DoDelete(thread, true);
}
void DeleteRandom(ThreadState *thread)
{
DoDelete(thread, false);
}
void BGWriter(ThreadState *thread, enum OperationType write_merge)
{
// Special thread that keeps writing until other threads are done.
RandomGenerator gen;
int64_t bytes = 0;
// Don't merge stats from this thread with the readers.
thread->stats.SetExcludeFromMerge();
std::unique_ptr<const char[]> key_guard;
Slice key = AllocateKey(key_guard);
uint32_t written = 0;
bool hint_printed = false;
while (true) {
{
MutexLock l(&thread->shared->mu);
if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
// Finish the write immediately
break;
}
}
GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
pmem::kv::status s;
if (write_merge == kWrite) {
s = kv_->put(key.ToString(), gen.Generate(value_size_).ToString());
} else {
fprintf(stderr, "Merge operation not supported\n");
exit(1);
}
written++;
if (s != pmem::kv::status::OK) {
fprintf(stderr, "Put error\n");
exit(1);
}
bytes += key.size() + value_size_;
}
thread->stats.AddBytes(bytes);
}
void ReadWhileWriting(ThreadState *thread)
{
if (thread->tid > 0) {
ReadRandom(thread);
} else {
BGWriter(thread, kWrite);
}
}
void ReadRandomWriteRandom(ThreadState *thread)
{
RandomGenerator gen;
std::string value;
int64_t found = 0;
int get_weight = 0;
int put_weight = 0;
int64_t reads_done = 0;
int64_t writes_done = 0;
int64_t bytes = 0;
Duration duration(FLAGS_duration, readwrites_);
std::unique_ptr<const char[]> key_guard;
Slice key = AllocateKey(key_guard);
// the number of iterations is the larger of read_ or write_
while (!duration.Done(1)) {
GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key);
if (get_weight == 0 && put_weight == 0) {
// one batch completed, reinitialize for next batch
get_weight = FLAGS_readwritepercent;
put_weight = 100 - get_weight;
}
if (get_weight > 0) {
value.clear();
pmem::kv::status s = kv_->get(key.ToString(), &value);
if (s == pmem::kv::status::OK) {
found++;
} else if (s != pmem::kv::status::NOT_FOUND) {
fprintf(stderr, "get error\n");
}
bytes += value.length() + key.size();
get_weight--;
reads_done++;
thread->stats.FinishedSingleOp();
} else if (put_weight > 0) {
// then do all the corresponding number of puts
// for all the gets we have done earlier
pmem::kv::status s =
kv_->put(key.ToString(), gen.Generate(value_size_).ToString());
if (s != pmem::kv::status::OK) {
fprintf(stderr, "put error\n");
exit(1);
}
bytes += key.size() + value_size_;
put_weight--;
writes_done++;
thread->stats.FinishedSingleOp();
}
}
thread->stats.AddBytes(bytes);
char msg[100];
snprintf(msg, sizeof(msg),
"(reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64 " found:%" PRIu64 ")",
reads_done, writes_done, readwrites_, found);
thread->stats.AddMessage(msg);
}
void TxFillRandom(ThreadState *thread)
{
DoWrite<TxInserter>(thread, false);
}
};
/////////////////Page fault handling/////////////////
#include <bits/types/sig_atomic_t.h>
#include <bits/types/sigset_t.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <libpmem.h>
/* Guard these against the definitions already pulled in by <signal.h>;
 * redefining them with different spellings is a compile error. */
#ifndef SIGSTKSZ
#define SIGSTKSZ 8192
#endif
#ifndef SA_SIGINFO
#define SA_SIGINFO 4
#endif
#ifndef SA_ONSTACK
#define SA_ONSTACK 0x08000000 /* Use signal stack by using `sa_restorer'. */
#endif
#ifndef SA_RESTART
#define SA_RESTART 0x10000000 /* Restart syscall on signal return. */
#endif
#ifndef SA_NODEFER
#define SA_NODEFER 0x40000000 /* Don't automatically block the signal when its handler runs. */
#endif
stack_t _sigstk;
int updated_page_count = 0;
int all_updates = 0;
void * checkpoint_start;
void * page[50];
void * device;
void cmd_issue( uint32_t opcode,
uint32_t TXID,
uint32_t TID,
uint32_t OID,
uint64_t data_addr,
uint32_t data_size,
void * ptr){
//command with thread id encoded as first 8 bits of each word
uint32_t issue_cmd[7];
issue_cmd[0] = (TID<<24)|(opcode<<16)|(TXID<<8)|TID;
issue_cmd[1] = (TID<<24)|(OID<<16)|(data_addr>>48);
issue_cmd[2] = (TID<<24)|((data_addr & 0x0000FFFFFFFFFFFF)>>24);
issue_cmd[3] = (TID<<24)|(data_addr & 0x0000000000FFFFFF);
issue_cmd[4] = (TID<<24)|(data_size<<8);
issue_cmd[5] = (TID<<24)|(0X00FFFFFF>>16);
issue_cmd[6] = (TID<<24)|((0X00FFFFFF & 0x0000FFFF)<<8);
for(int i=0;i<7;i++){
//printf("%d %08x\n",TID, issue_cmd[i]);
*((u_int32_t *) ptr) = issue_cmd[i];
}
}
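/* Usage sketch (illustrative, mirrors the call in segvHandle() below): issue
 * opcode 2 -- the checkpoint-copy command -- for a single 4 KiB page. `dev`
 * is the MMIO pointer returned by open_device(). */
static void example_issue_checkpoint(void *dev, void *page_addr){
	cmd_issue(2 /*opcode*/, 0 /*TXID*/, 0 /*TID*/, 0 /*OID*/,
		(uint64_t)page_addr, 4096 /*data_size*/, dev);
}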
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
 return ((uint64_t)cycles_high << 32) | cycles_low;
}
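/* Usage sketch: convert a getCycle() interval to seconds. The divisor assumes
 * a fixed 2 GHz TSC, the same assumption segvHandle() makes below. */
static inline double cyclesToSeconds(uint64_t start_cycles, uint64_t end_cycles){
	return ((double)(end_cycles - start_cycles))/2000000000.0;
}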
/// @brief Signal handler to trap SEGVs.
static void segvHandle(int signum, siginfo_t * siginfo, void * context) {
#define CPTIME
#ifdef CPTIME
uint64_t endCycles, startCycles,totalCycles;
startCycles = getCycle();
#endif
void * addr = siginfo->si_addr; // address of access
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
// Check if this was a SEGV that we are supposed to trap.
if (siginfo->si_code == SEGV_ACCERR) {
mprotect(pageStart, 4096, PROT_READ|PROT_WRITE);
// printf("test1\n");
if(all_updates > 0 || updated_page_count == 50){
for(int i=0;i<updated_page_count;i++){
//memcpy(checkpoint_start + i*4096, page[i],4096);
//pmem_persist( checkpoint_start + i*4096,4096);
cmd_issue(2,0,0,0, (uint64_t)(checkpoint_start + i*4096),4096,device);
			page[i] = 0; /* clear the recorded slot */
}
updated_page_count = 0;
all_updates = 0;
}
all_updates ++;
//printf("te\n");
for(int i=0; i<updated_page_count; i++){
if(page[i] == pageStart){
#ifdef CPTIME
endCycles = getCycle();
totalCycles = endCycles - startCycles;
double totTime = ((double)totalCycles)/2000000000;
printf("cp %f\n", totTime);
#endif
			return;
		}
}
page[updated_page_count] = pageStart;
//printf("test1 %lx %d %d\n",page[updated_page_count],updated_page_count,all_updates);
updated_page_count++;
#ifdef CPTIME
endCycles = getCycle();
totalCycles = endCycles - startCycles;
double totTime = ((double)totalCycles)/2000000000;
printf("cp %f\n", totTime);
#endif
//*((int *)checkpoint_start) = 10;
//test++;
//printf("test1 %lx %d\n",updated_page_count);
} else if (siginfo->si_code == SEGV_MAPERR) {
fprintf (stderr, "%d : map error with addr %p!\n", getpid(), addr);
abort();
} else {
fprintf (stderr, "%d : other access error with addr %p.\n", getpid(), addr);
abort();
}
}
static void installSignalHandler(void) {
// Set up an alternate signal stack.
printf("page fault handler initialized!!\n");
_sigstk.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
_sigstk.ss_size = SIGSTKSZ;
_sigstk.ss_flags = 0;
sigaltstack(&_sigstk, (stack_t *) 0);
// Now set up a signal handler for SIGSEGV events.
struct sigaction siga;
sigemptyset(&siga.sa_mask);
	// Block these signals while the handler is being installed
sigaddset(&siga.sa_mask, SIGSEGV);
sigaddset(&siga.sa_mask, SIGALRM);
sigprocmask(SIG_BLOCK, &siga.sa_mask, NULL);
// Point to the handler function.
siga.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER;
siga.sa_sigaction = segvHandle;
if (sigaction(SIGSEGV, &siga, NULL) == -1) {
perror("sigaction(SIGSEGV)");
exit(-1);
}
sigprocmask(SIG_UNBLOCK, &siga.sa_mask, NULL);
return;
}
//static void setpage(void * addr){
// uint64_t pageNo = ((uint64_t)addr)/4096;
// unsigned long * pageStart = (unsigned long *)(pageNo*4096);
// mprotect(pageStart, 4096, PROT_READ);
// return;
//}
static void resetpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ|PROT_WRITE);
return;
}
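/* Sketch of the intended write-tracking cycle (illustrative; the commented-out
 * setpage() above is the original arming step): mark a page read-only, let the
 * first write fault into segvHandle(), which re-enables writes and records the
 * page for the next checkpoint flush. */
static void example_track_page(void * addr){
	uint64_t pageNo = ((uint64_t)addr)/4096;
	unsigned long * pageStart = (unsigned long *)(pageNo*4096);
	mprotect(pageStart, 4096, PROT_READ); // arm the trap
	*(volatile char *)addr = 1; // first write faults; handler records the page
}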
void* open_device(const char* pathname)
{
//int fd = os_open("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0",O_RDWR|O_SYNC);
int fd = open(pathname,O_RDWR|O_SYNC);
if(fd == -1)
{
printf("Couldnt opene file!!\n");
exit(0);
}
void * ptr = mmap(0,4096,PROT_READ|PROT_WRITE, MAP_SHARED,fd,0);
if(ptr == (void *)-1)
{
printf("Could not map memory!!\n");
exit(0);
}
printf("opened device without error!!\n");
return ptr;
}
///////////////////////////////////////////////////////////////
void installSignalHandler (void) __attribute__ ((constructor));
int main(int argc, char **argv)
{
size_t mapped_len1;
int is_pmem1;
if ((checkpoint_start = pmem_map_file("/mnt/mem/checkpoint", 4096*50,
PMEM_FILE_CREATE, 0666, &mapped_len1, &is_pmem1)) == NULL) {
fprintf(stderr, "pmem_map_file failed\n");
		exit(1);
}
device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
// Default list of comma-separated operations to run
static const char *FLAGS_benchmarks =
"fillseq,fillrandom,overwrite,readseq,readrandom,readmissing,deleteseq,deleterandom,readwhilewriting,readrandomwriterandom";
// Default engine name
static const char *FLAGS_engine = "cmap";
// Print usage statement if necessary
if (argc != 1) {
if ((strcmp(argv[1], "?") == 0) || (strcmp(argv[1], "-?") == 0) ||
(strcmp(argv[1], "h") == 0) || (strcmp(argv[1], "-h") == 0) ||
(strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "--help") == 0)) {
fprintf(stderr, "%s", USAGE.c_str());
exit(1);
}
}
// Parse command-line arguments
for (int i = 1; i < argc; i++) {
int n;
char junk;
if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
} else if (strncmp(argv[i], "--engine=", 9) == 0) {
FLAGS_engine = argv[i] + 9;
} else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) {
FLAGS_histogram = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
FLAGS_reads = n;
} else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
FLAGS_threads = n;
} else if (sscanf(argv[i], "--key_size=%d%c", &n, &junk) == 1) {
FLAGS_key_size = n;
} else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
FLAGS_value_size = n;
} else if (sscanf(argv[i], "--readwritepercent=%d%c", &n, &junk) == 1) {
FLAGS_readwritepercent = n;
} else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5;
} else if (sscanf(argv[i], "--db_size_in_gb=%d%c", &n, &junk) == 1) {
FLAGS_db_size_in_gb = n;
} else if (sscanf(argv[i], "--tx_size=%d%c", &n, &junk) == 1) {
FLAGS_tx_size = n;
} else if (sscanf(argv[i], "--disjoint=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) {
FLAGS_disjoint = n;
} else {
fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
exit(1);
}
}
// Run benchmark against default environment
g_env = leveldb::Env::Default();
BenchmarkLogger logger = BenchmarkLogger();
int return_value = 0;
pmem::kv::db *kv = NULL;
const char *benchmarks = FLAGS_benchmarks;
while (benchmarks != NULL) {
const char *sep = strchr(benchmarks, ',');
Slice name;
if (sep == NULL) {
name = benchmarks;
benchmarks = NULL;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
}
try {
auto benchmark = Benchmark(name, kv, FLAGS_threads, FLAGS_engine, logger);
benchmark.Run();
} catch (std::exception &e) {
std::cerr << e.what() << std::endl;
return_value = 1;
break;
}
}
if (kv != NULL) {
kv->close();
delete kv;
}
logger.print();
if (FLAGS_histogram) {
logger.print_histogram();
}
return return_value;
}
| 35,996 | 25.984258 | 126 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/csv.h
|
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020-2021, Intel Corporation */
#pragma once
#include <iostream>
#include <map>
#include <ostream>
#include <set>
#include <string>
template <typename IdType>
class CSV {
private:
/* Hold data in two-dimensional map of strings: data_matrix[row][column]
*/
std::map<IdType, std::map<std::string, std::string>> data_matrix;
/* List of all columns, which is filled during inserts. Needed for
* printing header and data in the same order.
	 */
std::set<std::string> columns;
std::string id_name;
public:
CSV(std::string id_column_name) : id_name(id_column_name){};
void insert(IdType row, std::string column, std::string data)
{
columns.insert(column);
data_matrix[row][column] = data;
}
void insert(IdType row, std::string column, const char *data)
{
insert(row, column, std::string(data));
}
template <typename T>
void insert(IdType row, std::string column, T data)
{
insert(row, column, std::to_string(data));
}
void print()
{
// Print first column name
std::cout << id_name;
for (auto &column : columns) {
std::cout << "," << column;
}
std::cout << "\r\n" << std::flush;
for (auto &row : data_matrix) {
std::cout << row.first;
for (auto &column : columns) {
std::cout << "," << data_matrix[row.first][column];
}
std::cout << "\r\n" << std::flush;
}
}
};
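/* Typical usage (illustrative):
 *
 *   CSV<int> csv("Benchmark id");
 *   csv.insert(0, "engine", "cmap");
 *   csv.insert(0, "ops/sec", 12345);
 *   csv.print();
 *
 * prints the header "Benchmark id,engine,ops/sec" followed by the row
 * "0,cmap,12345"; columns come out in std::set order, so the header and
 * every row stay aligned. */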
| 1,381 | 21.290323 | 73 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/env.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
#include "leveldb/env.h"
namespace leveldb
{
Env::~Env()
{
}
Status Env::NewAppendableFile(const std::string &fname, WritableFile **result)
{
return Status::NotSupported("NewAppendableFile", fname);
}
SequentialFile::~SequentialFile()
{
}
RandomAccessFile::~RandomAccessFile()
{
}
WritableFile::~WritableFile()
{
}
Logger::~Logger()
{
}
FileLock::~FileLock()
{
}
void Log(Logger *info_log, const char *format, ...)
{
if (info_log != NULL) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
va_end(ap);
}
}
static Status DoWriteStringToFile(Env *env, const Slice &data, const std::string &fname, bool should_sync)
{
WritableFile *file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
return s;
}
s = file->Append(data);
if (s.ok() && should_sync) {
s = file->Sync();
}
if (s.ok()) {
s = file->Close();
}
delete file; // Will auto-close if we did not close above
if (!s.ok()) {
env->DeleteFile(fname);
}
return s;
}
Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname)
{
return DoWriteStringToFile(env, data, fname, false);
}
Status WriteStringToFileSync(Env *env, const Slice &data, const std::string &fname)
{
return DoWriteStringToFile(env, data, fname, true);
}
Status ReadFileToString(Env *env, const std::string &fname, std::string *data)
{
data->clear();
SequentialFile *file;
Status s = env->NewSequentialFile(fname, &file);
if (!s.ok()) {
return s;
}
static const int kBufferSize = 8192;
char *space = new char[kBufferSize];
while (true) {
Slice fragment;
s = file->Read(kBufferSize, &fragment, space);
if (!s.ok()) {
break;
}
data->append(fragment.data(), fragment.size());
if (fragment.empty()) {
break;
}
}
delete[] space;
delete file;
return s;
}
EnvWrapper::~EnvWrapper()
{
}
} // namespace leveldb
| 2,069 | 17.648649 | 106 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/logging.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020, Intel Corporation */
#include "util/logging.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
namespace leveldb
{
void AppendNumberTo(std::string *str, uint64_t num)
{
char buf[30];
snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
str->append(buf);
}
void AppendEscapedStringTo(std::string *str, const Slice &value)
{
for (size_t i = 0; i < value.size(); i++) {
char c = value[i];
if (c >= ' ' && c <= '~') {
str->push_back(c);
} else {
char buf[10];
snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned int>(c) & 0xff);
str->append(buf);
}
}
}
std::string NumberToString(uint64_t num)
{
std::string r;
AppendNumberTo(&r, num);
return r;
}
std::string EscapeString(const Slice &value)
{
std::string r;
AppendEscapedStringTo(&r, value);
return r;
}
bool ConsumeDecimalNumber(Slice *in, uint64_t *val)
{
uint64_t v = 0;
int digits = 0;
while (!in->empty()) {
char c = (*in)[0];
if (c >= '0' && c <= '9') {
++digits;
			// |delta| intentionally uint64_t to avoid Android crash (see log).
const uint64_t delta = (c - '0');
static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
if (v > kMaxUint64 / 10 || (v == kMaxUint64 / 10 && delta > kMaxUint64 % 10)) {
// Overflow
return false;
}
v = (v * 10) + delta;
in->remove_prefix(1);
} else {
break;
}
}
*val = v;
return (digits > 0);
}
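/* Worked example: for *in == "123abc", the loop consumes '1', '2', '3',
 * leaves "abc" in *in, sets *val to 123 and returns true; for *in == "abc"
 * nothing is consumed and the function returns false. */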
} // namespace leveldb
| 1,775 | 20.925926 | 82 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/logging.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// Must not be included from any .h files to avoid polluting the namespace
// with macros.
#ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
#define STORAGE_LEVELDB_UTIL_LOGGING_H_
#include "port/port_posix.h"
#include <stdint.h>
#include <stdio.h>
#include <string>
namespace leveldb
{
class Slice;
class WritableFile;
// Append a human-readable printout of "num" to *str
extern void AppendNumberTo(std::string *str, uint64_t num);
// Append a human-readable printout of "value" to *str.
// Escapes any non-printable characters found in "value".
extern void AppendEscapedStringTo(std::string *str, const Slice &value);
// Return a human-readable printout of "num"
extern std::string NumberToString(uint64_t num);
// Return a human-readable version of "value".
// Escapes any non-printable characters found in "value".
extern std::string EscapeString(const Slice &value);
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
// unspecified state.
extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val);
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
| 1,519 | 30.666667 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/status.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020, Intel Corporation */
#include "leveldb/status.h"
#include "port/port_posix.h"
#include <stdio.h>
namespace leveldb
{
const char *Status::CopyState(const char *state)
{
uint32_t size;
memcpy(&size, state, sizeof(size));
char *result = new char[size + 5];
memcpy(result, state, size + 5);
return result;
}
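/* State layout (see the constructor below): state_[0..3] hold the message
 * length, state_[4] the code, and state_[5..] the message text, hence the
 * size + 5 bytes copied above. */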
Status::Status(Code code, const Slice &msg, const Slice &msg2)
{
assert(code != kOk);
const uint32_t len1 = msg.size();
const uint32_t len2 = msg2.size();
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char *result = new char[size + 5];
memcpy(result, &size, sizeof(size));
result[4] = static_cast<char>(code);
memcpy(result + 5, msg.data(), len1);
if (len2) {
result[5 + len1] = ':';
result[6 + len1] = ' ';
memcpy(result + 7 + len1, msg2.data(), len2);
}
state_ = result;
}
std::string Status::ToString() const
{
if (state_ == NULL) {
return "OK";
} else {
char tmp[30];
const char *type;
switch (code()) {
case kOk:
type = "OK";
break;
case kNotFound:
type = "NotFound: ";
break;
case kCorruption:
type = "Corruption: ";
break;
case kNotSupported:
type = "Not implemented: ";
break;
case kInvalidArgument:
type = "Invalid argument: ";
break;
case kIOError:
type = "IO error: ";
break;
default:
snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast<int>(code()));
type = tmp;
break;
}
std::string result(type);
uint32_t length;
memcpy(&length, state_, sizeof(length));
result.append(state_ + 5, length);
return result;
}
}
} // namespace leveldb
| 1,877 | 21.902439 | 81 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/testutil.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "util/random.h"
namespace leveldb
{
namespace test
{
// Store in *dst a random string of length "len" and return a Slice that
// references the generated data.
Slice RandomString(Random *rnd, int len, std::string *dst);
// Return a random key with the specified length that may contain interesting
// characters (e.g. \x00, \xff, etc.).
std::string RandomKey(Random *rnd, int len);
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst);
// A wrapper that allows injection of errors.
class ErrorEnv : public EnvWrapper {
public:
bool writable_file_error_;
int num_writable_file_errors_;
ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0)
{
}
virtual Status NewWritableFile(const std::string &fname, WritableFile **result)
{
if (writable_file_error_) {
++num_writable_file_errors_;
*result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewWritableFile(fname, result);
}
virtual Status NewAppendableFile(const std::string &fname, WritableFile **result)
{
if (writable_file_error_) {
++num_writable_file_errors_;
*result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewAppendableFile(fname, result);
}
};
} // namespace test
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
| 1,984 | 28.191176 | 99 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/mutexlock.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
#define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
#include "port/port_posix.h"
#include "port/thread_annotations.h"
namespace leveldb
{
// Helper class that locks a mutex on construction and unlocks the mutex when
// the destructor of the MutexLock object is invoked.
//
// Typical usage:
//
// void MyClass::MyMethod() {
// MutexLock l(&mu_); // mu_ is an instance variable
// ... some complex code, possibly with multiple return paths ...
// }
class SCOPED_LOCKABLE MutexLock {
public:
explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu)
{
this->mu_->Lock();
}
~MutexLock() UNLOCK_FUNCTION()
{
this->mu_->Unlock();
}
private:
port::Mutex *const mu_;
// No copying allowed
MutexLock(const MutexLock &);
void operator=(const MutexLock &);
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
| 1,202 | 24.0625 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/histogram.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2017-2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
#define STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
#include <string>
namespace leveldb
{
class Histogram {
public:
Histogram()
{
}
~Histogram()
{
}
void Clear();
void Add(double value);
void Merge(const Histogram &other);
std::string ToString() const;
double Median() const;
double Percentile(double p) const;
double Average() const;
double StandardDeviation() const;
private:
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
enum { kNumBuckets = 154 };
static const double kBucketLimit[kNumBuckets];
double buckets_[kNumBuckets];
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
| 993 | 18.490196 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/testutil.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
#include "util/testutil.h"
#include "util/random.h"
namespace leveldb
{
namespace test
{
Slice RandomString(Random *rnd, int len, std::string *dst)
{
dst->resize(len);
for (int i = 0; i < len; i++) {
(*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
}
return Slice(*dst);
}
std::string RandomKey(Random *rnd, int len)
{
// Make sure to generate a wide variety of characters so we
// test the boundary conditions for short-key optimizations.
static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'};
std::string result;
for (int i = 0; i < len; i++) {
result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
}
return result;
}
Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst)
{
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1)
raw = 1;
std::string raw_data;
RandomString(rnd, raw, &raw_data);
// Duplicate the random data until we have filled "len" bytes
dst->clear();
while (dst->size() < len) {
dst->append(raw_data);
}
dst->resize(len);
return Slice(*dst);
}
} // namespace test
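/* Worked example: len = 100 and compressed_fraction = 0.5 generate 50 random
 * bytes and repeat them until 100 bytes are filled, so the result compresses
 * to roughly half its size. */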
} // namespace leveldb
| 1,384 | 24.648148 | 96 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/histogram.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020, Intel Corporation */
#include "util/histogram.h"
#include "port/port_posix.h"
#include <math.h>
#include <stdio.h>
namespace leveldb
{
// clang-format off
const double Histogram::kBucketLimit[kNumBuckets] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
1e200,
};
// clang-format on
void Histogram::Clear()
{
min_ = kBucketLimit[kNumBuckets - 1];
max_ = 0;
num_ = 0;
sum_ = 0;
sum_squares_ = 0;
for (int i = 0; i < kNumBuckets; i++) {
buckets_[i] = 0;
}
}
void Histogram::Add(double value)
{
// Linear search is fast enough for our usage in db_bench
int b = 0;
while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) {
b++;
}
buckets_[b] += 1.0;
if (min_ > value)
min_ = value;
if (max_ < value)
max_ = value;
num_++;
sum_ += value;
sum_squares_ += (value * value);
}
void Histogram::Merge(const Histogram &other)
{
if (other.min_ < min_)
min_ = other.min_;
if (other.max_ > max_)
max_ = other.max_;
num_ += other.num_;
sum_ += other.sum_;
sum_squares_ += other.sum_squares_;
for (int b = 0; b < kNumBuckets; b++) {
buckets_[b] += other.buckets_[b];
}
}
double Histogram::Median() const
{
return Percentile(50.0);
}
double Histogram::Percentile(double p) const
{
double threshold = num_ * (p / 100.0);
double sum = 0;
for (int b = 0; b < kNumBuckets; b++) {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
double right_point = kBucketLimit[b];
double left_sum = sum - buckets_[b];
double right_sum = sum;
double pos = (threshold - left_sum) / (right_sum - left_sum);
double r = left_point + (right_point - left_point) * pos;
if (r < min_)
r = min_;
if (r > max_)
r = max_;
return r;
}
}
return max_;
}
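/* Worked example: with 10 samples, all falling in the (25, 30] bucket,
 * Percentile(50) needs threshold = 5; left_sum = 0 and right_sum = 10 give
 * pos = 0.5, so the result is 25 + (30 - 25) * 0.5 = 27.5, then clamped to
 * [min_, max_]. */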
double Histogram::Average() const
{
if (num_ == 0.0)
return 0;
return sum_ / num_;
}
double Histogram::StandardDeviation() const
{
if (num_ == 0.0)
return 0;
double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
return sqrt(variance);
}
std::string Histogram::ToString() const
{
std::string r;
char buf[200];
snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, Average(),
StandardDeviation());
r.append(buf);
snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", (num_ == 0.0 ? 0.0 : min_),
Median(), max_);
r.append(buf);
snprintf(buf, sizeof(buf), "Percentiles: P50: %.2f P75: %.2f P99: %.2f P99.9: %.2f P99.99: %.2f\n",
Percentile(50), Percentile(75), Percentile(99), Percentile(99.9), Percentile(99.99));
r.append(buf);
r.append("------------------------------------------------------\n");
const double mult = 100.0 / num_;
double sum = 0;
for (int b = 0; b < kNumBuckets; b++) {
if (buckets_[b] <= 0.0)
continue;
sum += buckets_[b];
snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left
kBucketLimit[b], // right
buckets_[b], // count
mult * buckets_[b], // percentage
mult * sum); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.
int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
return r;
}
} // namespace leveldb
| 4,793 | 28.411043 | 100 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/env_posix.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020, Intel Corporation */
#include <deque>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <limits>
#include <pthread.h>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "port/port_posix.h"
#include "util/env_posix_test_helper.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/posix_logger.h"
namespace leveldb
{
namespace
{
static int open_read_only_file_limit = -1;
static int mmap_limit = -1;
static const size_t kBufSize = 65536;
static Status PosixError(const std::string &context, int err_number)
{
if (err_number == ENOENT) {
return Status::NotFound(context, strerror(err_number));
} else {
return Status::IOError(context, strerror(err_number));
}
}
// Helper class to limit resource usage to avoid exhaustion.
// Currently used to limit read-only file descriptors and mmap file usage
// so that we do not end up running out of file descriptors, virtual memory,
// or running into kernel performance problems for very large databases.
class Limiter {
public:
// Limit maximum number of resources to |n|.
Limiter(intptr_t n)
{
SetAllowed(n);
}
// If another resource is available, acquire it and return true.
// Else return false.
bool Acquire()
{
if (GetAllowed() <= 0) {
return false;
}
MutexLock l(&mu_);
intptr_t x = GetAllowed();
if (x <= 0) {
return false;
} else {
SetAllowed(x - 1);
return true;
}
}
// Release a resource acquired by a previous call to Acquire() that returned
// true.
void Release()
{
MutexLock l(&mu_);
SetAllowed(GetAllowed() + 1);
}
private:
port::Mutex mu_;
port::AtomicPointer allowed_;
intptr_t GetAllowed() const
{
return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
}
// REQUIRES: mu_ must be held
void SetAllowed(intptr_t v)
{
allowed_.Release_Store(reinterpret_cast<void *>(v));
}
Limiter(const Limiter &);
void operator=(const Limiter &);
};
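// Typical usage (illustrative, mirrors NewRandomAccessFile() below):
//
//   if (limiter.Acquire()) {
//     ... use the limited resource (an mmap region or an open fd) ...
//     limiter.Release();
//   } else {
//     ... fall back to the unlimited code path ...
//   }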
class PosixSequentialFile : public SequentialFile {
private:
std::string filename_;
int fd_;
public:
PosixSequentialFile(const std::string &fname, int fd) : filename_(fname), fd_(fd)
{
}
virtual ~PosixSequentialFile()
{
close(fd_);
}
virtual Status Read(size_t n, Slice *result, char *scratch)
{
Status s;
while (true) {
ssize_t r = read(fd_, scratch, n);
if (r < 0) {
if (errno == EINTR) {
continue; // Retry
}
s = PosixError(filename_, errno);
break;
}
*result = Slice(scratch, r);
break;
}
return s;
}
virtual Status Skip(uint64_t n)
{
if (lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
return PosixError(filename_, errno);
}
return Status::OK();
}
};
// pread() based random-access
class PosixRandomAccessFile : public RandomAccessFile {
private:
std::string filename_;
bool temporary_fd_; // If true, fd_ is -1 and we open on every read.
int fd_;
Limiter *limiter_;
public:
PosixRandomAccessFile(const std::string &fname, int fd, Limiter *limiter)
: filename_(fname), fd_(fd), limiter_(limiter)
{
temporary_fd_ = !limiter->Acquire();
if (temporary_fd_) {
// Open file on every access.
close(fd_);
fd_ = -1;
}
}
virtual ~PosixRandomAccessFile()
{
if (!temporary_fd_) {
close(fd_);
limiter_->Release();
}
}
virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const
{
int fd = fd_;
if (temporary_fd_) {
fd = open(filename_.c_str(), O_RDONLY);
if (fd < 0) {
return PosixError(filename_, errno);
}
}
Status s;
ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
*result = Slice(scratch, (r < 0) ? 0 : r);
if (r < 0) {
// An error: return a non-ok status
s = PosixError(filename_, errno);
}
if (temporary_fd_) {
// Close the temporary file descriptor opened earlier.
close(fd);
}
return s;
}
};
// mmap() based random-access
class PosixMmapReadableFile : public RandomAccessFile {
private:
std::string filename_;
void *mmapped_region_;
size_t length_;
Limiter *limiter_;
public:
// base[0,length-1] contains the mmapped contents of the file.
PosixMmapReadableFile(const std::string &fname, void *base, size_t length, Limiter *limiter)
: filename_(fname), mmapped_region_(base), length_(length), limiter_(limiter)
{
}
virtual ~PosixMmapReadableFile()
{
munmap(mmapped_region_, length_);
limiter_->Release();
}
virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const
{
Status s;
if (offset + n > length_) {
*result = Slice();
s = PosixError(filename_, EINVAL);
} else {
*result = Slice(reinterpret_cast<char *>(mmapped_region_) + offset, n);
}
return s;
}
};
class PosixWritableFile : public WritableFile {
private:
// buf_[0, pos_-1] contains data to be written to fd_.
std::string filename_;
int fd_;
char buf_[kBufSize];
size_t pos_;
public:
PosixWritableFile(const std::string &fname, int fd) : filename_(fname), fd_(fd), pos_(0)
{
}
~PosixWritableFile()
{
if (fd_ >= 0) {
// Ignoring any potential errors
Close();
}
}
virtual Status Append(const Slice &data)
{
size_t n = data.size();
const char *p = data.data();
// Fit as much as possible into buffer.
size_t copy = std::min(n, kBufSize - pos_);
memcpy(buf_ + pos_, p, copy);
p += copy;
n -= copy;
pos_ += copy;
if (n == 0) {
return Status::OK();
}
// Can't fit in buffer, so need to do at least one write.
Status s = FlushBuffered();
if (!s.ok()) {
return s;
}
// Small writes go to buffer, large writes are written directly.
if (n < kBufSize) {
memcpy(buf_, p, n);
pos_ = n;
return Status::OK();
}
return WriteRaw(p, n);
}
virtual Status Close()
{
Status result = FlushBuffered();
const int r = close(fd_);
if (r < 0 && result.ok()) {
result = PosixError(filename_, errno);
}
fd_ = -1;
return result;
}
virtual Status Flush()
{
return FlushBuffered();
}
Status SyncDirIfManifest()
{
const char *f = filename_.c_str();
const char *sep = strrchr(f, '/');
Slice basename;
std::string dir;
if (sep == NULL) {
dir = ".";
basename = f;
} else {
dir = std::string(f, sep - f);
basename = sep + 1;
}
Status s;
if (basename.starts_with("MANIFEST")) {
int fd = open(dir.c_str(), O_RDONLY);
if (fd < 0) {
s = PosixError(dir, errno);
} else {
if (fsync(fd) < 0) {
s = PosixError(dir, errno);
}
close(fd);
}
}
return s;
}
virtual Status Sync()
{
// Ensure new files referred to by the manifest are in the filesystem.
Status s = SyncDirIfManifest();
if (!s.ok()) {
return s;
}
s = FlushBuffered();
if (s.ok()) {
if (fdatasync(fd_) != 0) {
s = PosixError(filename_, errno);
}
}
return s;
}
private:
Status FlushBuffered()
{
Status s = WriteRaw(buf_, pos_);
pos_ = 0;
return s;
}
Status WriteRaw(const char *p, size_t n)
{
while (n > 0) {
ssize_t r = write(fd_, p, n);
if (r < 0) {
if (errno == EINTR) {
continue; // Retry
}
return PosixError(filename_, errno);
}
p += r;
n -= r;
}
return Status::OK();
}
};
static int LockOrUnlock(int fd, bool lock)
{
errno = 0;
struct flock f;
memset(&f, 0, sizeof(f));
f.l_type = (lock ? F_WRLCK : F_UNLCK);
f.l_whence = SEEK_SET;
f.l_start = 0;
f.l_len = 0; // Lock/unlock entire file
return fcntl(fd, F_SETLK, &f);
}
class PosixFileLock : public FileLock {
public:
int fd_;
std::string name_;
};
// Set of locked files. We keep a separate set instead of just
// relying on fcntl(F_SETLK) since fcntl(F_SETLK) does not provide
// any protection against multiple uses from the same process.
class PosixLockTable {
private:
port::Mutex mu_;
std::set<std::string> locked_files_;
public:
bool Insert(const std::string &fname)
{
MutexLock l(&mu_);
return locked_files_.insert(fname).second;
}
void Remove(const std::string &fname)
{
MutexLock l(&mu_);
locked_files_.erase(fname);
}
};
class PosixEnv : public Env {
public:
PosixEnv();
virtual ~PosixEnv()
{
char msg[] = "Destroying Env::Default()\n";
fwrite(msg, 1, sizeof(msg), stderr);
abort();
}
virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result)
{
int fd = open(fname.c_str(), O_RDONLY);
if (fd < 0) {
*result = NULL;
return PosixError(fname, errno);
} else {
*result = new PosixSequentialFile(fname, fd);
return Status::OK();
}
}
virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result)
{
*result = NULL;
Status s;
int fd = open(fname.c_str(), O_RDONLY);
if (fd < 0) {
s = PosixError(fname, errno);
} else if (mmap_limit_.Acquire()) {
uint64_t size;
s = GetFileSize(fname, &size);
if (s.ok()) {
void *base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
if (base != MAP_FAILED) {
*result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
} else {
s = PosixError(fname, errno);
}
}
close(fd);
if (!s.ok()) {
mmap_limit_.Release();
}
} else {
*result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
}
return s;
}
virtual Status NewWritableFile(const std::string &fname, WritableFile **result)
{
Status s;
int fd = open(fname.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
if (fd < 0) {
*result = NULL;
s = PosixError(fname, errno);
} else {
*result = new PosixWritableFile(fname, fd);
}
return s;
}
virtual Status NewAppendableFile(const std::string &fname, WritableFile **result)
{
Status s;
int fd = open(fname.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
if (fd < 0) {
*result = NULL;
s = PosixError(fname, errno);
} else {
*result = new PosixWritableFile(fname, fd);
}
return s;
}
virtual bool FileExists(const std::string &fname)
{
return access(fname.c_str(), F_OK) == 0;
}
virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result)
{
result->clear();
DIR *d = opendir(dir.c_str());
if (d == NULL) {
return PosixError(dir, errno);
}
struct dirent *entry;
while ((entry = readdir(d)) != NULL) {
result->push_back(entry->d_name);
}
closedir(d);
return Status::OK();
}
virtual Status DeleteFile(const std::string &fname)
{
Status result;
if (unlink(fname.c_str()) != 0) {
result = PosixError(fname, errno);
}
return result;
}
virtual Status CreateDir(const std::string &name)
{
Status result;
if (mkdir(name.c_str(), 0755) != 0) {
result = PosixError(name, errno);
}
return result;
}
virtual Status DeleteDir(const std::string &name)
{
Status result;
if (rmdir(name.c_str()) != 0) {
result = PosixError(name, errno);
}
return result;
}
virtual Status GetFileSize(const std::string &fname, uint64_t *size)
{
Status s;
struct stat sbuf;
if (stat(fname.c_str(), &sbuf) != 0) {
*size = 0;
s = PosixError(fname, errno);
} else {
*size = sbuf.st_size;
}
return s;
}
virtual Status RenameFile(const std::string &src, const std::string &target)
{
Status result;
if (rename(src.c_str(), target.c_str()) != 0) {
result = PosixError(src, errno);
}
return result;
}
virtual Status LockFile(const std::string &fname, FileLock **lock)
{
*lock = NULL;
Status result;
int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
if (fd < 0) {
result = PosixError(fname, errno);
} else if (!locks_.Insert(fname)) {
close(fd);
result = Status::IOError("lock " + fname, "already held by process");
} else if (LockOrUnlock(fd, true) == -1) {
result = PosixError("lock " + fname, errno);
close(fd);
locks_.Remove(fname);
} else {
PosixFileLock *my_lock = new PosixFileLock;
my_lock->fd_ = fd;
my_lock->name_ = fname;
*lock = my_lock;
}
return result;
}
virtual Status UnlockFile(FileLock *lock)
{
PosixFileLock *my_lock = reinterpret_cast<PosixFileLock *>(lock);
Status result;
if (LockOrUnlock(my_lock->fd_, false) == -1) {
result = PosixError("unlock", errno);
}
locks_.Remove(my_lock->name_);
close(my_lock->fd_);
delete my_lock;
return result;
}
virtual void Schedule(void (*function)(void *), void *arg);
virtual void StartThread(void (*function)(void *arg), void *arg);
virtual Status GetTestDirectory(std::string *result)
{
const char *env = getenv("TEST_TMPDIR");
if (env && env[0] != '\0') {
*result = env;
} else {
char buf[100];
snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid()));
*result = buf;
}
// Directory may already exist
CreateDir(*result);
return Status::OK();
}
static uint64_t gettid()
{
pthread_t tid = pthread_self();
uint64_t thread_id = 0;
memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
return thread_id;
}
virtual Status NewLogger(const std::string &fname, Logger **result)
{
FILE *f = fopen(fname.c_str(), "w");
if (f == NULL) {
*result = NULL;
return PosixError(fname, errno);
} else {
*result = new PosixLogger(f, &PosixEnv::gettid);
return Status::OK();
}
}
virtual uint64_t NowMicros()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
}
virtual void SleepForMicroseconds(int micros)
{
usleep(micros);
}
private:
void PthreadCall(const char *label, int result)
{
if (result != 0) {
fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
abort();
}
}
// BGThread() is the body of the background thread
void BGThread();
static void *BGThreadWrapper(void *arg)
{
reinterpret_cast<PosixEnv *>(arg)->BGThread();
return NULL;
}
pthread_mutex_t mu_;
pthread_cond_t bgsignal_;
pthread_t bgthread_;
bool started_bgthread_;
// Entry per Schedule() call
struct BGItem {
void *arg;
void (*function)(void *);
};
typedef std::deque<BGItem> BGQueue;
BGQueue queue_;
PosixLockTable locks_;
Limiter mmap_limit_;
Limiter fd_limit_;
};
// Return the maximum number of concurrent mmaps.
static int MaxMmaps()
{
if (mmap_limit >= 0) {
return mmap_limit;
}
// Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
mmap_limit = sizeof(void *) >= 8 ? 1000 : 0;
return mmap_limit;
}
// Return the maximum number of read-only files to keep open.
static intptr_t MaxOpenFiles()
{
if (open_read_only_file_limit >= 0) {
return open_read_only_file_limit;
}
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim)) {
// getrlimit failed, fallback to hard-coded default.
open_read_only_file_limit = 50;
} else if (rlim.rlim_cur == RLIM_INFINITY) {
open_read_only_file_limit = std::numeric_limits<int>::max();
} else {
// Allow use of 20% of available file descriptors for read-only files.
open_read_only_file_limit = rlim.rlim_cur / 5;
}
return open_read_only_file_limit;
}
PosixEnv::PosixEnv() : started_bgthread_(false), mmap_limit_(MaxMmaps()), fd_limit_(MaxOpenFiles())
{
PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
}
void PosixEnv::Schedule(void (*function)(void *), void *arg)
{
PthreadCall("lock", pthread_mutex_lock(&mu_));
// Start background thread if necessary
if (!started_bgthread_) {
started_bgthread_ = true;
PthreadCall("create thread",
pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this));
}
// If the queue is currently empty, the background thread may currently be
// waiting.
if (queue_.empty()) {
PthreadCall("signal", pthread_cond_signal(&bgsignal_));
}
// Add to priority queue
queue_.push_back(BGItem());
queue_.back().function = function;
queue_.back().arg = arg;
PthreadCall("unlock", pthread_mutex_unlock(&mu_));
}
void PosixEnv::BGThread()
{
while (true) {
// Wait until there is an item that is ready to run
PthreadCall("lock", pthread_mutex_lock(&mu_));
while (queue_.empty()) {
PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
}
void (*function)(void *) = queue_.front().function;
void *arg = queue_.front().arg;
queue_.pop_front();
PthreadCall("unlock", pthread_mutex_unlock(&mu_));
(*function)(arg);
}
}
namespace
{
struct StartThreadState {
void (*user_function)(void *);
void *arg;
};
}
static void *StartThreadWrapper(void *arg)
{
StartThreadState *state = reinterpret_cast<StartThreadState *>(arg);
state->user_function(state->arg);
delete state;
return NULL;
}
void PosixEnv::StartThread(void (*function)(void *arg), void *arg)
{
pthread_t t;
StartThreadState *state = new StartThreadState;
state->user_function = function;
state->arg = arg;
PthreadCall("start thread", pthread_create(&t, NULL, &StartThreadWrapper, state));
}
} // namespace
static pthread_once_t once = PTHREAD_ONCE_INIT;
static Env *default_env;
static void InitDefaultEnv()
{
default_env = new PosixEnv;
}
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit)
{
assert(default_env == NULL);
open_read_only_file_limit = limit;
}
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit)
{
assert(default_env == NULL);
mmap_limit = limit;
}
Env *Env::Default()
{
pthread_once(&once, InitDefaultEnv);
return default_env;
}
} // namespace leveldb
| 17,689 | 20.812577 | 99 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/random.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_
#define STORAGE_LEVELDB_UTIL_RANDOM_H_
#include <stdint.h>
namespace leveldb
{
// A very simple random number generator. Not especially good at
// generating truly random bits, but good enough for our needs in this
// package.
class Random {
private:
uint32_t seed_;
public:
explicit Random(uint32_t s) : seed_(s & 0x7fffffffu)
{
// Avoid bad seeds.
if (seed_ == 0 || seed_ == 2147483647L) {
seed_ = 1;
}
}
uint32_t Next()
{
static const uint32_t M = 2147483647L; // 2^31-1
static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
// seed_ must not be zero or M, or else all subsequent computed values
// will be zero or M respectively. For all other values, seed_ will end
// up cycling through every number in [1,M-1]
uint64_t product = seed_ * A;
// Compute (product % M) using the fact that ((x << 31) % M) == x.
seed_ = static_cast<uint32_t>((product >> 31) + (product & M));
// The first reduction may overflow by 1 bit, so we may need to
// repeat. mod == M is not possible; using > allows the faster
// sign-bit-based test.
if (seed_ > M) {
seed_ -= M;
}
return seed_;
}
// Returns a uniformly distributed value in the range [0..n-1]
// REQUIRES: n > 0
uint32_t Uniform(int n)
{
return Next() % n;
}
// Randomly returns true ~"1/n" of the time, and false otherwise.
// REQUIRES: n > 0
bool OneIn(int n)
{
return (Next() % n) == 0;
}
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with exponential bias towards smaller numbers.
uint32_t Skewed(int max_log)
{
return Uniform(1 << Uniform(max_log + 1));
}
};
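// Illustrative use of the generator above (a minimal sketch; the seed and
// bounds are arbitrary examples):
//
//   Random rnd(301);
//   uint32_t r = rnd.Next();      // uniform in [1, 2^31-2]
//   uint32_t u = rnd.Uniform(10); // uniform in [0, 9]
//   bool rare = rnd.OneIn(5);     // true roughly 1/5 of the time
//   uint32_t s = rnd.Skewed(4);   // in [0, 15], biased toward small values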
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
| 2,202 | 26.886076 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/posix_logger.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
//
// Logger implementation that can be shared by all environments
// where enough posix functionality is available.
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#include "leveldb/env.h"
#include <algorithm>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
namespace leveldb
{
class PosixLogger : public Logger {
private:
FILE *file_;
uint64_t (*gettid_)(); // Return the thread id for the current thread
public:
PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid)
{
}
virtual ~PosixLogger()
{
fclose(file_);
}
virtual void Logv(const char *format, va_list ap)
{
const uint64_t thread_id = (*gettid_)();
// We try twice: the first time with a fixed-size stack-allocated buffer,
// and the second time with a much larger dynamically allocated buffer.
char buffer[500];
for (int iter = 0; iter < 2; iter++) {
char *base;
int bufsize;
if (iter == 0) {
bufsize = sizeof(buffer);
base = buffer;
} else {
bufsize = 30000;
base = new char[bufsize];
}
char *p = base;
char *limit = base + bufsize;
struct timeval now_tv;
gettimeofday(&now_tv, NULL);
const time_t seconds = now_tv.tv_sec;
struct tm t;
localtime_r(&seconds, &t);
p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min,
t.tm_sec, static_cast<int>(now_tv.tv_usec),
static_cast<long long unsigned int>(thread_id));
// Print the message
if (p < limit) {
va_list backup_ap;
va_copy(backup_ap, ap);
p += vsnprintf(p, limit - p, format, backup_ap);
va_end(backup_ap);
}
// Truncate to available space if necessary
if (p >= limit) {
if (iter == 0) {
continue; // Try again with larger buffer
} else {
p = limit - 1;
}
}
// Add newline if necessary
if (p == base || p[-1] != '\n') {
*p++ = '\n';
}
assert(p <= limit);
fwrite(base, 1, p - base, file_);
fflush(file_);
if (base != buffer) {
delete[] base;
}
break;
}
}
};
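// A minimal sketch of wiring this logger up (illustrative; the GetTid shim
// below is a hypothetical example, not part of this header):
//
//   static uint64_t GetTid() { return (uint64_t)pthread_self(); }
//   FILE *f = fopen("/tmp/LOG", "w");
//   Logger *info_log = new PosixLogger(f, &GetTid);
//   Log(info_log, "open %s", name); // Log() is declared in leveldb/env.h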
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
| 2,503 | 23.54902 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/util/env_posix_test_helper.h
|
// Copyright 2017 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
#define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
namespace leveldb
{
class EnvPosixTest;
// A helper for the POSIX Env to facilitate testing.
class EnvPosixTestHelper {
private:
friend class EnvPosixTest;
// Set the maximum number of read-only files that will be opened.
// Must be called before creating an Env.
static void SetReadOnlyFDLimit(int limit);
// Set the maximum number of read-only files that will be mapped via mmap.
// Must be called before creating an Env.
static void SetReadOnlyMMapLimit(int limit);
};
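// Illustrative test-side use (a sketch; these setters are private, reachable
// only from the EnvPosixTest friend class, and must run before the first
// call to Env::Default() constructs the PosixEnv):
//
//   EnvPosixTestHelper::SetReadOnlyFDLimit(4);
//   EnvPosixTestHelper::SetReadOnlyMMapLimit(4);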
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
| 967 | 28.333333 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/port/port_posix.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// See port_example.h for documentation for the following types/functions.
#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#undef PLATFORM_IS_LITTLE_ENDIAN
#if defined(__APPLE__)
#include <machine/endian.h>
#if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
#define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
#endif
#elif defined(OS_SOLARIS)
#include <sys/isa_defs.h>
#ifdef _LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN true
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
#include <sys/endian.h>
#include <sys/types.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#elif defined(OS_HPUX)
#define PLATFORM_IS_LITTLE_ENDIAN false
#elif defined(OS_ANDROID)
// Due to a bug in the NDK x86 <sys/endian.h> definition,
// _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
// See http://code.google.com/p/android/issues/detail?id=39824
#include <endian.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#else
#include <endian.h>
#endif
#include <pthread.h>
#if defined(HAVE_CRC32C)
#include <crc32c/crc32c.h>
#endif // defined(HAVE_CRC32C)
#ifdef HAVE_SNAPPY
#include <snappy.h>
#endif // defined(HAVE_SNAPPY)
#include "port/atomic_pointer.h"
#include <stdint.h>
#include <string>
#ifndef PLATFORM_IS_LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
#endif
#if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
// Use fsync() on platforms without fdatasync()
#define fdatasync fsync
#endif
#if defined(OS_ANDROID) && __ANDROID_API__ < 9
// fdatasync() was only introduced in API level 9 on Android. Use fsync()
// when targeting older platforms.
#define fdatasync fsync
#endif
namespace leveldb
{
namespace port
{
static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
#undef PLATFORM_IS_LITTLE_ENDIAN
class CondVar;
class Mutex {
public:
Mutex();
~Mutex();
void Lock();
void Unlock();
void AssertHeld()
{
}
private:
friend class CondVar;
pthread_mutex_t mu_;
// No copying
Mutex(const Mutex &);
void operator=(const Mutex &);
};
class CondVar {
public:
explicit CondVar(Mutex *mu);
~CondVar();
void Wait();
void Signal();
void SignalAll();
private:
pthread_cond_t cv_;
Mutex *mu_;
};
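// Illustrative pairing of the wrappers above (a minimal sketch; "ready" is a
// hypothetical predicate guarded by mu):
//
//   Mutex mu;
//   CondVar cv(&mu);
//   mu.Lock();
//   while (!ready) {
//     cv.Wait(); // atomically releases mu while blocked, reacquires on wake
//   }
//   mu.Unlock();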
typedef pthread_once_t OnceType;
#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
extern void InitOnce(OnceType *once, void (*initializer)());
inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output)
{
#ifdef HAVE_SNAPPY
output->resize(snappy::MaxCompressedLength(length));
size_t outlen;
snappy::RawCompress(input, length, &(*output)[0], &outlen);
output->resize(outlen);
return true;
#endif // defined(HAVE_SNAPPY)
return false;
}
inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result)
{
#ifdef HAVE_SNAPPY
return snappy::GetUncompressedLength(input, length, result);
#else
return false;
#endif // defined(HAVE_SNAPPY)
}
inline bool Snappy_Uncompress(const char *input, size_t length, char *output)
{
#ifdef HAVE_SNAPPY
return snappy::RawUncompress(input, length, output);
#else
return false;
#endif // defined(HAVE_SNAPPY)
}
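// Illustrative round trip through the wrappers above (a sketch; only
// meaningful when built with HAVE_SNAPPY, otherwise the calls return false):
//
//   std::string compressed;
//   if (Snappy_Compress(input, n, &compressed)) {
//     size_t ulen;
//     if (Snappy_GetUncompressedLength(compressed.data(), compressed.size(),
//                                      &ulen)) {
//       char *out = new char[ulen];
//       Snappy_Uncompress(compressed.data(), compressed.size(), out);
//     }
//   }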
inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg)
{
return false;
}
inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size)
{
#if defined(HAVE_CRC32C)
return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size);
#else
return 0;
#endif // defined(HAVE_CRC32C)
}
} // namespace port
} // namespace leveldb
#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
| 4,061 | 23.768293 | 98 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/port/port_posix.cc
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
/* Copyright 2020, Intel Corporation */
#include "port/port_posix.h"
#include <cstdlib>
#include <stdio.h>
#include <string.h>
namespace leveldb
{
namespace port
{
static void PthreadCall(const char *label, int result)
{
if (result != 0) {
fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
abort();
}
}
Mutex::Mutex()
{
PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL));
}
Mutex::~Mutex()
{
PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_));
}
void Mutex::Lock()
{
PthreadCall("lock", pthread_mutex_lock(&mu_));
}
void Mutex::Unlock()
{
PthreadCall("unlock", pthread_mutex_unlock(&mu_));
}
CondVar::CondVar(Mutex *mu) : mu_(mu)
{
PthreadCall("init cv", pthread_cond_init(&cv_, NULL));
}
CondVar::~CondVar()
{
PthreadCall("destroy cv", pthread_cond_destroy(&cv_));
}
void CondVar::Wait()
{
PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
}
void CondVar::Signal()
{
PthreadCall("signal", pthread_cond_signal(&cv_));
}
void CondVar::SignalAll()
{
PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
}
void InitOnce(OnceType *once, void (*initializer)())
{
PthreadCall("once", pthread_once(once, initializer));
}
} // namespace port
} // namespace leveldb
| 1,484 | 17.797468 | 81 |
cc
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/port/thread_annotations.h
|
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
// Some environments provide custom macros to aid in static thread-safety
// analysis. Provide empty definitions of such macros unless they are already
// defined.
#ifndef EXCLUSIVE_LOCKS_REQUIRED
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#endif
#ifndef SHARED_LOCKS_REQUIRED
#define SHARED_LOCKS_REQUIRED(...)
#endif
#ifndef LOCKS_EXCLUDED
#define LOCKS_EXCLUDED(...)
#endif
#ifndef LOCK_RETURNED
#define LOCK_RETURNED(x)
#endif
#ifndef LOCKABLE
#define LOCKABLE
#endif
#ifndef SCOPED_LOCKABLE
#define SCOPED_LOCKABLE
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
#define EXCLUSIVE_LOCK_FUNCTION(...)
#endif
#ifndef SHARED_LOCK_FUNCTION
#define SHARED_LOCK_FUNCTION(...)
#endif
#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
#endif
#ifndef SHARED_TRYLOCK_FUNCTION
#define SHARED_TRYLOCK_FUNCTION(...)
#endif
#ifndef UNLOCK_FUNCTION
#define UNLOCK_FUNCTION(...)
#endif
#ifndef NO_THREAD_SAFETY_ANALYSIS
#define NO_THREAD_SAFETY_ANALYSIS
#endif
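// Illustrative annotation use (a hypothetical sketch; with no analysis
// support the macros above expand to nothing and the code compiles as-is):
//
//   class Counter {
//    public:
//     void Inc() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++val_; }
//     void Add(int n) LOCKS_EXCLUDED(mu_);
//    private:
//     port::Mutex mu_;
//     int val_;
//   };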
#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
| 1,429 | 21.34375 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/port/atomic_pointer.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If the platform provides a cheap barrier, we use it with raw pointers
// - If <atomic> is present (on newer versions of gcc, it is), we use
// a <atomic>-based AtomicPointer. However we prefer the memory
// barrier based version, because at least on a gcc 4.4 32-bit build
// on linux, we have encountered a buggy <atomic> implementation.
// Also, some <atomic> implementations are much slower than a memory-barrier
// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
// a barrier based acquire-load).
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
#ifndef PORT_ATOMIC_POINTER_H_
#define PORT_ATOMIC_POINTER_H_
#include <stdint.h>
#ifdef LEVELDB_ATOMIC_PRESENT
#include <atomic>
#endif
#ifdef OS_WIN
#include <windows.h>
#endif
#ifdef __APPLE__
#include <libkern/OSAtomic.h>
#endif
#if defined(_M_X64) || defined(__x86_64__)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
#elif defined(__aarch64__)
#define ARCH_CPU_ARM64_FAMILY 1
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
#elif defined(__mips__)
#define ARCH_CPU_MIPS_FAMILY 1
#endif
namespace leveldb
{
namespace port
{
// Define MemoryBarrier() if available
// Windows on x86
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
// windows.h already provides a MemoryBarrier(void) macro
// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
#define LEVELDB_HAVE_MEMORY_BARRIER
// Mac OS
#elif defined(__APPLE__)
inline void MemoryBarrier()
{
OSMemoryBarrier();
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// Gcc on x86
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
__asm__ __volatile__("" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// Sun Studio
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
inline void MemoryBarrier()
{
// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
asm volatile("" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// ARM Linux
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
// The Linux ARM kernel provides a highly optimized device-specific memory
// barrier function at a fixed memory address that is mapped in every
// user-level process.
//
// This beats using CPU-specific instructions which are, on single-core
// devices, unnecessary and very costly (e.g. ARMv7-A "dmb" takes more
// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
// shows that the extra function call cost is completely negligible on
// multi-core devices.
//
inline void MemoryBarrier()
{
(*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// ARM64
#elif defined(ARCH_CPU_ARM64_FAMILY)
inline void MemoryBarrier()
{
asm volatile("dmb sy" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// PPC
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
// TODO for some powerpc expert: is there a cheaper suitable variant?
// Perhaps by having separate barriers for acquire and release ops.
asm volatile("sync" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// MIPS
#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
__asm__ __volatile__("sync" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
#endif
// AtomicPointer built using platform-specific MemoryBarrier()
#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
class AtomicPointer {
private:
void *rep_;
public:
AtomicPointer()
{
}
explicit AtomicPointer(void *p) : rep_(p)
{
}
inline void *NoBarrier_Load() const
{
return rep_;
}
inline void NoBarrier_Store(void *v)
{
rep_ = v;
}
inline void *Acquire_Load() const
{
void *result = rep_;
MemoryBarrier();
return result;
}
inline void Release_Store(void *v)
{
MemoryBarrier();
rep_ = v;
}
};
// AtomicPointer based on <cstdatomic>
#elif defined(LEVELDB_ATOMIC_PRESENT)
class AtomicPointer {
private:
std::atomic<void *> rep_;
public:
AtomicPointer()
{
}
explicit AtomicPointer(void *v) : rep_(v)
{
}
inline void *Acquire_Load() const
{
return rep_.load(std::memory_order_acquire);
}
inline void Release_Store(void *v)
{
rep_.store(v, std::memory_order_release);
}
inline void *NoBarrier_Load() const
{
return rep_.load(std::memory_order_relaxed);
}
inline void NoBarrier_Store(void *v)
{
rep_.store(v, std::memory_order_relaxed);
}
};
// Atomic pointer based on sparc memory barriers
#elif defined(__sparcv9) && defined(__GNUC__)
class AtomicPointer {
private:
void *rep_;
public:
AtomicPointer()
{
}
explicit AtomicPointer(void *v) : rep_(v)
{
}
inline void *Acquire_Load() const
{
void *val;
__asm__ __volatile__("ldx [%[rep_]], %[val] \n\t"
"membar #LoadLoad|#LoadStore \n\t"
: [val] "=r"(val)
: [rep_] "r"(&rep_)
: "memory");
return val;
}
inline void Release_Store(void *v)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore \n\t"
"stx %[v], [%[rep_]] \n\t"
:
: [rep_] "r"(&rep_), [v] "r"(v)
: "memory");
}
inline void *NoBarrier_Load() const
{
return rep_;
}
inline void NoBarrier_Store(void *v)
{
rep_ = v;
}
};
// Atomic pointer based on ia64 acq/rel
#elif defined(__ia64) && defined(__GNUC__)
class AtomicPointer {
private:
void *rep_;
public:
AtomicPointer()
{
}
explicit AtomicPointer(void *v) : rep_(v)
{
}
inline void *Acquire_Load() const
{
void *val;
__asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t"
: [val] "=r"(val)
: [rep_] "r"(&rep_)
: "memory");
return val;
}
inline void Release_Store(void *v)
{
__asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t"
:
: [rep_] "r"(&rep_), [v] "r"(v)
: "memory");
}
inline void *NoBarrier_Load() const
{
return rep_;
}
inline void NoBarrier_Store(void *v)
{
rep_ = v;
}
};
// We have neither MemoryBarrier(), nor <atomic>
#else
#error Please implement AtomicPointer for this platform.
#endif
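// A minimal sketch of the intended publish/consume pairing for any of the
// AtomicPointer variants above ("Node" is a hypothetical shared type):
//
//   // Writer: initialize fully, then publish with release semantics.
//   Node *n = new Node(...);
//   ptr.Release_Store(n);
//   // Reader: an acquire load sees either NULL or a fully-built Node.
//   Node *seen = reinterpret_cast<Node *>(ptr.Acquire_Load());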
#undef LEVELDB_HAVE_MEMORY_BARRIER
#undef ARCH_CPU_X86_FAMILY
#undef ARCH_CPU_ARM_FAMILY
#undef ARCH_CPU_ARM64_FAMILY
#undef ARCH_CPU_PPC_FAMILY
} // namespace port
} // namespace leveldb
#endif // PORT_ATOMIC_POINTER_H_
| 7,207 | 23.26936 | 84 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/include/leveldb/status.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// A Status encapsulates the result of an operation. It may indicate success,
// or it may indicate an error with an associated error message.
//
// Multiple threads can invoke const methods on a Status without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Status must use
// external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
#include "leveldb/slice.h"
#include <string>
namespace leveldb
{
class Status {
public:
// Create a success status.
Status() : state_(NULL)
{
}
~Status()
{
delete[] state_;
}
// Copy the specified status.
Status(const Status &s);
void operator=(const Status &s);
// Return a success status.
static Status OK()
{
return Status();
}
// Return error status of an appropriate type.
static Status NotFound(const Slice &msg, const Slice &msg2 = Slice())
{
return Status(kNotFound, msg, msg2);
}
static Status Corruption(const Slice &msg, const Slice &msg2 = Slice())
{
return Status(kCorruption, msg, msg2);
}
static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice())
{
return Status(kNotSupported, msg, msg2);
}
static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice())
{
return Status(kInvalidArgument, msg, msg2);
}
static Status IOError(const Slice &msg, const Slice &msg2 = Slice())
{
return Status(kIOError, msg, msg2);
}
// Returns true iff the status indicates success.
bool ok() const
{
return (state_ == NULL);
}
// Returns true iff the status indicates a NotFound error.
bool IsNotFound() const
{
return code() == kNotFound;
}
// Returns true iff the status indicates a Corruption error.
bool IsCorruption() const
{
return code() == kCorruption;
}
// Returns true iff the status indicates an IOError.
bool IsIOError() const
{
return code() == kIOError;
}
// Returns true iff the status indicates a NotSupportedError.
bool IsNotSupportedError() const
{
return code() == kNotSupported;
}
// Returns true iff the status indicates an InvalidArgument.
bool IsInvalidArgument() const
{
return code() == kInvalidArgument;
}
// Return a string representation of this status suitable for printing.
// Returns the string "OK" for success.
std::string ToString() const;
private:
// OK status has a NULL state_. Otherwise, state_ is a new[] array
// of the following form:
// state_[0..3] == length of message
// state_[4] == code
// state_[5..] == message
const char *state_;
enum Code {
kOk = 0,
kNotFound = 1,
kCorruption = 2,
kNotSupported = 3,
kInvalidArgument = 4,
kIOError = 5
};
Code code() const
{
return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
}
Status(Code code, const Slice &msg, const Slice &msg2);
static const char *CopyState(const char *s);
};
inline Status::Status(const Status &s)
{
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
}
inline void Status::operator=(const Status &s)
{
// The following condition catches both aliasing (when this == &s),
// and the common case where both s and *this are ok.
if (state_ != s.state_) {
delete[] state_;
state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
}
}
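// Illustrative use of the Status API above (a sketch; the message strings
// are arbitrary):
//
//   Status s = Status::NotFound("key", "missing");
//   if (!s.ok()) {
//     assert(s.IsNotFound());
//     fprintf(stderr, "%s\n", s.ToString().c_str());
//   }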
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
| 3,658 | 23.231788 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/include/leveldb/slice.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// Slice is a simple structure containing a pointer into some external
// storage and a size. The user of a Slice must ensure that the slice
// is not used after the corresponding external storage has been
// deallocated.
//
// Multiple threads can invoke const methods on a Slice without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Slice must use
// external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_
#define STORAGE_LEVELDB_INCLUDE_SLICE_H_
#include <assert.h>
#include <stddef.h>
#include <string.h>
#include <string>
namespace leveldb
{
class Slice {
public:
// Create an empty slice.
Slice() : data_(""), size_(0)
{
}
// Create a slice that refers to d[0,n-1].
Slice(const char *d, size_t n) : data_(d), size_(n)
{
}
// Create a slice that refers to the contents of "s"
Slice(const std::string &s) : data_(s.data()), size_(s.size())
{
}
// Create a slice that refers to s[0,strlen(s)-1]
Slice(const char *s) : data_(s), size_(strlen(s))
{
}
// Return a pointer to the beginning of the referenced data
const char *data() const
{
return data_;
}
// Return the length (in bytes) of the referenced data
size_t size() const
{
return size_;
}
// Return true iff the length of the referenced data is zero
bool empty() const
{
return size_ == 0;
}
// Return the ith byte in the referenced data.
// REQUIRES: n < size()
char operator[](size_t n) const
{
assert(n < size());
return data_[n];
}
// Change this slice to refer to an empty array
void clear()
{
data_ = "";
size_ = 0;
}
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n)
{
assert(n <= size());
data_ += n;
size_ -= n;
}
// Return a string that contains the copy of the referenced data.
std::string ToString() const
{
return std::string(data_, size_);
}
// Three-way comparison. Returns value:
// < 0 iff "*this" < "b",
// == 0 iff "*this" == "b",
// > 0 iff "*this" > "b"
int compare(const Slice &b) const;
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice &x) const
{
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
private:
const char *data_;
size_t size_;
// Intentionally copyable
};
inline bool operator==(const Slice &x, const Slice &y)
{
return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0));
}
inline bool operator!=(const Slice &x, const Slice &y)
{
return !(x == y);
}
inline int Slice::compare(const Slice &b) const
{
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
if (size_ < b.size_)
r = -1;
else if (size_ > b.size_)
r = +1;
}
return r;
}
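// Illustrative semantics of the helpers above (a minimal sketch):
//
//   Slice a("abc");
//   Slice b("abcd");
//   assert(a.compare(b) < 0); // equal prefix, so the shorter slice sorts first
//   assert(b.starts_with(a));
//   b.remove_prefix(3);       // b now refers to "d"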
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
| 3,163 | 21.125874 | 81 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/bench/include/leveldb/env.h
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// An Env is an interface used by the leveldb implementation to access
// operating system functionality like the filesystem etc. Callers
// may wish to provide a custom Env object when opening a database to
// get fine-grain control; e.g., to rate limit file system operations.
//
// All Env implementations are safe for concurrent access from
// multiple threads without any external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
#include "leveldb/status.h"
#include <stdarg.h>
#include <stdint.h>
#include <string>
#include <vector>
namespace leveldb
{
class FileLock;
class Logger;
class RandomAccessFile;
class SequentialFile;
class Slice;
class WritableFile;
class Env {
public:
Env()
{
}
virtual ~Env();
// Return a default environment suitable for the current operating
// system. Sophisticated users may wish to provide their own Env
// implementation instead of relying on this default environment.
//
// The result of Default() belongs to leveldb and must never be deleted.
static Env *Default();
// Create a brand new sequentially-readable file with the specified name.
// On success, stores a pointer to the new file in *result and returns OK.
// On failure stores NULL in *result and returns non-OK. If the file does
// not exist, returns a non-OK status. Implementations should return a
// NotFound status when the file does not exist.
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0;
// Create a brand new random access read-only file with the
// specified name. On success, stores a pointer to the new file in
// *result and returns OK. On failure stores NULL in *result and
// returns non-OK. If the file does not exist, returns a non-OK
// status. Implementations should return a NotFound status when the file does
// not exist.
//
// The returned file may be concurrently accessed by multiple threads.
virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0;
// Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a
// new file. On success, stores a pointer to the new file in
// *result and returns OK. On failure stores NULL in *result and
// returns non-OK.
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0;
// Create an object that either appends to an existing file, or
// writes to a new file (if the file does not exist to begin with).
// On success, stores a pointer to the new file in *result and
// returns OK. On failure stores NULL in *result and returns
// non-OK.
//
// The returned file will only be accessed by one thread at a time.
//
// May return an IsNotSupportedError error if this Env does
// not allow appending to an existing file. Users of Env (including
// the leveldb implementation) must be prepared to deal with
// an Env that does not support appending.
virtual Status NewAppendableFile(const std::string &fname, WritableFile **result);
// Returns true iff the named file exists.
virtual bool FileExists(const std::string &fname) = 0;
// Store in *result the names of the children of the specified directory.
// The names are relative to "dir".
// Original contents of *results are dropped.
virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0;
// Delete the named file.
virtual Status DeleteFile(const std::string &fname) = 0;
// Create the specified directory.
virtual Status CreateDir(const std::string &dirname) = 0;
// Delete the specified directory.
virtual Status DeleteDir(const std::string &dirname) = 0;
// Store the size of fname in *file_size.
virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0;
// Rename file src to target.
virtual Status RenameFile(const std::string &src, const std::string &target) = 0;
// Lock the specified file. Used to prevent concurrent access to
// the same db by multiple processes. On failure, stores NULL in
// *lock and returns non-OK.
//
// On success, stores a pointer to the object that represents the
// acquired lock in *lock and returns OK. The caller should call
// UnlockFile(*lock) to release the lock. If the process exits,
// the lock will be automatically released.
//
// If somebody else already holds the lock, finishes immediately
// with a failure. I.e., this call does not wait for existing locks
// to go away.
//
// May create the named file if it does not already exist.
virtual Status LockFile(const std::string &fname, FileLock **lock) = 0;
// Release the lock acquired by a previous successful call to LockFile.
// REQUIRES: lock was returned by a successful LockFile() call
// REQUIRES: lock has not already been unlocked.
virtual Status UnlockFile(FileLock *lock) = 0;
// Arrange to run "(*function)(arg)" once in a background thread.
//
// "function" may run in an unspecified thread. Multiple functions
// added to the same Env may run concurrently in different threads.
// I.e., the caller may not assume that background work items are
// serialized.
virtual void Schedule(void (*function)(void *arg), void *arg) = 0;
// Start a new thread, invoking "function(arg)" within the new thread.
// When "function(arg)" returns, the thread will be destroyed.
virtual void StartThread(void (*function)(void *arg), void *arg) = 0;
// *path is set to a temporary directory that can be used for testing. It may
// or many not have just been created. The directory may or may not differ
// between runs of the same process, but subsequent calls will return the
// same directory.
virtual Status GetTestDirectory(std::string *path) = 0;
// Create and return a log file for storing informational messages.
virtual Status NewLogger(const std::string &fname, Logger **result) = 0;
// Returns the number of micro-seconds since some fixed point in time. Only
// useful for computing deltas of time.
virtual uint64_t NowMicros() = 0;
// Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
private:
// No copying allowed
Env(const Env &);
void operator=(const Env &);
};
// A file abstraction for reading sequentially through a file
class SequentialFile {
public:
SequentialFile()
{
}
virtual ~SequentialFile();
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
// written by this routine. Sets "*result" to the data that was
// read (including if fewer than "n" bytes were successfully read).
// May set "*result" to point at data in "scratch[0..n-1]", so
// "scratch[0..n-1]" must be live when "*result" is used.
// If an error was encountered, returns a non-OK status.
//
// REQUIRES: External synchronization
virtual Status Read(size_t n, Slice *result, char *scratch) = 0;
// Skip "n" bytes from the file. This is guaranteed to be no
// slower than reading the same data, but may be faster.
//
// If end of file is reached, skipping will stop at the end of the
// file, and Skip will return OK.
//
// REQUIRES: External synchronization
virtual Status Skip(uint64_t n) = 0;
private:
// No copying allowed
SequentialFile(const SequentialFile &);
void operator=(const SequentialFile &);
};
// A file abstraction for randomly reading the contents of a file.
class RandomAccessFile {
public:
RandomAccessFile()
{
}
virtual ~RandomAccessFile();
// Read up to "n" bytes from the file starting at "offset".
// "scratch[0..n-1]" may be written by this routine. Sets "*result"
// to the data that was read (including if fewer than "n" bytes were
// successfully read). May set "*result" to point at data in
// "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
// "*result" is used. If an error was encountered, returns a non-OK
// status.
//
// Safe for concurrent use by multiple threads.
virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0;
private:
// No copying allowed
RandomAccessFile(const RandomAccessFile &);
void operator=(const RandomAccessFile &);
};
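// Illustrative read through the interface above (a sketch; "file" is a
// hypothetical RandomAccessFile obtained from Env::NewRandomAccessFile):
//
//   char scratch[100];
//   Slice result;
//   Status s = file->Read(0, sizeof(scratch), &result, scratch);
//   // result may point into scratch, so scratch must outlive result.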
// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
class WritableFile {
public:
WritableFile()
{
}
virtual ~WritableFile();
virtual Status Append(const Slice &data) = 0;
virtual Status Close() = 0;
virtual Status Flush() = 0;
virtual Status Sync() = 0;
private:
// No copying allowed
WritableFile(const WritableFile &);
void operator=(const WritableFile &);
};
// An interface for writing log messages.
class Logger {
public:
Logger()
{
}
virtual ~Logger();
// Write an entry to the log file with the specified format.
virtual void Logv(const char *format, va_list ap) = 0;
private:
// No copying allowed
Logger(const Logger &);
void operator=(const Logger &);
};
// Identifies a locked file.
class FileLock {
public:
FileLock()
{
}
virtual ~FileLock();
private:
// No copying allowed
FileLock(const FileLock &);
void operator=(const FileLock &);
};
// Log the specified data to *info_log if info_log is non-NULL.
extern void Log(Logger *info_log, const char *format, ...)
#if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
// A utility routine: write "data" to the named file.
Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname);
// A utility routine: read contents of named file into *data
Status ReadFileToString(Env *env, const std::string &fname, std::string *data);
// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
class EnvWrapper : public Env {
public:
// Initialize an EnvWrapper that delegates all calls to *t
explicit EnvWrapper(Env *t) : target_(t)
{
}
virtual ~EnvWrapper();
// Return the target to which this Env forwards all calls
Env *target() const
{
return target_;
}
// The following text is boilerplate that forwards all methods to target()
Status NewSequentialFile(const std::string &f, SequentialFile **r)
{
return target_->NewSequentialFile(f, r);
}
Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r)
{
return target_->NewRandomAccessFile(f, r);
}
Status NewWritableFile(const std::string &f, WritableFile **r)
{
return target_->NewWritableFile(f, r);
}
Status NewAppendableFile(const std::string &f, WritableFile **r)
{
return target_->NewAppendableFile(f, r);
}
bool FileExists(const std::string &f)
{
return target_->FileExists(f);
}
Status GetChildren(const std::string &dir, std::vector<std::string> *r)
{
return target_->GetChildren(dir, r);
}
Status DeleteFile(const std::string &f)
{
return target_->DeleteFile(f);
}
Status CreateDir(const std::string &d)
{
return target_->CreateDir(d);
}
Status DeleteDir(const std::string &d)
{
return target_->DeleteDir(d);
}
Status GetFileSize(const std::string &f, uint64_t *s)
{
return target_->GetFileSize(f, s);
}
Status RenameFile(const std::string &s, const std::string &t)
{
return target_->RenameFile(s, t);
}
Status LockFile(const std::string &f, FileLock **l)
{
return target_->LockFile(f, l);
}
Status UnlockFile(FileLock *l)
{
return target_->UnlockFile(l);
}
void Schedule(void (*f)(void *), void *a)
{
return target_->Schedule(f, a);
}
void StartThread(void (*f)(void *), void *a)
{
return target_->StartThread(f, a);
}
virtual Status GetTestDirectory(std::string *path)
{
return target_->GetTestDirectory(path);
}
virtual Status NewLogger(const std::string &fname, Logger **result)
{
return target_->NewLogger(fname, result);
}
uint64_t NowMicros()
{
return target_->NowMicros();
}
void SleepForMicroseconds(int micros)
{
target_->SleepForMicroseconds(micros);
}
private:
Env *target_;
};
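// A minimal sketch of overriding part of an Env via EnvWrapper (the subclass
// below is hypothetical):
//
//   class QuietEnv : public EnvWrapper {
//    public:
//     explicit QuietEnv(Env *t) : EnvWrapper(t) {}
//     virtual Status NewLogger(const std::string &f, Logger **r) {
//       *r = NULL; // drop informational logging; everything else forwards
//       return Status::OK();
//     }
//   };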
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
| 12,539 | 30.827411 | 93 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/utils/jenkins/scripts/createNamespace.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
# createNamespace.sh - Remove old namespaces and create new
set -e
# region used for dax namespaces.
DEV_DAX_R=0x0000
# region used for fsdax namespaces.
FS_DAX_R=0x0001
CREATE_DAX=false
CREATE_PMEM=false
MOUNT_POINT="/mnt/pmem0"
SIZE=100G
function usage()
{
echo ""
echo "Script for creating namespaces, mountpoint, and configuring file permissions."
echo "Usage: $(basename $1) [-h|--help] [-d|--dax] [-p|--pmem] [--size]"
echo "-h, --help Print help and exit"
echo "-d, --dax Create dax device."
echo "-p, --pmem Create fsdax device and create mountpoint."
echo "--size Set size for namespaces [default: $SIZE]"
}
function clear_namespaces() {
scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
$scriptdir/removeNamespaces.sh
}
function create_devdax() {
local align=$1
local size=$2
local cmd="sudo ndctl create-namespace --mode devdax -a ${align} -s ${size} -r ${DEV_DAX_R} -f"
result=$(${cmd})
if [ $? -ne 0 ]; then
exit 1;
fi
jq -r '.daxregion.devices[].chardev' <<< $result
}
function create_fsdax() {
local size=$1
local cmd="sudo ndctl create-namespace --mode fsdax -s ${size} -r ${FS_DAX_R} -f"
result=$(${cmd})
if [ $? -ne 0 ]; then
exit 1;
fi
jq -r '.blockdev' <<< $result
}
while getopts ":dhp-:" optchar; do
case "${optchar}" in
-)
case "$OPTARG" in
help) usage $0 && exit 0 ;;
dax) CREATE_DAX=true ;;
pmem) CREATE_PMEM=true ;;
size=*) SIZE="${OPTARG#*=}" ;;
*) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;;
esac
;;
p) CREATE_PMEM=true ;;
d) CREATE_DAX=true ;;
h) usage $0 && exit 0 ;;
*) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;;
esac
done
# There is no default test configuration in this script. A configuration has to be specified.
if ! $CREATE_DAX && ! $CREATE_PMEM; then
echo ""
echo "ERROR: No config type selected. Please select one or more config types."
exit 1
fi
# Remove existing namespaces.
clear_namespaces
# Creating namespaces.
trap 'echo "ERROR: Failed to create namespaces"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT
if $CREATE_DAX; then
create_devdax 4k $SIZE
fi
if $CREATE_PMEM; then
pmem_name=$(create_fsdax $SIZE)
fi
# Creating mountpoint.
trap 'echo "ERROR: Failed to create mountpoint"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT
if $CREATE_PMEM; then
if [ ! -d "$MOUNT_POINT" ]; then
sudo mkdir $MOUNT_POINT
fi
if ! grep -qs "$MOUNT_POINT " /proc/mounts; then
sudo mkfs.ext4 -F /dev/$pmem_name
sudo mount -o dax /dev/$pmem_name $MOUNT_POINT
fi
fi
# Changing file permissions.
sudo chmod 777 $MOUNT_POINT || true
sudo chmod 777 /dev/dax* || true
sudo chmod a+rw /sys/bus/nd/devices/region*/deep_flush
sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/resource
sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/dax*/resource
# Print created namespaces.
ndctl list -X | jq -r '.[] | select(.mode=="devdax") | [.daxregion.devices[].chardev, "align: "+(.daxregion.align/1024|tostring+"k"), "size: "+(.size/1024/1024/1024|tostring+"G") ]'
ndctl list | jq -r '.[] | select(.mode=="fsdax") | [.blockdev, "size: "+(.size/1024/1024/1024|tostring+"G") ]'
| 3,239 | 26.457627 | 181 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/utils/jenkins/scripts/removeNamespaces.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
# removeNamespaces.sh - clear all existing namespaces.
set -e
MOUNT_POINT="/mnt/pmem*"
sudo umount $MOUNT_POINT || true
namespace_names=$(ndctl list -X | jq -r '.[].dev')
for n in $namespace_names
do
sudo ndctl clear-errors $n -v
done
sudo ndctl disable-namespace all || true
sudo ndctl destroy-namespace all || true
| 424 | 20.25 | 54 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmemkv-bench-sd/utils/jenkins/scripts/common.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
# common.sh - contains bash functions used in all jenkins pipelines.
set -o pipefail
scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
function system_info {
echo "********** system-info **********"
cat /etc/os-release | grep -oP "PRETTY_NAME=\K.*"
uname -r
echo "libndctl: $(pkg-config --modversion libndctl || echo 'libndctl not found')"
echo "libfabric: $(pkg-config --modversion libfabric || echo 'libfabric not found')"
echo "libpmem: $(pkg-config --modversion libpmem || echo 'libpmem not found')"
echo "libpmemobj: $(pkg-config --modversion libpmemobj || echo 'libpmemobj not found')"
echo "libpmemobj++: $(pkg-config --modversion libpmemobj++ || echo 'libpmemobj++ not found')"
echo "memkind: $(pkg-config --modversion memkind || echo 'memkind not found')"
echo "TBB : $(pkg-config --modversion TBB || echo 'TBB not found')"
echo "valgrind: $(pkg-config --modversion valgrind || echo 'valgrind not found')"
echo "**********memory-info**********"
sudo ipmctl show -dimm || true
sudo ipmctl show -topology || true
echo "**********list-existing-namespaces**********"
sudo ndctl list -M -N
echo "**********installed-packages**********"
zypper se --installed-only 2>/dev/null || true
apt list --installed 2>/dev/null || true
yum list installed 2>/dev/null || true
echo "**********/proc/cmdline**********"
cat /proc/cmdline
echo "**********/proc/modules**********"
cat /proc/modules
echo "**********/proc/cpuinfo**********"
cat /proc/cpuinfo
echo "**********/proc/meminfo**********"
cat /proc/meminfo
eco "**********/proc/swaps**********"
cat /proc/swaps
echo "**********/proc/version**********"
cat /proc/version
echo "**********check-updates**********"
sudo zypper list-updates 2>/dev/null || true
sudo apt-get update 2>/dev/null || true ; apt upgrade --dry-run 2>/dev/null || true
sudo dnf check-update 2>/dev/null || true
echo "**********list-enviroment**********"
env
}
function set_warning_message {
local info_addr=$1
sudo bash -c "cat > /etc/motd <<EOL
___ ___
/ \ / \ HELLO!
\_ \ / __/ THIS NODE IS CONNECTED TO PMEMKV JENKINS
_\ \ / /__ THERE ARE TESTS CURRENTLY RUNNING ON THIS MACHINE
\___ \____/ __/ PLEASE GO AWAY :)
\_ _/
| @ @ \_
| FOR MORE INFORMATION GO: ${info_addr}
_/ /\
/o) (o/\ \_
\_____/ /
\____/
EOL"
}
function disable_warning_message {
sudo rm /etc/motd || true
}
# Check host linux distribution and return distro name
function check_distro {
distro=$(cat /etc/os-release | grep -e ^NAME= | cut -c6-) && echo "${distro//\"}"
}
| 2,808 | 34.556962 | 94 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/builddatastoreall.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER
make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER
cat builddatastoreall.sh
| 236 | 46.4 | 99 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/buildclobber.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=1
make EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=1
cat buildclobber.sh
| 171 | 33.4 | 70 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/buildredo.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=1
make EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=1
cat buildredo.sh
| 166 | 32.4 | 69 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/builddatastoreclobber.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER
make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER
cat builddatastoreclobber.sh
| 182 | 35.6 | 70 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/run.sh
|
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=100000
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=100000
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DGET_NDP_BREAKDOWN
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DGET_NDP_BREAKDOWN
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER
EXTRA_CFLAGS="-Wno-error"
| 481 | 67.857143 | 112 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/build.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000
cat run.sh
| 180 | 35.2 | 79 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/builddatastore.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1
make EXTRA_CFLAGS+=-DRUN_COUNT=1
cat builddatastore.sh
| 111 | 21.4 | 38 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/builddatastoreredo.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO
make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO
cat builddatastoreredo.sh
| 173 | 33.8 | 67 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/buildall.sh
|
make clobber
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=10000
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=10000
cat run.sh
| 300 | 59.2 | 139 |
sh
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_config.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_config.h -- internal definitions for rpmemd config
*/
#include <stdint.h>
#include <stdbool.h>
#ifndef RPMEMD_DEFAULT_LOG_FILE
#define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log")
#endif
#ifndef RPMEMD_GLOBAL_CONFIG_FILE
#define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\
".conf")
#endif
#define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf")
#define RPMEM_DEFAULT_MAX_LANES 1024
#define RPMEM_DEFAULT_NTHREADS 0
#define HOME_ENV "HOME"
#define HOME_STR_PLACEHOLDER ("$" HOME_ENV)
struct rpmemd_config {
char *log_file;
char *poolset_dir;
const char *rm_poolset;
bool force;
bool pool_set;
bool persist_apm;
bool persist_general;
bool use_syslog;
uint64_t max_lanes;
enum rpmemd_log_level log_level;
size_t nthreads;
};
int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]);
void rpmemd_config_free(struct rpmemd_config *config);
| 1,012 | 21.021739 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_log.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_log.h -- rpmemd logging functions declarations
*/
#include <string.h>
#include "util.h"
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* The tab character is not allowed in rpmemd log,
* because it is not well handled by syslog.
* Please use RPMEMD_LOG_INDENT instead.
*/
#define RPMEMD_LOG_INDENT " "
#ifdef DEBUG
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, NULL, 0, fmt, ## arg);\
} while (0)
#endif
#ifdef DEBUG
#define RPMEMD_DBG(fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(_RPD_LOG_DBG, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_DBG(fmt, arg...) do {} while (0)
#endif
#define RPMEMD_ERR(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
} while (0)
#define RPMEMD_FATAL(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
abort();\
} while (0)
#define RPMEMD_ASSERT(cond) do {\
if (!(cond)) {\
rpmemd_log(RPD_LOG_ERR, __FILE__, __LINE__,\
"assertion fault: %s", #cond);\
abort();\
}\
} while (0)
enum rpmemd_log_level {
RPD_LOG_ERR,
RPD_LOG_WARN,
RPD_LOG_NOTICE,
RPD_LOG_INFO,
_RPD_LOG_DBG, /* disallow to use this with LOG macro */
MAX_RPD_LOG,
};
enum rpmemd_log_level rpmemd_log_level_from_str(const char *str);
const char *rpmemd_log_level_to_str(enum rpmemd_log_level level);
extern enum rpmemd_log_level rpmemd_log_level;
int rpmemd_log_init(const char *ident, const char *fname, int use_syslog);
void rpmemd_log_close(void);
int rpmemd_prefix(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void rpmemd_log(enum rpmemd_log_level level, const char *fname,
int lineno, const char *fmt, ...) FORMAT_PRINTF(4, 5);
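/*
 * Illustrative use of the logging API above (a sketch; "pool_desc" and
 * "lane" are hypothetical variables, and DAEMON_NAME comes from rpmemd.h).
 * Note that a tab character in the format string is rejected at compile
 * time by the macros, and RPMEMD_DBG is a no-op unless built with DEBUG:
 *
 *   rpmemd_log_init(DAEMON_NAME, NULL, 1);
 *   RPMEMD_LOG(INFO, "pool opened: %s", pool_desc);
 *   RPMEMD_DBG("lane %u ready", lane);
 *   rpmemd_log_close();
 */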
| 1,991 | 25.210526 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_db.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_db.h -- internal definitions for rpmemd database of pool set files
*/
struct rpmemd_db;
struct rpmem_pool_attr;
/*
* struct rpmemd_db_pool -- remote pool context
*/
struct rpmemd_db_pool {
void *pool_addr;
size_t pool_size;
struct pool_set *set;
};
struct rpmemd_db *rpmemd_db_init(const char *root_dir, mode_t mode);
struct rpmemd_db_pool *rpmemd_db_pool_create(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size,
const struct rpmem_pool_attr *rattr);
struct rpmemd_db_pool *rpmemd_db_pool_open(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr);
int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
int force, int pool_set);
int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
const struct rpmem_pool_attr *rattr);
void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp);
void rpmemd_db_fini(struct rpmemd_db *db);
int rpmemd_db_check_dir(struct rpmemd_db *db);
int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool);
| 1,132 | 32.323529 | 76 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_util.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util.h -- rpmemd utility functions declarations
*/
int rpmemd_pmem_persist(const void *addr, size_t len);
int rpmemd_flush_fatal(const void *addr, size_t len);
int rpmemd_apply_pm_policy(enum rpmem_persist_method *persist_method,
int (**persist)(const void *addr, size_t len),
void *(**memcpy_persist)(void *pmemdest, const void *src, size_t len),
const int is_pmem);
| 473 | 32.857143 | 71 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_fip.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_fip.h -- rpmemd libfabric provider module header file
*/
#include <stddef.h>
struct rpmemd_fip;
struct rpmemd_fip_attr {
void *addr;
size_t size;
unsigned nlanes;
size_t nthreads;
size_t buff_size;
enum rpmem_provider provider;
enum rpmem_persist_method persist_method;
int (*persist)(const void *addr, size_t len);
void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
int (*deep_persist)(const void *addr, size_t len, void *ctx);
void *ctx;
};
struct rpmemd_fip *rpmemd_fip_init(const char *node,
const char *service,
struct rpmemd_fip_attr *attr,
struct rpmem_resp_attr *resp,
enum rpmem_err *err);
void rpmemd_fip_fini(struct rpmemd_fip *fip);
int rpmemd_fip_accept(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_process_start(struct rpmemd_fip *fip);
int rpmemd_fip_process_stop(struct rpmemd_fip *fip);
int rpmemd_fip_wait_close(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_close(struct rpmemd_fip *fip);
| 1,066 | 27.078947 | 70 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd.h -- rpmemd main header file
*/
#define DAEMON_NAME "rpmemd"
| 158 | 16.666667 | 40 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_obc.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc.h -- rpmemd out-of-band connection declarations
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
struct rpmemd_obc;
struct rpmemd_obc_requests {
int (*create)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr);
int (*open)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req);
int (*close)(struct rpmemd_obc *obc, void *arg, int flags);
int (*set_attr)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_pool_attr *pool_attr);
};
struct rpmemd_obc *rpmemd_obc_init(int fd_in, int fd_out);
void rpmemd_obc_fini(struct rpmemd_obc *obc);
int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status);
int rpmemd_obc_process(struct rpmemd_obc *obc,
struct rpmemd_obc_requests *req_cb, void *arg);
int rpmemd_obc_create_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res);
int rpmemd_obc_open_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status);
int rpmemd_obc_close_resp(struct rpmemd_obc *obc,
int status);
| 1,296 | 31.425 | 65 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/check.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* check.h -- pmempool check command header file
*/
int pmempool_check_func(const char *appname, int argc, char *argv[]);
void pmempool_check_help(const char *appname);
| 261 | 25.2 | 69 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/create.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* create.h -- pmempool create command header file
*/
int pmempool_create_func(const char *appname, int argc, char *argv[]);
void pmempool_create_help(const char *appname);
| 265 | 25.6 | 70 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/dump.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* dump.h -- pmempool dump command header file
*/
int pmempool_dump_func(const char *appname, int argc, char *argv[]);
void pmempool_dump_help(const char *appname);
| 257 | 24.8 | 68 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/rm.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* rm.h -- pmempool rm command header file
*/
void pmempool_rm_help(const char *appname);
int pmempool_rm_func(const char *appname, int argc, char *argv[]);
| 249 | 24 | 66 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/feature.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* feature.h -- pmempool feature command header file
*/
int pmempool_feature_func(const char *appname, int argc, char *argv[]);
void pmempool_feature_help(const char *appname);
| 264 | 25.5 | 71 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/convert.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* convert.h -- pmempool convert command header file
*/
#include <sys/types.h>
int pmempool_convert_func(const char *appname, int argc, char *argv[]);
void pmempool_convert_help(const char *appname);
| 293 | 23.5 | 71 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/synchronize.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* synchronize.h -- pmempool sync command header file
*/
int pmempool_sync_func(const char *appname, int argc, char *argv[]);
void pmempool_sync_help(const char *appname);
| 264 | 25.5 | 68 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/common.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* common.h -- declarations of common functions
*/
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <stdbool.h>
#include "queue.h"
#include "log.h"
#include "blk.h"
#include "libpmemobj.h"
#include "lane.h"
#include "ulog.h"
#include "memops.h"
#include "pmalloc.h"
#include "list.h"
#include "obj.h"
#include "memblock.h"
#include "heap_layout.h"
#include "tx.h"
#include "heap.h"
#include "btt_layout.h"
#include "page_size.h"
/* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */
#ifdef _WIN32
#include "srcversion.h"
#endif
#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
#define OPT_SHIFT 12
#define OPT_MASK (~((1 << OPT_SHIFT) - 1))
#define OPT_LOG (1 << (PMEM_POOL_TYPE_LOG + OPT_SHIFT))
#define OPT_BLK (1 << (PMEM_POOL_TYPE_BLK + OPT_SHIFT))
#define OPT_OBJ (1 << (PMEM_POOL_TYPE_OBJ + OPT_SHIFT))
#define OPT_BTT (1 << (PMEM_POOL_TYPE_BTT + OPT_SHIFT))
#define OPT_ALL (OPT_LOG | OPT_BLK | OPT_OBJ | OPT_BTT)
#define OPT_REQ_SHIFT 8
#define OPT_REQ_MASK ((1 << OPT_REQ_SHIFT) - 1)
#define _OPT_REQ(c, n) ((c) << (OPT_REQ_SHIFT * (n)))
#define OPT_REQ0(c) _OPT_REQ(c, 0)
#define OPT_REQ1(c) _OPT_REQ(c, 1)
#define OPT_REQ2(c) _OPT_REQ(c, 2)
#define OPT_REQ3(c) _OPT_REQ(c, 3)
#define OPT_REQ4(c) _OPT_REQ(c, 4)
#define OPT_REQ5(c) _OPT_REQ(c, 5)
#define OPT_REQ6(c) _OPT_REQ(c, 6)
#define OPT_REQ7(c) _OPT_REQ(c, 7)
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#define FOREACH_RANGE(range, ranges)\
PMDK_LIST_FOREACH(range, &(ranges)->head, next)
#define PLIST_OFF_TO_PTR(pop, off)\
((off) == 0 ? NULL : (void *)((uintptr_t)(pop) + (off) - OBJ_OOB_SIZE))
#define ENTRY_TO_ALLOC_HDR(entry)\
((void *)((uintptr_t)(entry) - sizeof(struct allocation_header)))
#define OBJH_FROM_PTR(ptr)\
((void *)((uintptr_t)(ptr) - sizeof(struct legacy_object_header)))
#define DEFAULT_HDR_SIZE PMEM_PAGESIZE
#define DEFAULT_DESC_SIZE PMEM_PAGESIZE
#define POOL_HDR_DESC_SIZE (DEFAULT_HDR_SIZE + DEFAULT_DESC_SIZE)
#define PTR_TO_ALLOC_HDR(ptr)\
((void *)((uintptr_t)(ptr) -\
sizeof(struct legacy_object_header)))
#define OBJH_TO_PTR(objh)\
((void *)((uintptr_t)(objh) + sizeof(struct legacy_object_header)))
/* invalid answer for ask_* functions */
#define INV_ANS '\0'
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* pmem_pool_type_t -- pool types
*/
typedef enum {
PMEM_POOL_TYPE_LOG = 0x01,
PMEM_POOL_TYPE_BLK = 0x02,
PMEM_POOL_TYPE_OBJ = 0x04,
PMEM_POOL_TYPE_BTT = 0x08,
PMEM_POOL_TYPE_ALL = 0x0f,
PMEM_POOL_TYPE_UNKNOWN = 0x80,
} pmem_pool_type_t;
struct option_requirement {
int opt;
pmem_pool_type_t type;
uint64_t req;
};
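/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * requirement saying "option 'v' used on an obj pool also requires 'r'"
 * could be encoded with the OPT_REQ* helpers above:
 *
 *	static const struct option_requirement example_req = {
 *		'v', PMEM_POOL_TYPE_OBJ, OPT_REQ0('r')
 *	};
 */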
struct options {
const struct option *opts;
size_t noptions;
char *bitmap;
const struct option_requirement *req;
};
struct pmem_pool_params {
pmem_pool_type_t type;
char signature[POOL_HDR_SIG_LEN];
uint64_t size;
mode_t mode;
int is_poolset;
int is_part;
int is_checksum_ok;
union {
struct {
uint64_t bsize;
} blk;
struct {
char layout[PMEMOBJ_MAX_LAYOUT];
} obj;
};
};
struct pool_set_file {
int fd;
char *fname;
void *addr;
size_t size;
struct pool_set *poolset;
size_t replica;
time_t mtime;
mode_t mode;
bool fileio;
};
struct pool_set_file *pool_set_file_open(const char *fname,
int rdonly, int check);
void pool_set_file_close(struct pool_set_file *file);
int pool_set_file_read(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_write(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_set_replica(struct pool_set_file *file, size_t replica);
size_t pool_set_file_nreplicas(struct pool_set_file *file);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
void pool_set_file_persist(struct pool_set_file *file,
const void *addr, size_t len);
struct range {
PMDK_LIST_ENTRY(range) next;
uint64_t first;
uint64_t last;
};
struct ranges {
PMDK_LIST_HEAD(rangeshead, range) head;
};
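/*
 * Illustrative sketch (assumed variable names): iterating all parsed
 * ranges with the FOREACH_RANGE() helper defined above:
 *
 *	struct range *r;
 *	FOREACH_RANGE(r, &rs) {
 *		// process blocks r->first .. r->last (inclusive)
 *	}
 */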
pmem_pool_type_t pmem_pool_type_parse_hdr(const struct pool_hdr *hdrp);
pmem_pool_type_t pmem_pool_type(const void *base_pool_addr);
int pmem_pool_checksum(const void *base_pool_addr);
pmem_pool_type_t pmem_pool_type_parse_str(const char *str);
uint64_t pmem_pool_get_min_size(pmem_pool_type_t type);
int pmem_pool_parse_params(const char *fname, struct pmem_pool_params *paramsp,
int check);
int util_poolset_map(const char *fname, struct pool_set **poolset, int rdonly);
struct options *util_options_alloc(const struct option *options,
size_t nopts, const struct option_requirement *req);
void util_options_free(struct options *opts);
int util_options_verify(const struct options *opts, pmem_pool_type_t type);
int util_options_getopt(int argc, char *argv[], const char *optstr,
const struct options *opts);
pmem_pool_type_t util_get_pool_type_second_page(const void *pool_base_addr);
int util_parse_mode(const char *str, mode_t *mode);
int util_parse_ranges(const char *str, struct ranges *rangesp,
struct range entire);
int util_ranges_add(struct ranges *rangesp, struct range range);
void util_ranges_clear(struct ranges *rangesp);
int util_ranges_contain(const struct ranges *rangesp, uint64_t n);
int util_ranges_empty(const struct ranges *rangesp);
int util_check_memory(const uint8_t *buff, size_t len, uint8_t val);
int util_parse_chunk_types(const char *str, uint64_t *types);
int util_parse_lane_sections(const char *str, uint64_t *types);
char ask(char op, char *answers, char def_ans, const char *fmt, va_list ap);
char ask_Yn(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
char ask_yN(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
unsigned util_heap_max_zone(size_t size);
int util_pool_clear_badblocks(const char *path, int create);
static const struct range ENTIRE_UINT64 = {
	{ NULL, NULL }, /* next (list entry) */
0, /* first */
UINT64_MAX /* last */
};
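/*
 * Illustrative sketch (assumption, not from the original source; assumes
 * PMDK_LIST_INIT from queue.h): parsing a user-supplied range string
 * against the full 64-bit domain and testing membership; iteration would
 * follow the FOREACH_RANGE() sketch above:
 *
 *	struct ranges rs;
 *	PMDK_LIST_INIT(&rs.head);
 *	if (util_parse_ranges("0-15,32-63", &rs, ENTIRE_UINT64) == 0) {
 *		int selected = util_ranges_contain(&rs, 42);
 *		util_ranges_clear(&rs);
 *	}
 */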
| 5,957 | 28.205882 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/transform.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* transform.h -- pmempool transform command header file
*/
int pmempool_transform_func(const char *appname, int argc, char *argv[]);
void pmempool_transform_help(const char *appname);
| 277 | 26.8 | 73 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/info.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* info.h -- pmempool info command header file
*/
#include "vec.h"
/*
* Verbose levels used in application:
*
* VERBOSE_DEFAULT:
* Default value for application's verbosity level.
* This is also set for data structures which should be
* printed without any command line argument.
*
* VERBOSE_MAX:
* Maximum value for application's verbosity level.
 * This value is used when the -v command line argument is passed.
*
* VERBOSE_SILENT:
 * This value is outside the normal verbosity range and is used
 * only for verbosity levels of data structures which should _not_ be
 * printed unless explicitly requested via command line arguments.
*/
#define VERBOSE_SILENT 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_MAX 2
/*
* print_bb_e -- printing bad blocks options
*/
enum print_bb_e {
PRINT_BAD_BLOCKS_NOT_SET,
PRINT_BAD_BLOCKS_NO,
PRINT_BAD_BLOCKS_YES,
PRINT_BAD_BLOCKS_MAX
};
/*
* pmempool_info_args -- structure for storing command line arguments
*/
struct pmempool_info_args {
char *file; /* input file */
unsigned col_width; /* column width for printing fields */
bool human; /* sizes in human-readable formats */
bool force; /* force parsing pool */
enum print_bb_e badblocks; /* print bad blocks */
pmem_pool_type_t type; /* forced pool type */
bool use_range; /* use range for blocks */
	struct ranges ranges; /* ranges of blocks/chunks to dump */
int vlevel; /* verbosity level */
int vdata; /* verbosity level for data dump */
int vhdrdump; /* verbosity level for headers hexdump */
int vstats; /* verbosity level for statistics */
struct {
size_t walk; /* data chunk size */
} log;
struct {
int vmap; /* verbosity level for BTT Map */
int vflog; /* verbosity level for BTT FLOG */
int vbackup; /* verbosity level for BTT Info backup */
bool skip_zeros; /* skip blocks marked with zero flag */
bool skip_error; /* skip blocks marked with error flag */
bool skip_no_flag; /* skip blocks not marked with any flag */
} blk;
struct {
int vlanes; /* verbosity level for lanes */
		int vroot;		/* verbosity level for root object */
		int vobjects;		/* verbosity level for objects */
		int valloc;		/* verbosity level for allocation headers */
		int voobhdr;		/* verbosity level for OOB headers */
		int vheap;		/* verbosity level for heap headers */
		int vzonehdr;		/* verbosity level for zone headers */
		int vchunkhdr;		/* verbosity level for chunk headers */
		int vbitmap;		/* verbosity level for chunk run bitmaps */
		bool lanes_recovery;	/* list only lanes that need recovery */
		bool ignore_empty_obj;	/* skip object types with no objects */
		uint64_t chunk_types;	/* mask of chunk types to print */
		size_t replica;		/* replica number to print */
		struct ranges lane_ranges;	/* ranges of lanes to print */
		struct ranges type_ranges;	/* ranges of object type numbers */
		struct ranges zone_ranges;	/* ranges of zones to print */
		struct ranges chunk_ranges;	/* ranges of chunks to print */
} obj;
};
/*
* pmem_blk_stats -- structure with statistics for pmemblk
*/
struct pmem_blk_stats {
uint32_t total; /* number of processed blocks */
uint32_t zeros; /* number of blocks marked by zero flag */
uint32_t errors; /* number of blocks marked by error flag */
uint32_t noflag; /* number of blocks not marked with any flag */
};
struct pmem_obj_class_stats {
	uint64_t n_units;	/* number of memory block units */
	uint64_t n_used;	/* number of units in use */
	uint64_t unit_size;	/* size of a single unit */
	uint64_t alignment;	/* required block alignment */
	uint32_t nallocs;	/* allocations per a single run */
	uint16_t flags;		/* allocation class flags */
};
struct pmem_obj_zone_stats {
uint64_t n_chunks;
uint64_t n_chunks_type[MAX_CHUNK_TYPE];
uint64_t size_chunks;
uint64_t size_chunks_type[MAX_CHUNK_TYPE];
VEC(, struct pmem_obj_class_stats) class_stats;
};
struct pmem_obj_type_stats {
PMDK_TAILQ_ENTRY(pmem_obj_type_stats) next;
	uint64_t type_num;	/* object type number */
	uint64_t n_objects;	/* number of objects of this type */
	uint64_t n_bytes;	/* total size of objects of this type */
};
struct pmem_obj_stats {
uint64_t n_total_objects;
uint64_t n_total_bytes;
uint64_t n_zones;
uint64_t n_zones_used;
struct pmem_obj_zone_stats *zone_stats;
PMDK_TAILQ_HEAD(obj_type_stats_head, pmem_obj_type_stats) type_stats;
};
/*
* pmem_info -- context for pmeminfo application
*/
struct pmem_info {
const char *file_name; /* current file name */
struct pool_set_file *pfile;
struct pmempool_info_args args; /* arguments parsed from command line */
struct options *opts;
struct pool_set *poolset;
pmem_pool_type_t type;
struct pmem_pool_params params;
struct {
struct pmem_blk_stats stats;
} blk;
struct {
struct pmemobjpool *pop;
struct palloc_heap *heap;
struct alloc_class_collection *alloc_classes;
size_t size;
struct pmem_obj_stats stats;
uint64_t uuid_lo;
uint64_t objid;
} obj;
};
int pmempool_info_func(const char *appname, int argc, char *argv[]);
void pmempool_info_help(const char *appname);
int pmempool_info_read(struct pmem_info *pip, void *buff,
size_t nbytes, uint64_t off);
int pmempool_info_blk(struct pmem_info *pip);
int pmempool_info_log(struct pmem_info *pip);
int pmempool_info_obj(struct pmem_info *pip);
int pmempool_info_btt(struct pmem_info *pip);
| 4,492 | 25.904192 | 73 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/tools/pmempool/output.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* output.h -- declarations of output printing related functions
*/
#include <time.h>
#include <stdint.h>
#include <stdio.h>
void out_set_vlevel(int vlevel);
void out_set_stream(FILE *stream);
void out_set_prefix(const char *prefix);
void out_set_col_width(unsigned col_width);
void outv_err(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void out_err(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void outv_err_vargs(const char *fmt, va_list ap);
void outv_indent(int vlevel, int i);
void outv(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_nl(int vlevel);
int outv_check(int vlevel);
void outv_title(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_field(int vlevel, const char *field, const char *fmt,
...) FORMAT_PRINTF(3, 4);
void outv_hexdump(int vlevel, const void *addr, size_t len, size_t offset,
int sep);
const char *out_get_uuid_str(uuid_t uuid);
const char *out_get_time_str(time_t time);
const char *out_get_size_str(uint64_t size, int human);
const char *out_get_percentage(double percentage);
const char *out_get_checksum(void *addr, size_t len, uint64_t *csump,
uint64_t skip_off);
const char *out_get_btt_map_entry(uint32_t map);
const char *out_get_pool_type_str(pmem_pool_type_t type);
const char *out_get_pool_signature(pmem_pool_type_t type);
const char *out_get_tx_state_str(uint64_t state);
const char *out_get_chunk_type_str(enum chunk_type type);
const char *out_get_chunk_flags(uint16_t flags);
const char *out_get_zone_magic_str(uint32_t magic);
const char *out_get_pmemoid_str(PMEMoid oid, uint64_t uuid_lo);
const char *out_get_arch_machine_class_str(uint8_t machine_class);
const char *out_get_arch_data_str(uint8_t data);
const char *out_get_arch_machine_str(uint16_t machine);
const char *out_get_last_shutdown_str(uint8_t dirty);
const char *out_get_alignment_desc_str(uint64_t ad, uint64_t cur_ad);
const char *out_get_incompat_features_str(uint32_t incompat);
| 2,070 | 41.265306 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/libpmemlog/log.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* log.h -- internal definitions for libpmem log module
*/
#ifndef LOG_H
#define LOG_H 1
#include <stdint.h>
#include <stddef.h>
#include <endian.h>
#include "ctl.h"
#include "util.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMLOG_LOG_PREFIX "libpmemlog"
#define PMEMLOG_LOG_LEVEL_VAR "PMEMLOG_LOG_LEVEL"
#define PMEMLOG_LOG_FILE_VAR "PMEMLOG_LOG_FILE"
/* attributes of the log memory pool format for the pool header */
#define LOG_HDR_SIG "PMEMLOG" /* must be 8 bytes including '\0' */
#define LOG_FORMAT_MAJOR 1
#define LOG_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define LOG_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t log_format_feat_default = LOG_FORMAT_FEAT_DEFAULT;
struct pmemlog {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint64_t start_offset; /* start offset of the usable log space */
uint64_t end_offset; /* maximum offset of the usable log space */
uint64_t write_offset; /* current write point for the log */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
os_rwlock_t *rwlockp; /* pointer to RW lock */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
};
/* data area starts at this alignment after the struct pmemlog above */
#define LOG_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
/*
* log_convert2h -- convert pmemlog structure to host byte order
*/
static inline void
log_convert2h(struct pmemlog *plp)
{
plp->start_offset = le64toh(plp->start_offset);
plp->end_offset = le64toh(plp->end_offset);
plp->write_offset = le64toh(plp->write_offset);
}
/*
* log_convert2le -- convert pmemlog structure to LE byte order
*/
static inline void
log_convert2le(struct pmemlog *plp)
{
plp->start_offset = htole64(plp->start_offset);
plp->end_offset = htole64(plp->end_offset);
plp->write_offset = htole64(plp->write_offset);
}
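/*
 * Illustrative sketch (not part of the original header): the pool metadata
 * is stored little-endian on media, so code inspecting a freshly mapped
 * pmemlog would bracket its accesses with the converters above:
 *
 *	log_convert2h(plp);	// on-media (LE) -> host byte order
 *	// ... read/update start_offset, end_offset, write_offset ...
 *	log_convert2le(plp);	// host byte order -> on-media (LE)
 */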
#if FAULT_INJECTION
void
pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemlog_fault_injection_enabled(void);
#else
static inline void
pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemlog_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,832 | 23.422414 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/freebsd/include/endian.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* endian.h -- redirect for FreeBSD <sys/endian.h>
*/
#include <sys/endian.h>
| 165 | 17.444444 | 50 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/freebsd/include/features.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* features.h -- Empty file redirect
*/
| 126 | 17.142857 | 40 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/freebsd/include/sys/sysmacros.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* sys/sysmacros.h -- Empty file redirect
*/
| 131 | 17.857143 | 41 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/freebsd/include/linux/kdev_t.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* linux/kdev_t.h -- Empty file redirect
*/
| 130 | 17.714286 | 40 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/freebsd/include/linux/limits.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* linux/limits.h -- Empty file redirect
*/
| 130 | 17.714286 | 40 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/pmemcore.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmemcore.h -- definitions for "core" module
*/
#ifndef PMEMCORE_H
#define PMEMCORE_H 1
#include "util.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* core_init -- core module initialization
*/
static inline void
core_init(const char *log_prefix, const char *log_level_var,
const char *log_file_var, int major_version,
int minor_version)
{
util_init();
out_init(log_prefix, log_level_var, log_file_var, major_version,
minor_version);
}
/*
* core_fini -- core module cleanup
*/
static inline void
core_fini(void)
{
out_fini();
}
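/*
 * Illustrative sketch (hypothetical library name and env vars): a library
 * would pair these calls in its constructor/destructor:
 *
 *	core_init("libexample", "EXAMPLE_LOG_LEVEL", "EXAMPLE_LOG_FILE", 1, 0);
 *	// ... library lifetime ...
 *	core_fini();
 */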
#ifdef __cplusplus
}
#endif
#endif
| 687 | 14.288889 | 65 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/fault_injection.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
#ifndef CORE_FAULT_INJECTION
#define CORE_FAULT_INJECTION
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
enum pmem_allocation_type { PMEM_MALLOC, PMEM_REALLOC };
#if FAULT_INJECTION
void core_inject_fault_at(enum pmem_allocation_type type,
int nth, const char *at);
int core_fault_injection_enabled(void);
#else
static inline void
core_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at)
{
abort();
}
static inline int
core_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 642 | 15.075 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/os.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* os.h -- os abstraction layer
*/
#ifndef PMDK_OS_H
#define PMDK_OS_H 1
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>
#include "errno_freebsd.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _WIN32
#define OS_DIR_SEPARATOR '/'
#define OS_DIR_SEP_STR "/"
#else
#define OS_DIR_SEPARATOR '\\'
#define OS_DIR_SEP_STR "\\"
#endif
#ifndef _WIN32
/* madvise() */
#ifdef __FreeBSD__
#define os_madvise minherit
#define MADV_DONTFORK INHERIT_NONE
#else
#define os_madvise madvise
#endif
/* dlopen() */
#ifdef __FreeBSD__
#define RTLD_DEEPBIND 0 /* XXX */
#endif
/* major(), minor() */
#ifdef __FreeBSD__
#define os_major (unsigned)major
#define os_minor (unsigned)minor
#else
#define os_major major
#define os_minor minor
#endif
#endif /* #ifndef _WIN32 */
struct iovec;
/* os_flock */
#define OS_LOCK_SH 1
#define OS_LOCK_EX 2
#define OS_LOCK_NB 4
#define OS_LOCK_UN 8
#ifndef _WIN32
typedef struct stat os_stat_t;
#define os_fstat fstat
#define os_lseek lseek
#else
typedef struct _stat64 os_stat_t;
#define os_fstat _fstat64
#define os_lseek _lseeki64
#endif
#define os_close close
#define os_fclose fclose
#ifndef _WIN32
typedef off_t os_off_t;
#else
/* XXX: os_off_t defined in platform.h */
#endif
int os_open(const char *pathname, int flags, ...);
int os_fsync(int fd);
int os_fsync_dir(const char *dir_name);
int os_stat(const char *pathname, os_stat_t *buf);
int os_unlink(const char *pathname);
int os_access(const char *pathname, int mode);
FILE *os_fopen(const char *pathname, const char *mode);
FILE *os_fdopen(int fd, const char *mode);
int os_chmod(const char *pathname, mode_t mode);
int os_mkstemp(char *temp);
int os_posix_fallocate(int fd, os_off_t offset, os_off_t len);
int os_ftruncate(int fd, os_off_t length);
int os_flock(int fd, int operation);
ssize_t os_writev(int fd, const struct iovec *iov, int iovcnt);
int os_clock_gettime(int id, struct timespec *ts);
unsigned os_rand_r(unsigned *seedp);
int os_unsetenv(const char *name);
int os_setenv(const char *name, const char *value, int overwrite);
char *os_getenv(const char *name);
const char *os_strsignal(int sig);
int os_execv(const char *path, char *const argv[]);
/*
* XXX: missing APis (used in ut_file.c)
*
* rename
* read
* write
*/
#ifdef __cplusplus
}
#endif
#endif /* os.h */
| 2,388 | 19.594828 | 66 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/util.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016-2020, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util.h -- internal definitions for util module
*/
#ifndef PMDK_UTIL_H
#define PMDK_UTIL_H 1
#include <string.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <ctype.h>
#ifdef _MSC_VER
#include <intrin.h> /* popcnt, bitscan */
#endif
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
extern unsigned long long Pagesize;
extern unsigned long long Mmap_align;
#if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__)
#define CACHELINE_SIZE 64ULL
#elif defined(__PPC64__)
#define CACHELINE_SIZE 128ULL
#else
#error unable to recognize architecture at compile time
#endif
#define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1))
#define PAGE_ALIGNED_UP_SIZE(size)\
PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1))
#define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0)
#define IS_MMAP_ALIGNED(size) (((size) & (Mmap_align - 1)) == 0)
#define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr)))
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))
#define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m)
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
void util_init(void);
int util_is_zeroed(const void *addr, size_t len);
uint64_t util_checksum_compute(void *addr, size_t len, uint64_t *csump,
size_t skip_off);
int util_checksum(void *addr, size_t len, uint64_t *csump,
int insert, size_t skip_off);
uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum);
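/*
 * Illustrative sketch (assumption about the insert flag, hedged): with
 * insert != 0 util_checksum() stores the computed checksum at *csump,
 * with insert == 0 it verifies it, so a header check could look like:
 *
 *	if (!util_checksum(hdr, sizeof(*hdr), &hdr->checksum, 0, 0))
 *		;	// checksum mismatch
 */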
int util_parse_size(const char *str, size_t *sizep);
char *util_fgets(char *buffer, int max, FILE *stream);
char *util_getexecname(char *path, size_t pathlen);
char *util_part_realpath(const char *path);
int util_compare_file_inodes(const char *path1, const char *path2);
void *util_aligned_malloc(size_t alignment, size_t size);
void util_aligned_free(void *ptr);
struct tm *util_localtime(const time_t *timep);
int util_safe_strcpy(char *dst, const char *src, size_t max_length);
void util_emit_log(const char *lib, const char *func, int order);
char *util_readline(FILE *fh);
int util_snprintf(char *str, size_t size,
const char *format, ...) FORMAT_PRINTF(3, 4);
#ifdef _WIN32
char *util_toUTF8(const wchar_t *wstr);
wchar_t *util_toUTF16(const char *wstr);
void util_free_UTF8(char *str);
void util_free_UTF16(wchar_t *str);
int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size);
int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size);
void util_suppress_errmsg(void);
int util_lasterror_to_errno(unsigned long err);
#endif
#define UTIL_MAX_ERR_MSG 128
void util_strerror(int errnum, char *buff, size_t bufflen);
void util_strwinerror(unsigned long err, char *buff, size_t bufflen);
void util_set_alloc_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
/*
* Macro calculates number of elements in given table
*/
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#ifdef _MSC_VER
#define force_inline inline __forceinline
#define NORETURN __declspec(noreturn)
#define barrier() _ReadWriteBarrier()
#else
#define force_inline __attribute__((always_inline)) inline
#define NORETURN __attribute__((noreturn))
#define barrier() asm volatile("" ::: "memory")
#endif
#ifdef _MSC_VER
typedef UNALIGNED uint64_t ua_uint64_t;
typedef UNALIGNED uint32_t ua_uint32_t;
typedef UNALIGNED uint16_t ua_uint16_t;
#else
typedef uint64_t ua_uint64_t __attribute__((aligned(1)));
typedef uint32_t ua_uint32_t __attribute__((aligned(1)));
typedef uint16_t ua_uint16_t __attribute__((aligned(1)));
#endif
#define util_get_not_masked_bits(x, mask) ((x) & ~(mask))
/*
* util_setbit -- setbit macro substitution which properly deals with types
*/
static inline void
util_setbit(uint8_t *b, uint32_t i)
{
b[i / 8] = (uint8_t)(b[i / 8] | (uint8_t)(1 << (i % 8)));
}
/*
* util_clrbit -- clrbit macro substitution which properly deals with types
*/
static inline void
util_clrbit(uint8_t *b, uint32_t i)
{
b[i / 8] = (uint8_t)(b[i / 8] & (uint8_t)(~(1 << (i % 8))));
}
#define util_isset(a, i) isset(a, i)
#define util_isclr(a, i) isclr(a, i)
#define util_flag_isset(a, f) ((a) & (f))
#define util_flag_isclr(a, f) (((a) & (f)) == 0)
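/*
 * Illustrative sketch (not in the original header): bitmap round trip
 * using the helpers above:
 *
 *	uint8_t map[4] = {0};
 *	util_setbit(map, 10);	// sets bit 2 of map[1]
 *	// util_isset(map, 10) is now nonzero
 *	util_clrbit(map, 10);
 */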
/*
* util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise
*/
static force_inline int
util_is_pow2(uint64_t v)
{
return v && !(v & (v - 1));
}
/*
* util_div_ceil -- divides a by b and rounds up the result
*/
static force_inline unsigned
util_div_ceil(unsigned a, unsigned b)
{
return (unsigned)(((unsigned long)a + b - 1) / b);
}
/*
* util_bool_compare_and_swap -- perform an atomic compare and swap
* util_fetch_and_* -- perform an operation atomically, return old value
* util_synchronize -- issue a full memory barrier
* util_popcount -- count number of set bits
* util_lssb_index -- return index of least significant set bit,
* undefined on zero
* util_mssb_index -- return index of most significant set bit
* undefined on zero
*
* XXX assertions needed on (value != 0) in both versions of bitscans
*
*/
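/*
 * Illustrative sketch (hypothetical shared counter): a CAS retry loop
 * built from the wrappers documented above; the same effect is available
 * directly via util_fetch_and_add64(&counter, 1):
 *
 *	uint64_t old;
 *	do {
 *		old = counter;
 *	} while (!util_bool_compare_and_swap64(&counter, old, old + 1));
 */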
#ifndef _MSC_VER
/*
* ISO C11 -- 7.17.1.4
* memory_order - an enumerated type whose enumerators identify memory ordering
* constraints.
*/
typedef enum {
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
 *
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address -- see comments at the MSVC version.
*
* Also, instead of generic functions, two versions are available:
* for 32 bit fundamental integers, and for 64 bit ones.
*/
#define util_atomic_load_explicit32 __atomic_load
#define util_atomic_load_explicit64 __atomic_load
/*
* ISO C11 -- 7.17.7.1 The atomic_store generic functions
* Integer width specific versions as supplement for:
*
* #include <stdatomic.h>
* void atomic_store(volatile A *object, C desired);
* void atomic_store_explicit(volatile A *object, C desired,
* memory_order order);
*/
#define util_atomic_store_explicit32 __atomic_store_n
#define util_atomic_store_explicit64 __atomic_store_n
/*
* https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
* https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions
*/
#define util_bool_compare_and_swap32 __sync_bool_compare_and_swap
#define util_bool_compare_and_swap64 __sync_bool_compare_and_swap
#define util_fetch_and_add32 __sync_fetch_and_add
#define util_fetch_and_add64 __sync_fetch_and_add
#define util_fetch_and_sub32 __sync_fetch_and_sub
#define util_fetch_and_sub64 __sync_fetch_and_sub
#define util_fetch_and_and32 __sync_fetch_and_and
#define util_fetch_and_and64 __sync_fetch_and_and
#define util_fetch_and_or32 __sync_fetch_and_or
#define util_fetch_and_or64 __sync_fetch_and_or
#define util_synchronize __sync_synchronize
#define util_popcount(value) ((unsigned char)__builtin_popcount(value))
#define util_popcount64(value) ((unsigned char)__builtin_popcountll(value))
#define util_lssb_index(value) ((unsigned char)__builtin_ctz(value))
#define util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value))
#define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value)))
#define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value)))
#else
/* ISO C11 -- 7.17.1.4 */
typedef enum {
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
 *
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address.
* The MSVC specific implementation needs to trigger a barrier (at least
* compiler barrier) after the load from the volatile value. The actual load
* from the volatile value itself is expected to be atomic.
*
 * The actual interface here:
* #include "util.h"
* void util_atomic_load32(volatile A *object, A *destination);
* void util_atomic_load64(volatile A *object, A *destination);
* void util_atomic_load_explicit32(volatile A *object, A *destination,
* memory_order order);
* void util_atomic_load_explicit64(volatile A *object, A *destination,
* memory_order order);
*/
#ifndef _M_X64
#error MSVC ports of util_atomic_ only work on X86_64
#endif
#if _MSC_VER >= 2000
#error util_atomic_ utility functions not tested with this version of VC++
#error These utility functions are not future proof, as they are not
#error based on publicly available documentation.
#endif
#define util_atomic_load_explicit(object, dest, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_consume &&\
order != memory_order_acquire &&\
order != memory_order_relaxed);\
*dest = *object;\
if (order == memory_order_seq_cst ||\
order == memory_order_consume ||\
order == memory_order_acquire)\
_ReadWriteBarrier();\
} while (0)
#define util_atomic_load_explicit32 util_atomic_load_explicit
#define util_atomic_load_explicit64 util_atomic_load_explicit
/* ISO C11 -- 7.17.7.1 The atomic_store generic functions */
#define util_atomic_store_explicit64(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange64(\
(volatile long long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
#define util_atomic_store_explicit32(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange(\
(volatile long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
/*
* https://msdn.microsoft.com/en-us/library/hh977022.aspx
*/
static __inline int
bool_compare_and_swap32_VC(volatile LONG *ptr,
LONG oldval, LONG newval)
{
LONG old = InterlockedCompareExchange(ptr, newval, oldval);
return (old == oldval);
}
static __inline int
bool_compare_and_swap64_VC(volatile LONG64 *ptr,
LONG64 oldval, LONG64 newval)
{
LONG64 old = InterlockedCompareExchange64(ptr, newval, oldval);
return (old == oldval);
}
#define util_bool_compare_and_swap32(p, o, n)\
bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n))
#define util_bool_compare_and_swap64(p, o, n)\
bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n))
#define util_fetch_and_add32(ptr, value)\
InterlockedExchangeAdd((LONG *)(ptr), value)
#define util_fetch_and_add64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), value)
#define util_fetch_and_sub32(ptr, value)\
InterlockedExchangeSubtract((LONG *)(ptr), value)
#define util_fetch_and_sub64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value)))
#define util_fetch_and_and32(ptr, value)\
InterlockedAnd((LONG *)(ptr), value)
#define util_fetch_and_and64(ptr, value)\
InterlockedAnd64((LONG64 *)(ptr), value)
#define util_fetch_and_or32(ptr, value)\
InterlockedOr((LONG *)(ptr), value)
#define util_fetch_and_or64(ptr, value)\
InterlockedOr64((LONG64 *)(ptr), value)
static __inline void
util_synchronize(void)
{
MemoryBarrier();
}
#define util_popcount(value) (unsigned char)__popcnt(value)
#define util_popcount64(value) (unsigned char)__popcnt64(value)
static __inline unsigned char
util_lssb_index(int value)
{
unsigned long ret;
_BitScanForward(&ret, value);
return (unsigned char)ret;
}
static __inline unsigned char
util_lssb_index64(long long value)
{
unsigned long ret;
_BitScanForward64(&ret, value);
return (unsigned char)ret;
}
static __inline unsigned char
util_mssb_index(int value)
{
unsigned long ret;
_BitScanReverse(&ret, value);
return (unsigned char)ret;
}
static __inline unsigned char
util_mssb_index64(long long value)
{
unsigned long ret;
_BitScanReverse64(&ret, value);
return (unsigned char)ret;
}
#endif
/* ISO C11 -- 7.17.7 Operations on atomic types */
#define util_atomic_load32(object, dest)\
util_atomic_load_explicit32(object, dest, memory_order_seq_cst)
#define util_atomic_load64(object, dest)\
util_atomic_load_explicit64(object, dest, memory_order_seq_cst)
#define util_atomic_store32(object, desired)\
util_atomic_store_explicit32(object, desired, memory_order_seq_cst)
#define util_atomic_store64(object, desired)\
util_atomic_store_explicit64(object, desired, memory_order_seq_cst)
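/*
 * Illustrative sketch (hypothetical variable): sequentially consistent
 * store/load round trip with the convenience macros above; note the load
 * writes through an output pointer instead of returning the value:
 *
 *	uint64_t v;
 *	util_atomic_store64(&shared, 7);
 *	util_atomic_load64(&shared, &v);	// v == 7
 */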
/*
* util_get_printable_ascii -- convert non-printable ascii to dot '.'
*/
static inline char
util_get_printable_ascii(char c)
{
return isprint((unsigned char)c) ? c : '.';
}
char *util_concat_str(const char *s1, const char *s2);
#if !defined(likely)
#if defined(__GNUC__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (!!(x))
#define unlikely(x) (!!(x))
#endif
#endif
#if defined(__CHECKER__)
#define COMPILE_ERROR_ON(cond)
#define ASSERT_COMPILE_ERROR_ON(cond)
#elif defined(_MSC_VER)
#define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond))
/* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */
#define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0)
#else
#define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
#define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond)
#endif
#ifndef _MSC_VER
#define ATTR_CONSTRUCTOR __attribute__((constructor)) static
#define ATTR_DESTRUCTOR __attribute__((destructor)) static
#else
#define ATTR_CONSTRUCTOR
#define ATTR_DESTRUCTOR
#endif
#ifndef _MSC_VER
#define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR
#else
#ifdef __cplusplus
#define CONSTRUCTOR(fun) \
void fun(); \
struct _##fun { \
_##fun() { \
fun(); \
} \
}; static _##fun foo; \
static
#else
#define CONSTRUCTOR(fun) \
MSVC_CONSTR(fun) \
static
#endif
#endif
#ifdef __GNUC__
#define CHECK_FUNC_COMPATIBLE(func1, func2)\
COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\
typeof(func2)))
#else
#define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0)
#endif /* __GNUC__ */
#ifdef __cplusplus
}
#endif
#endif /* util.h */
| 17,058 | 30.47417 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind_internal.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* valgrind_internal.h -- internal definitions for valgrind macros
*/
#ifndef PMDK_VALGRIND_INTERNAL_H
#define PMDK_VALGRIND_INTERNAL_H 1
#if !defined(_WIN32) && !defined(__FreeBSD__)
#ifndef VALGRIND_ENABLED
#define VALGRIND_ENABLED 1
#endif
#endif
#if VALGRIND_ENABLED
#define VG_PMEMCHECK_ENABLED 1
#define VG_HELGRIND_ENABLED 1
#define VG_MEMCHECK_ENABLED 1
#define VG_DRD_ENABLED 1
#endif
#if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \
VG_DRD_ENABLED
#define ANY_VG_TOOL_ENABLED 1
#else
#define ANY_VG_TOOL_ENABLED 0
#endif
#if ANY_VG_TOOL_ENABLED
extern unsigned _On_valgrind;
#define On_valgrind __builtin_expect(_On_valgrind, 0)
#include "valgrind/valgrind.h"
#else
#define On_valgrind (0)
#endif
#if VG_HELGRIND_ENABLED
extern unsigned _On_helgrind;
#define On_helgrind __builtin_expect(_On_helgrind, 0)
#include "valgrind/helgrind.h"
#else
#define On_helgrind (0)
#endif
#if VG_DRD_ENABLED
extern unsigned _On_drd;
#define On_drd __builtin_expect(_On_drd, 0)
#include "valgrind/drd.h"
#else
#define On_drd (0)
#endif
#if VG_HELGRIND_ENABLED || VG_DRD_ENABLED
extern unsigned _On_drd_or_hg;
#define On_drd_or_hg __builtin_expect(_On_drd_or_hg, 0)
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\
if (On_drd_or_hg) \
ANNOTATE_HAPPENS_BEFORE((obj));\
} while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\
if (On_drd_or_hg) \
ANNOTATE_HAPPENS_AFTER((obj));\
} while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
if (On_drd_or_hg) \
ANNOTATE_NEW_MEMORY((addr), (size));\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_READS_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_READS_END();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_WRITES_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_WRITES_END();\
} while (0)
/* Supported by both helgrind and drd. */
#define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\
if (On_drd_or_hg) \
VALGRIND_HG_DISABLE_CHECKING((addr), (size));\
} while (0)
#else
#define On_drd_or_hg (0)
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
(void) (addr);\
(void) (size);\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0)
#define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\
(void) (addr);\
(void) (size);\
} while (0)
#endif
#if VG_PMEMCHECK_ENABLED
extern unsigned _On_pmemcheck;
#define On_pmemcheck __builtin_expect(_On_pmemcheck, 0)
#include "valgrind/pmemcheck.h"
void pobj_emit_log(const char *func, int order);
void pmem_emit_log(const char *func, int order);
void pmem2_emit_log(const char *func, int order);
extern int _Pmreorder_emit;
#define Pmreorder_emit __builtin_expect(_Pmreorder_emit, 0)
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \
(offset));\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {\
if (On_pmemcheck)\
VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\
} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_DO_FLUSH((addr), (len));\
} while (0)
#define VALGRIND_DO_FENCE do {\
if (On_pmemcheck)\
VALGRIND_PMC_DO_FENCE;\
} while (0)
#define VALGRIND_DO_PERSIST(addr, len) do {\
if (On_pmemcheck) {\
VALGRIND_PMC_DO_FLUSH((addr), (len));\
VALGRIND_PMC_DO_FENCE;\
}\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_SET_CLEAN(addr, len);\
} while (0)
#define VALGRIND_WRITE_STATS do {\
if (On_pmemcheck)\
VALGRIND_PMC_WRITE_STATS;\
} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
if (On_pmemcheck)\
VALGRIND_PMC_EMIT_LOG((emit_log));\
} while (0)
#define VALGRIND_START_TX do {\
if (On_pmemcheck)\
VALGRIND_PMC_START_TX;\
} while (0)
#define VALGRIND_START_TX_N(txn) do {\
if (On_pmemcheck)\
VALGRIND_PMC_START_TX_N(txn);\
} while (0)
#define VALGRIND_END_TX do {\
if (On_pmemcheck)\
VALGRIND_PMC_END_TX;\
} while (0)
#define VALGRIND_END_TX_N(txn) do {\
if (On_pmemcheck)\
VALGRIND_PMC_END_TX_N(txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_TX(addr, len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\
} while (0)
/*
 * Log the library and function name with the proper suffix
 * to the pmemcheck store log file.
*/
#define PMEMOBJ_API_START()\
if (Pmreorder_emit)\
pobj_emit_log(__func__, 0);
#define PMEMOBJ_API_END()\
if (Pmreorder_emit)\
pobj_emit_log(__func__, 1);
#define PMEM_API_START()\
if (Pmreorder_emit)\
pmem_emit_log(__func__, 0);
#define PMEM_API_END()\
if (Pmreorder_emit)\
pmem_emit_log(__func__, 1);
#define PMEM2_API_START(func_name)\
if (Pmreorder_emit)\
pmem2_emit_log(func_name, 0);
#define PMEM2_API_END(func_name)\
if (Pmreorder_emit)\
pmem2_emit_log(func_name, 1);
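/*
 * Illustrative sketch (hypothetical entry point): how an exported API
 * would emit pmreorder begin/end markers with the macros above:
 *
 *	int pmemobj_example(void) {
 *		PMEMOBJ_API_START();
 *		// ... body ...
 *		PMEMOBJ_API_END();
 *		return 0;
 *	}
 */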
#else
#define On_pmemcheck (0)
#define Pmreorder_emit (0)
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
(void) (desc);\
(void) (base_addr);\
(void) (size);\
(void) (offset);\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_DO_FENCE do {} while (0)
#define VALGRIND_DO_PERSIST(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_WRITE_STATS do {} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
(void) (emit_log);\
} while (0)
#define VALGRIND_START_TX do {} while (0)
#define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0)
#define VALGRIND_END_TX do {} while (0)
#define VALGRIND_END_TX_N(txn) do {\
(void) (txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
(void) (txn);\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
(void) (txn);\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define PMEMOBJ_API_START() do {} while (0)
#define PMEMOBJ_API_END() do {} while (0)
#define PMEM_API_START() do {} while (0)
#define PMEM_API_END() do {} while (0)
#define PMEM2_API_START(func_name) do {\
(void) (func_name);\
} while (0)
#define PMEM2_API_END(func_name) do {\
(void) (func_name);\
} while (0)
#endif
#if VG_MEMCHECK_ENABLED
extern unsigned _On_memcheck;
#define On_memcheck __builtin_expect(_On_memcheck, 0)
#include "valgrind/memcheck.h"
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\
if (On_valgrind)\
VALGRIND_DISABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\
if (On_valgrind)\
VALGRIND_ENABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\
if (On_memcheck)\
VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\
} while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\
if (On_memcheck)\
VALGRIND_DESTROY_MEMPOOL(heap);\
} while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\
} while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_FREE(heap, addr);\
} while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_DEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_NOACCESS(addr, len);\
} while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\
if (On_memcheck)\
VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\
} while (0)
#else
#define On_memcheck (0)
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\
do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap)\
do { (void) (heap); } while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\
do { (void) (heap); (void) (addr); (void) (size); } while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\
do { (void) (heap); (void) (addr); } while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\
do {\
(void) (heap); (void) (addrA); (void) (addrB); (void) (size);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#endif
#endif
| 11,169 | 22.319415 | 75 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/fs.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* fs.h -- file system traversal abstraction layer
*/
#ifndef PMDK_FS_H
#define PMDK_FS_H 1
#include <unistd.h>
#ifdef __cplusplus
extern "C" {
#endif
struct fs;
enum fs_entry_type {
FS_ENTRY_FILE,
FS_ENTRY_DIRECTORY,
FS_ENTRY_SYMLINK,
FS_ENTRY_OTHER,
MAX_FS_ENTRY_TYPES
};
struct fs_entry {
enum fs_entry_type type;
const char *name;
size_t namelen;
const char *path;
size_t pathlen;
/* the depth of the traversal */
/* XXX long on FreeBSD. Linux uses short. No harm in it being bigger */
long level;
};
struct fs *fs_new(const char *path);
void fs_delete(struct fs *f);
/* this call invalidates the previous entry */
struct fs_entry *fs_read(struct fs *f);
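/*
 * Illustrative sketch (assumed path): walking a tree and visiting regular
 * files; each fs_read() call invalidates the previously returned entry:
 *
 *	struct fs *f = fs_new("/some/path");
 *	struct fs_entry *e;
 *	while ((e = fs_read(f)) != NULL) {
 *		if (e->type == FS_ENTRY_FILE)
 *			;	// use e->path / e->name
 *	}
 *	fs_delete(f);
 */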
#ifdef __cplusplus
}
#endif
#endif /* PMDK_FS_H */
| 827 | 14.923077 | 72 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/alloc.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
#ifndef COMMON_ALLOC_H
#define COMMON_ALLOC_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef void *(*Malloc_func)(size_t size);
typedef void *(*Realloc_func)(void *ptr, size_t size);
extern Malloc_func fn_malloc;
extern Realloc_func fn_realloc;
#if FAULT_INJECTION
void *_flt_Malloc(size_t, const char *);
void *_flt_Realloc(void *, size_t, const char *);
#define Malloc(size) _flt_Malloc(size, __func__)
#define Realloc(ptr, size) _flt_Realloc(ptr, size, __func__)
#else
void *_Malloc(size_t);
void *_Realloc(void *, size_t);
#define Malloc(size) _Malloc(size)
#define Realloc(ptr, size) _Realloc(ptr, size)
#endif
void set_func_malloc(void *(*malloc_func)(size_t size));
void set_func_realloc(void *(*realloc_func)(void *ptr, size_t size));
/*
* overridable names for malloc & friends used by this library
*/
typedef void (*Free_func)(void *ptr);
typedef char *(*Strdup_func)(const char *s);
extern Free_func Free;
extern Strdup_func Strdup;
extern void *Zalloc(size_t sz);
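/*
 * Illustrative sketch (not part of the original header): typical use of
 * the overridable wrappers; under FAULT_INJECTION builds the call site's
 * __func__ is recorded so faults can be targeted at it:
 *
 *	char *buf = Malloc(len);
 *	if (buf == NULL)
 *		return -1;	// errno set by the underlying allocator
 *	// ...
 *	Free(buf);
 */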
#ifdef __cplusplus
}
#endif
#endif
| 1,131 | 21.64 | 69 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/errno_freebsd.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* errno_freebsd.h -- map Linux errno's to something close on FreeBSD
*/
#ifndef PMDK_ERRNO_FREEBSD_H
#define PMDK_ERRNO_FREEBSD_H 1
#ifdef __FreeBSD__
#define EBADFD EBADF
#define ELIBACC EINVAL
#define EMEDIUMTYPE EOPNOTSUPP
#define ENOMEDIUM ENODEV
#define EREMOTEIO EIO
#endif
#endif /* PMDK_ERRNO_FREEBSD_H */
| 409 | 19.5 | 69 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/os_thread.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_thread.h -- os thread abstraction layer
*/
#ifndef OS_THREAD_H
#define OS_THREAD_H 1
#include <stdint.h>
#include <time.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef union {
long long align;
char padding[44]; /* linux: 40 windows: 44 */
} os_mutex_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 13 */
} os_rwlock_t;
typedef union {
long long align;
char padding[48]; /* linux: 48 windows: 12 */
} os_cond_t;
typedef union {
long long align;
char padding[32]; /* linux: 8 windows: 32 */
} os_thread_t;
typedef union {
long long align; /* linux: long windows: 8 FreeBSD: 12 */
char padding[16]; /* 16 to be safe */
} os_once_t;
#define OS_ONCE_INIT { .padding = {0} }
typedef unsigned os_tls_key_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 8 */
} os_semaphore_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 8 */
} os_thread_attr_t;
typedef union {
long long align;
char padding[512];
} os_cpu_set_t;
#ifdef __FreeBSD__
#define cpu_set_t cpuset_t
typedef uintptr_t os_spinlock_t;
#else
typedef volatile int os_spinlock_t; /* XXX: not implemented on windows */
#endif
void os_cpu_zero(os_cpu_set_t *set);
void os_cpu_set(size_t cpu, os_cpu_set_t *set);
#ifndef _WIN32
#define _When_(...)
#endif
int os_once(os_once_t *o, void (*func)(void));
int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *));
int os_tls_key_delete(os_tls_key_t key);
int os_tls_set(os_tls_key_t key, const void *value);
void *os_tls_get(os_tls_key_t key);
int os_mutex_init(os_mutex_t *__restrict mutex);
int os_mutex_destroy(os_mutex_t *__restrict mutex);
_When_(return == 0, _Acquires_lock_(mutex->lock))
int os_mutex_lock(os_mutex_t *__restrict mutex);
_When_(return == 0, _Acquires_lock_(mutex->lock))
int os_mutex_trylock(os_mutex_t *__restrict mutex);
int os_mutex_unlock(os_mutex_t *__restrict mutex);
/* XXX - non POSIX */
int os_mutex_timedlock(os_mutex_t *__restrict mutex,
const struct timespec *abstime);
int os_rwlock_init(os_rwlock_t *__restrict rwlock);
int os_rwlock_destroy(os_rwlock_t *__restrict rwlock);
int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock);
_When_(return == 0, _Acquires_exclusive_lock_(rwlock->lock))
int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock);
_When_(rwlock->is_write != 0, _Requires_exclusive_lock_held_(rwlock->lock))
_When_(rwlock->is_write == 0, _Requires_shared_lock_held_(rwlock->lock))
int os_rwlock_unlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime);
int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime);
int os_spin_init(os_spinlock_t *lock, int pshared);
int os_spin_destroy(os_spinlock_t *lock);
int os_spin_lock(os_spinlock_t *lock);
int os_spin_unlock(os_spinlock_t *lock);
int os_spin_trylock(os_spinlock_t *lock);
int os_cond_init(os_cond_t *__restrict cond);
int os_cond_destroy(os_cond_t *__restrict cond);
int os_cond_broadcast(os_cond_t *__restrict cond);
int os_cond_signal(os_cond_t *__restrict cond);
int os_cond_timedwait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex, const struct timespec *abstime);
int os_cond_wait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex);
/* threading */
int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr,
void *(*start_routine)(void *), void *arg);
int os_thread_join(os_thread_t *thread, void **result);
void os_thread_self(os_thread_t *thread);
/* thread affinity */
int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size,
const os_cpu_set_t *set);
int os_thread_atfork(void (*prepare)(void), void (*parent)(void),
void (*child)(void));
int os_semaphore_init(os_semaphore_t *sem, unsigned value);
int os_semaphore_destroy(os_semaphore_t *sem);
int os_semaphore_wait(os_semaphore_t *sem);
int os_semaphore_trywait(os_semaphore_t *sem);
int os_semaphore_post(os_semaphore_t *sem);
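/*
 * Illustrative sketch (hypothetical worker): creating and joining a
 * thread through the portable types declared above:
 *
 *	static void *worker(void *arg) { return arg; }
 *
 *	os_thread_t t;
 *	os_thread_create(&t, NULL, worker, NULL);
 *	os_thread_join(&t, NULL);
 */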
#ifdef __cplusplus
}
#endif
#endif /* OS_THREAD_H */
| 5,876 | 31.291209 | 75 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/out.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* out.h -- definitions for "out" module
*/
#ifndef PMDK_OUT_H
#define PMDK_OUT_H 1
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Suppress analyzer errors reported on code paths that follow an
* appropriate ASSERT* macro, for nondebug builds.
*/
#if !defined(DEBUG) && (defined(__clang_analyzer__) || defined(__COVERITY__) ||\
defined(__KLOCWORK__))
#define OUT_FATAL_DISCARD_NORETURN __attribute__((noreturn))
#else
#define OUT_FATAL_DISCARD_NORETURN
#endif
#ifndef EVALUATE_DBG_EXPRESSIONS
#if defined(DEBUG) || defined(__clang_analyzer__) || defined(__COVERITY__) ||\
defined(__KLOCWORK__)
#define EVALUATE_DBG_EXPRESSIONS 1
#else
#define EVALUATE_DBG_EXPRESSIONS 0
#endif
#endif
#ifdef DEBUG
#define OUT_LOG out_log
#define OUT_NONL out_nonl
#define OUT_FATAL out_fatal
#define OUT_FATAL_ABORT out_fatal
#else
static __attribute__((always_inline)) inline void
out_log_discard(const char *file, int line, const char *func, int level,
const char *fmt, ...)
{
(void) file;
(void) line;
(void) func;
(void) level;
(void) fmt;
}
static __attribute__((always_inline)) inline void
out_nonl_discard(int level, const char *fmt, ...)
{
(void) level;
(void) fmt;
}
static __attribute__((always_inline)) OUT_FATAL_DISCARD_NORETURN inline void
out_fatal_discard(const char *file, int line, const char *func,
const char *fmt, ...)
{
(void) file;
(void) line;
(void) func;
(void) fmt;
}
static __attribute__((always_inline)) NORETURN inline void
out_fatal_abort(const char *file, int line, const char *func,
const char *fmt, ...)
{
(void) file;
(void) line;
(void) func;
(void) fmt;
abort();
}
#define OUT_LOG out_log_discard
#define OUT_NONL out_nonl_discard
#define OUT_FATAL out_fatal_discard
#define OUT_FATAL_ABORT out_fatal_abort
#endif
#if defined(__KLOCWORK__)
#define TEST_ALWAYS_TRUE_EXPR(cnd)
#define TEST_ALWAYS_EQ_EXPR(cnd)
#define TEST_ALWAYS_NE_EXPR(cnd)
#else
#define TEST_ALWAYS_TRUE_EXPR(cnd)\
if (__builtin_constant_p(cnd))\
ASSERT_COMPILE_ERROR_ON(cnd);
#define TEST_ALWAYS_EQ_EXPR(lhs, rhs)\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
ASSERT_COMPILE_ERROR_ON((lhs) == (rhs));
#define TEST_ALWAYS_NE_EXPR(lhs, rhs)\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
ASSERT_COMPILE_ERROR_ON((lhs) != (rhs));
#endif
/* produce debug/trace output */
#define LOG(level, ...) do { \
if (!EVALUATE_DBG_EXPRESSIONS) break;\
OUT_LOG(__FILE__, __LINE__, __func__, level, __VA_ARGS__);\
} while (0)
/* produce debug/trace output without prefix and new line */
#define LOG_NONL(level, ...) do { \
if (!EVALUATE_DBG_EXPRESSIONS) break; \
OUT_NONL(level, __VA_ARGS__); \
} while (0)
/* produce output and exit */
#define FATAL(...)\
OUT_FATAL_ABORT(__FILE__, __LINE__, __func__, __VA_ARGS__)
/* assert a condition is true at runtime */
#define ASSERT_rt(cnd) do { \
if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \
OUT_FATAL(__FILE__, __LINE__, __func__, "assertion failure: %s", #cnd);\
} while (0)
/* assertion with extra info printed if assertion fails at runtime */
#define ASSERTinfo_rt(cnd, info) do { \
if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \
OUT_FATAL(__FILE__, __LINE__, __func__, \
"assertion failure: %s (%s = %s)", #cnd, #info, info);\
} while (0)
/* assert two integer values are equal at runtime */
#define ASSERTeq_rt(lhs, rhs) do { \
if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) == (rhs))) break; \
OUT_FATAL(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \
} while (0)
/* assert two integer values are not equal at runtime */
#define ASSERTne_rt(lhs, rhs) do { \
if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) != (rhs))) break; \
OUT_FATAL(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \
} while (0)
/* assert a condition is true */
#define ASSERT(cnd)\
do {\
/*\
* Detect useless asserts on always true expression. Please use\
* COMPILE_ERROR_ON(!cnd) or ASSERT_rt(cnd) in such cases.\
*/\
TEST_ALWAYS_TRUE_EXPR(cnd);\
ASSERT_rt(cnd);\
} while (0)
/* assertion with extra info printed if assertion fails */
#define ASSERTinfo(cnd, info)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_TRUE_EXPR(cnd);\
ASSERTinfo_rt(cnd, info);\
} while (0)
/* assert two integer values are equal */
#define ASSERTeq(lhs, rhs)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_EQ_EXPR(lhs, rhs);\
ASSERTeq_rt(lhs, rhs);\
} while (0)
/* assert two integer values are not equal */
#define ASSERTne(lhs, rhs)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_NE_EXPR(lhs, rhs);\
ASSERTne_rt(lhs, rhs);\
} while (0)
#define ERR(...)\
out_err(__FILE__, __LINE__, __func__, __VA_ARGS__)
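/*
 * Illustrative usage sketch (not part of this header). LOG and the
 * ASSERT* family evaluate their arguments only when
 * EVALUATE_DBG_EXPRESSIONS is nonzero, so the expressions passed to
 * them must be free of side effects. do_open() is hypothetical.
 *
 *	static int do_open(const char *path) {
 *		LOG(3, "path \"%s\"", path);
 *		ASSERT(path != NULL);
 *		int fd = open(path, O_RDONLY);
 *		if (fd < 0) {
 *			ERR("open \"%s\" failed", path);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */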
void out_init(const char *log_prefix, const char *log_level_var,
const char *log_file_var, int major_version,
int minor_version);
void out_fini(void);
void out(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void out_nonl(int level, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void out_log(const char *file, int line, const char *func, int level,
const char *fmt, ...) FORMAT_PRINTF(5, 6);
void out_err(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void NORETURN out_fatal(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void out_set_print_func(void (*print_func)(const char *s));
void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size,
const char *format, va_list ap));
#ifdef _WIN32
#ifndef PMDK_UTF8_API
#define out_get_errormsg out_get_errormsgW
#else
#define out_get_errormsg out_get_errormsgU
#endif
#endif
#ifndef _WIN32
const char *out_get_errormsg(void);
#else
const char *out_get_errormsgU(void);
const wchar_t *out_get_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 6,066 | 25.150862 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind/memcheck.h
|
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (memcheck.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(memcheck.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_UNDEFINED,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
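/*
 * Illustrative sketch (not from the Valgrind manual): a custom
 * allocator recycling blocks from a private free list can keep
 * Memcheck's view of the memory accurate. The helper functions are
 * hypothetical.
 *
 *	void *my_alloc(size_t n) {
 *		void *p = take_from_free_list(n);
 *		VALGRIND_MAKE_MEM_UNDEFINED(p, n);  // allocated, uninitialized
 *		return p;
 *	}
 *
 *	void my_free(void *p, size_t n) {
 *		put_on_free_list(p, n);
 *		VALGRIND_MAKE_MEM_NOACCESS(p, n);   // catch use-after-free
 *	}
 */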
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressability is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressability and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressability of an
lvalue to be checked. If suitable addressability and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Check that memory at _qzz_addr is unaddressable for _qzz_len bytes.
If any byte in this range is addressable, Valgrind returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_UNADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE,\
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is undefined for _qzz_len bytes. If any
byte in this range is defined or unaddressable, Valgrind returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
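/*
 * Illustrative sketch: validating a value before handing it to an
 * opaque sink, so Memcheck reports the caller's bug at the call site
 * rather than somewhere inside the sink. fill_header() and fd are
 * hypothetical.
 *
 *	struct header h;
 *	fill_header(&h);
 *	VALGRIND_CHECK_VALUE_IS_DEFINED(h);  // flags uninitialized fields
 *	(void)write(fd, &h, sizeof(h));
 */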
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
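/*
 * Illustrative sketch: run a leak check mid-execution and read the
 * counters back (all four arguments must be lvalues):
 *
 *	unsigned long leaked, dubious, reachable, suppressed;
 *	VALGRIND_DO_LEAK_CHECK;
 *	VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
 *	if (leaked != 0)
 *		fprintf(stderr, "leaked %lu bytes\n", leaked);
 */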
/* Return number of leaked, dubious, reachable and suppressed blocks
found by all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
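/*
 * Illustrative sketch: save a buffer's definedness metadata (one
 * metadata byte per data byte) before temporarily reusing the buffer,
 * then restore it afterwards:
 *
 *	char buf[64];
 *	unsigned char vbits[64];
 *	(void)VALGRIND_GET_VBITS(buf, vbits, sizeof(buf));
 *	// ... scribble on buf ...
 *	(void)VALGRIND_SET_VBITS(buf, vbits, sizeof(buf));
 */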
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
| 15,621 | 47.666667 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind/helgrind.h
|
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one file
(helgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2017 OpenWorks LLP
info@open-works.co.uk
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(helgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __HELGRIND_H
#define __HELGRIND_H
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),
/* The rest are for Helgrind's internal use. Not for end-user
use. Do not use them unless you are a Valgrind developer. */
/* Notify the tool what this thread's pthread_t is. */
_VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
+ 256,
_VG_USERREQ__HG_PTH_API_ERROR, /* char*, int */
_VG_USERREQ__HG_PTHREAD_JOIN_POST, /* pthread_t of quitter */
_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST, /* pth_mx_t*, long mbRec */
_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE, /* pth_mx_t*, long isInit */
_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE, /* pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST, /* pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE, /* void*, long isTryLock */
_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */
_VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE, /* pth_cond_t* */
_VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
_VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE, /* pth_cond_t*, pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST, /* pth_cond_t*, pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE, /* pth_cond_t*, long isInit */
_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, /* pth_rwlk_t* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE, /* pth_rwlk_t*, long isW */
_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, /* void*, long isW */
_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, /* void* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
_VG_USERREQ__HG_POSIX_SEM_INIT_POST, /* sem_t*, ulong value */
_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_RELEASED, /* void* */
_VG_USERREQ__HG_POSIX_SEM_ACQUIRED, /* void* */
_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, /* pth_bar_t*, ulong, ulong */
_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, /* pth_bar_t* */
_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
_VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE, /* pth_slk_t* */
_VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
_VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE, /* pth_slk_t* */
_VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST, /* pth_slk_t* */
_VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE, /* pth_slk_t* */
_VG_USERREQ__HG_CLIENTREQ_UNIMP, /* char* */
_VG_USERREQ__HG_USERSO_SEND_PRE, /* arbitrary UWord SO-tag */
_VG_USERREQ__HG_USERSO_RECV_POST, /* arbitrary UWord SO-tag */
_VG_USERREQ__HG_USERSO_FORGET_ALL, /* arbitrary UWord SO-tag */
_VG_USERREQ__HG_RESERVED2, /* Do not use */
_VG_USERREQ__HG_RESERVED3, /* Do not use */
_VG_USERREQ__HG_RESERVED4, /* Do not use */
_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */
_VG_USERREQ__HG_ARANGE_MAKE_TRACKED, /* Addr a, ulong len */
_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
_VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
_VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t*/
_VG_USERREQ__HG_GNAT_MASTER_HOOK, /* void*d,void*m,Word ml */
_VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */
_VG_USERREQ__HG_GET_ABITS, /* Addr a,Addr abits, ulong len */
_VG_USERREQ__HG_PTHREAD_CREATE_BEGIN,
_VG_USERREQ__HG_PTHREAD_CREATE_END,
_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE, /* pth_mx_t*,long isTryLock */
_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST, /* pth_mx_t *,long tookLock */
_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST, /* pth_rwlk_t*,long isW,long */
_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE, /* pth_rwlk_t* */
_VG_USERREQ__HG_POSIX_SEM_POST_PRE, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_POST_POST, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_WAIT_PRE, /* sem_t* */
_VG_USERREQ__HG_POSIX_SEM_WAIT_POST, /* sem_t*, long tookLock */
_VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST, /* pth_cond_t* */
_VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */
_VG_USERREQ__HG_RTLD_BIND_GUARD, /* int flags */
_VG_USERREQ__HG_RTLD_BIND_CLEAR, /* int flags */
_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN /* void*d, void*m */
} Vg_TCheckClientRequest;
/*----------------------------------------------------------------*/
/*--- ---*/
/*--- Implementation-only facilities. Not for end-user use. ---*/
/*--- For end-user facilities see below (the next section in ---*/
/*--- this file.) ---*/
/*--- ---*/
/*----------------------------------------------------------------*/
/* Do a client request. These are macros rather than functions so
as to avoid having an extra frame in stack traces.
NB: these duplicate definitions in hg_intercepts.c. But here, we
have to make do with weaker typing (no definition of Word etc) and
no assertions, whereas in hg_intercepts.c we can use those facilities.
Obviously it's important the two sets of definitions are kept in
sync.
The commented-out asserts should actually hold, but unfortunately
they can't be allowed to be visible here, because that would
require the end-user code to #include <assert.h>.
*/
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F) \
do { \
long int _arg1; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
(_creqF), \
_arg1, 0,0,0,0); \
} while (0)
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
do { \
long int _qzz_res; \
long int _arg1; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \
(_dfltF), \
(_creqF), \
_arg1, 0,0,0,0); \
_resF = _qzz_res; \
} while (0)
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
do { \
long int _arg1, _arg2; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
/* assert(sizeof(_ty2F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
_arg2 = (long int)(_arg2F); \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
(_creqF), \
_arg1,_arg2,0,0,0); \
} while (0)
#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F, \
_ty2F,_arg2F, _ty3F, _arg3F) \
do { \
long int _arg1, _arg2, _arg3; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
/* assert(sizeof(_ty2F) == sizeof(long int)); */ \
/* assert(sizeof(_ty3F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
_arg2 = (long int)(_arg2F); \
_arg3 = (long int)(_arg3F); \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
(_creqF), \
_arg1,_arg2,_arg3,0,0); \
} while (0)
#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
_ty2F,_arg2F, _ty3F, _arg3F) \
do { \
long int _qzz_res; \
long int _arg1, _arg2, _arg3; \
/* assert(sizeof(_ty1F) == sizeof(long int)); */ \
_arg1 = (long int)(_arg1F); \
_arg2 = (long int)(_arg2F); \
_arg3 = (long int)(_arg3F); \
/* \
* XXX: here PMDK's version deviates from upstream;\
* without the fix, this code generates \
* a sign-conversion warning, which PMDK's \
* "awesome" build system promotes to an error \
*/ \
_qzz_res = (long)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
(_dfltF), \
(_creqF), \
_arg1,_arg2,_arg3,0,0); \
_resF = _qzz_res; \
} while (0)
#define _HG_CLIENTREQ_UNIMP(_qzz_str) \
DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP, \
(char*),(_qzz_str))
/*----------------------------------------------------------------*/
/*--- ---*/
/*--- Helgrind-native requests. These allow access to ---*/
/*--- the same set of annotation primitives that are used ---*/
/*--- to build the POSIX pthread wrappers. ---*/
/*--- ---*/
/*----------------------------------------------------------------*/
/* ----------------------------------------------------------
For describing ordinary mutexes (non-rwlocks). For rwlock
descriptions see ANNOTATE_RWLOCK_* below.
---------------------------------------------------------- */
/* Notify here immediately after mutex creation. _mbRec == 0 for a
non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec) \
DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST, \
void*,(_mutex), long,(_mbRec))
/* Notify here immediately before mutex acquisition. _isTryLock == 0
for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock) \
DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE, \
void*,(_mutex), long,(_isTryLock))
/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, \
void*,(_mutex))
/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE, \
void*,(_mutex))
/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST, \
void*,(_mutex))
/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE, \
void*,(_mutex))
/* ----------------------------------------------------------
For describing semaphores.
---------------------------------------------------------- */
/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value) \
DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST, \
void*, (_sem), unsigned long, (_value))
/* Notify here immediately after a semaphore wait (an acquire-style
operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem) \
DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED, \
void*,(_sem))
/* Notify here immediately before semaphore post (a release-style
operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem) \
DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED, \
void*,(_sem))
/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem) \
DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, \
void*, (_sem))
/* ----------------------------------------------------------
For describing barriers.
---------------------------------------------------------- */
/* Notify here immediately before barrier creation. _count is the
capacity. _resizable == 0 means the barrier may not be resized, 1
means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, \
void*,(_bar), \
unsigned long,(_count), \
unsigned long,(_resizable))
/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, \
void*,(_bar))
/* Notify here immediately before a resize (change of barrier
capacity). If _newcount >= the existing capacity, then there is no
change in the state of any threads waiting at the barrier. If
_newcount < the existing capacity, and >= _newcount threads are
currently waiting at the barrier, then this notification is
considered to also have the effect of telling the checker that all
waiting threads have now moved past the barrier. (I can't think of
any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount) \
DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, \
void*,(_bar), \
unsigned long,(_newcount))
/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, \
void*,(_bar))
/* ----------------------------------------------------------
For describing memory ownership changes.
---------------------------------------------------------- */
/* Clean memory state. This makes Helgrind forget everything it knew
about the specified memory range. Effectively this announces that
the specified memory range now "belongs" to the calling thread, so
that: (1) the calling thread can access it safely without
synchronisation, and (2) all other threads must sync with this one
to access it safely. This is particularly useful for memory
allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len) \
DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY, \
void*,(_qzz_start), \
unsigned long,(_qzz_len))
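/*
 * Illustrative sketch: a pool allocator handing a recycled chunk to
 * the calling thread scrubs Helgrind's history for it first, so stale
 * accesses by the previous owner are not reported as races.
 * pop_free_chunk() is hypothetical.
 *
 *	void *pool_get(struct pool *p, size_t n) {
 *		void *chunk = pop_free_chunk(p, n);
 *		VALGRIND_HG_CLEAN_MEMORY(chunk, n);
 *		return chunk;
 *	}
 */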
/* The same, but for the heap block starting at _qzz_blockstart. This
allows painting when we only know the address of an object, but not
its size, which is sometimes the case in C++ code involving
inheritance, and in which RTTI is not, for whatever reason,
available. Returns the number of bytes painted, which can be zero
for a zero-sized block. Hence, a return value >= 0 indicates success
(the block was found), -1 indicates the block was not found, and -2
is returned when not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart) \
(__extension__ \
({long int _npainted; \
DO_CREQ_W_W(_npainted, (-2)/*default*/, \
_VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, \
void*,(_qzz_blockstart)); \
_npainted; \
}))
/* ----------------------------------------------------------
For error control.
---------------------------------------------------------- */
/* Tell H that an address range is not to be "tracked" until further
notice. This puts it in the NOACCESS state, in which case we
ignore all reads and writes to it. Useful for ignoring ranges of
memory where there might be races we don't want to see. If the
memory is subsequently reallocated via malloc/new/stack allocation,
then it is put back in the trackable state. Hence it is safe in
the situation where checking is disabled, the containing area is
deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len) \
DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, \
void*,(_qzz_start), \
unsigned long,(_qzz_len))
/* And put it back into the normal "tracked" state, that is, make it
once again subject to the normal race-checking machinery. This
puts it in the same state as new memory allocated by this thread --
that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len) \
DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED, \
void*,(_qzz_start), \
unsigned long,(_qzz_len))
/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
If zzabits array is provided, copy the accessibility bits in zzabits.
Return values:
-2 if not running on helgrind
-1 if any parts of zzabits is not addressable
>= 0 : success.
When success, it returns the nr of addressable bytes found.
So, to check that a whole range is addressable, check
VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
In addition, if you want to examine the addressability of each
byte of the range, you need to provide a non NULL ptr as
second argument, pointing to an array of unsigned char
of length len.
Addressable bytes are indicated with 0xff.
Non-addressable bytes are indicated with 0x00.
*/
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes) \
(__extension__ \
({long int _res; \
/* \
* XXX: here PMDK's version deviates from upstream; \
* without the fix, this macro doesn't return \
* the default value correctly \
*/ \
DO_CREQ_W_WWW(_res, (-2LL)/*default*/, \
_VG_USERREQ__HG_GET_ABITS, \
void*,(zza), void*,(zzabits), \
unsigned long,(zznbytes)); \
_res; \
}))
/* End-user request for Ada applications compiled with GNAT.
Helgrind understands the Ada concept of Ada task dependencies and
terminations. See Ada Reference Manual section 9.3 "Task Dependence
- Termination of Tasks".
However, in some cases, the master of (terminated) tasks completes
only when the application exits. An example of this is dynamically
allocated tasks with an access type defined at Library Level.
By default, the state of such tasks in Helgrind will be 'exited but
join not done yet'. Many tasks in such a state are however causing
Helgrind CPU and memory to increase significantly.
VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN can be used to indicate
to Helgrind that a not-yet-completed master has nonetheless already
'seen' the termination of a dependent: this is conceptually the
same as a pthread_join and causes the cleanup of the dependent
as done by Helgrind when a master completes.
This avoids the overhead in Helgrind caused by such tasks.
A typical usage for a master to indicate it has conceptually done a
join with a dependent task before the master completes is:
while not Dep_Task'Terminated loop
... do whatever to wait for Dep_Task termination.
end loop;
VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN
(Dep_Task'Identity,
Ada.Task_Identification.Current_Task);
Note that VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN should be a binding
to a C function built with the below macro. */
#define VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN(_qzz_dep, _qzz_master) \
DO_CREQ_v_WW(_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN, \
void*,(_qzz_dep), \
void*,(_qzz_master))
/*----------------------------------------------------------------*/
/*--- ---*/
/*--- ThreadSanitizer-compatible requests ---*/
/*--- (mostly unimplemented) ---*/
/*--- ---*/
/*----------------------------------------------------------------*/
/* A quite-broad set of annotations, as used in the ThreadSanitizer
project. This implementation aims to be a (source-level)
compatible implementation of the macros defined in:
http://code.google.com/p/data-race-test/source
/browse/trunk/dynamic_annotations/dynamic_annotations.h
(some of the comments below are taken from the above file)
The implementation here is very incomplete, and intended as a
starting point. Many of the macros are unimplemented. Rather than
allowing unimplemented macros to silently do nothing, they cause an
assertion. Intention is to implement them on demand.
The major use of these macros is to make visible to race detectors,
the behaviour (effects) of user-implemented synchronisation
primitives, that the detectors could not otherwise deduce from the
normal observation of pthread etc calls.
Some of the macros are no-ops in Helgrind. That's because Helgrind
is a pure happens-before detector, whereas ThreadSanitizer uses a
hybrid lockset and happens-before scheme, which requires more
accurate annotations for correct operation.
The macros are listed in the same order as in dynamic_annotations.h
(URL just above).
I should point out that I am less than clear about the intended
semantics of quite a number of them. Comments and clarifications
welcomed!
*/
/* ----------------------------------------------------------------
These four allow description of user-level condition variables,
apparently in the style of POSIX's pthread_cond_t. Currently
unimplemented and will assert.
----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
succeeded and the lock at address LOCK is now held. CV and LOCK
are completely arbitrary memory addresses which presumably mean
something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")
/* Report that wait on the condition variable at CV has succeeded.
Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")
/* Report that we are about to signal on the condition variable at
address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")
/* Report that we are about to signal_all on the condition variable at
CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
/* ----------------------------------------------------------------
Create completely arbitrary happens-before edges between threads.
If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
(w.r.t. some notional global clock for the computation) thread Tm
does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
memory accesses done by T1 .. Tn before the ..BEFORE.. call as
happening-before all memory accesses done by Tm after the
..AFTER.. call. Hence Helgrind won't complain about races if Tm's
accesses afterwards are to the same locations as accesses before by
any of T1 .. Tn.
OBJ is a machine word (unsigned long, or void*), is completely
arbitrary, and denotes the identity of some synchronisation object
you're modelling.
You must do the _BEFORE call just before the real sync event on the
signaller's side, and _AFTER just after the real sync event on the
waiter's side.
If none of the rest of these macros make sense to you, at least
take the time to understand these two. They form the very essence
of describing arbitrary inter-thread synchronisation events to
Helgrind. You can get a long way just with them alone.
See also, extensive discussion on semantics of this in
https://bugs.kde.org/show_bug.cgi?id=243935
ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
as bug 243935 is fully resolved. It instructs Helgrind to forget
about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
effect putting it back in its original state. Once in that state,
a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
thread.
An implementation may optionally release resources it has
associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
happens. Users are recommended to use
ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
synchronisation object is no longer needed, so as to avoid
potential indefinite resource leaks.
----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj) \
DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))
#define ANNOTATE_HAPPENS_AFTER(obj) \
DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))
#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
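/*
 * Illustrative sketch: describing a hand-rolled release/acquire flag.
 * The signaller annotates just before the real release store, the
 * waiter just after observing it, so the accesses to 'data' are
 * ordered and not reported as racy. compute() and use() are
 * placeholders.
 *
 *	// signaller
 *	data = compute();
 *	ANNOTATE_HAPPENS_BEFORE(&ready);
 *	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
 *
 *	// waiter
 *	while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
 *		;
 *	ANNOTATE_HAPPENS_AFTER(&ready);
 *	use(data);
 */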
/* ----------------------------------------------------------------
Memory publishing. The TSan sources say:
Report that the bytes in the range [pointer, pointer+size) are about
to be published safely. The race checker will create a happens-before
arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
subsequent accesses to this memory.
I'm not sure I understand what this means exactly, nor whether it
is relevant for a pure h-b detector. Leaving unimplemented for
now.
----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")
/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */
/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */
/* ----------------------------------------------------------------
TSan sources say:
Instruct the tool to create a happens-before arc between
MU->Unlock() and MU->Lock(). This annotation may slow down the
race detector; normally it is used only when it would be
difficult to annotate each of the mutex's critical sections
individually using the annotations above.
If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
In any case, leave as unimp for now. I'm unsure about the intended
behaviour.
----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")
/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */
/* ----------------------------------------------------------------
TSan sources say:
Annotations useful when defining memory allocators, or when
memory that was protected in one way starts to be protected in
another.
Report that new memory at "address" of size "size" has been
allocated. This might be used when the memory has been retrieved
from a free list and is about to be reused, or when the locking
discipline for a variable changes.
AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size) \
VALGRIND_HG_CLEAN_MEMORY((address), (size))
/* ----------------------------------------------------------------
TSan sources say:
Annotations useful when defining FIFO queues that transfer data
between threads.
All unimplemented. Am not claiming to understand this (yet).
----------------------------------------------------------------
*/
/* Report that the producer-consumer queue object at address PCQ has
been created. The ANNOTATE_PCQ_* annotations should be used only
for FIFO queues. For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
(for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")
/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")
/* Report that we are about to put an element into a FIFO queue at
address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")
/* Report that we've just got an element from a FIFO queue at address
PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
/* ----------------------------------------------------------------
Annotations that suppress errors. It is usually better to express
the program's synchronization using the other annotations, but
these can be used when all else fails.
Currently these are all unimplemented. I can't think of a simple
way to implement them without at least some performance overhead.
----------------------------------------------------------------
*/
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
XXX: what's this actually supposed to do? And what's the type of
DESCRIPTION? When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
VALGRIND_HG_DISABLE_CHECKING(address, size)
/* Request the analysis tool to ignore all reads in the current thread
until ANNOTATE_IGNORE_READS_END is called. Useful to ignore
intentional racey reads, while still checking other reads and all
writes. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
_HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")
/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
_HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
_HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")
/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
_HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")
/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)
/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
} while (0)
/* ----------------------------------------------------------------
Annotations useful for debugging.
Again, so far unimplemented, partly for performance reasons.
----------------------------------------------------------------
*/
/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")
/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
/* ----------------------------------------------------------------
Annotations for describing behaviour of user-implemented lock
primitives. In all cases, the LOCK argument is a completely
arbitrary machine word (unsigned long, or void*) and can be any
value which gives a unique identity to the lock objects being
modelled.
We just pretend they're ordinary posix rwlocks. That'll probably
give some rather confusing wording in error messages, claiming that
the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
they are not. Ah well.
----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, \
void*,(lock))
/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \
void*,(lock))
/* Report that the lock at address LOCK has just been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, \
void*,(lock), unsigned long,(is_w))
/* Report that the lock at address LOCK is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, \
void*,(lock)) /* is_w is ignored */
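/*
 * Illustrative sketch: modelling a home-grown spinlock so Helgrind
 * treats it like a write-locked rwlock. The struct is hypothetical.
 *
 *	void my_lock_init(struct mylock *l) {
 *		l->word = 0;
 *		ANNOTATE_RWLOCK_CREATE(l);
 *	}
 *	void my_lock(struct mylock *l) {
 *		while (__atomic_exchange_n(&l->word, 1, __ATOMIC_ACQUIRE))
 *			;
 *		ANNOTATE_RWLOCK_ACQUIRED(l, 1);  // 1 == writer
 *	}
 *	void my_unlock(struct mylock *l) {
 *		ANNOTATE_RWLOCK_RELEASED(l, 1);
 *		__atomic_store_n(&l->word, 0, __ATOMIC_RELEASE);
 *	}
 */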
/* -------------------------------------------------------------
Annotations useful when implementing barriers. They are not
normally needed by modules that merely use barriers.
The "barrier" argument is a pointer to the barrier object.
----------------------------------------------------------------
*/
/* Report that the "barrier" has been initialized with initial
"count". If 'reinitialization_allowed' is true, initialization is
allowed to happen multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
/* ----------------------------------------------------------------
Annotations useful for testing race detectors.
----------------------------------------------------------------
*/
/* Report that we expect a race on the variable at ADDRESS. Use only
in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")
/* A no-op. Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
_HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
/* Force the race detector to flush its state. The actual effect depends on
* the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
_HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")
#endif /* __HELGRIND_H */
| 39,544 | 45.965558 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind/valgrind.h
|
/* -*- c -*-
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (valgrind.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(valgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
little bit more slowly than they otherwise would, but otherwise
unchanged. When not running on valgrind, each client request
consumes very few (eg. 7) instructions, so the resulting performance
loss is negligible unless you plan to execute client requests
millions of times per second. Nevertheless, if that is still a
problem, you can compile with the NVALGRIND symbol defined (gcc
-DNVALGRIND) so that client requests are not even compiled in. */
#ifndef __VALGRIND_H
#define __VALGRIND_H
/* ------------------------------------------------------------------ */
/* VERSION NUMBER OF VALGRIND */
/* ------------------------------------------------------------------ */
/* Specify Valgrind's version number, so that user code can
conditionally compile based on our version number. Note that these
were introduced at version 3.6 and so do not exist in version 3.5
or earlier. The recommended way to use them to check for "version
X.Y or later" is (eg)
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
&& (__VALGRIND_MAJOR__ > 3 \
|| (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__ 3
#define __VALGRIND_MINOR__ 14
#include <stdarg.h>
/* Nb: this file might be included in a file compiled with -ansi. So
we can't use C++ style "//" comments nor the "asm" keyword (instead
use "__asm__"). */
/* Derive some tags indicating what the target platform is. Note
that in this file we're using the compiler's CPP symbols for
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
latter (on Linux, that is).
Misc note: how to find out what's predefined in gcc by default:
gcc -Wp,-dM somefile.c
*/
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_arm64_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
#if defined(__APPLE__) && defined(__i386__)
# define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define PLAT_amd64_darwin 1
#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
|| defined(__CYGWIN32__) \
|| (defined(_WIN32) && defined(_M_IX86))
# define PLAT_x86_win32 1
#elif defined(__MINGW64__) \
|| (defined(_WIN64) && defined(_M_X64))
# define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__)
# define PLAT_amd64_linux 1
#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2
/* Big Endian uses ELF version 1 */
# define PLAT_ppc64be_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2
/* Little Endian uses ELF version 2 */
# define PLAT_ppc64le_linux 1
#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
# define PLAT_arm_linux 1
#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__)
# define PLAT_arm64_linux 1
#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
# define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==64)
# define PLAT_mips64_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips!=64)
# define PLAT_mips32_linux 1
#elif defined(__sun) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
# define PLAT_amd64_solaris 1
#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
# if !defined(NVALGRIND)
# define NVALGRIND 1
# endif
#endif
/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
/*
* VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
* request. Accepts both pointers and integers as arguments.
*
* VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
* client request that does not return a value.
* VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
* client request and whose value equals the client request result. Accepts
* both pointers and integers as arguments. Note that such calls are not
* necessarily pure functions -- they may have side effects.
*/
#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
_zzq_request, _zzq_arg1, _zzq_arg2, \
_zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
_zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
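/* For example, a sketch of using the expression form to detect
   whether the program is running under Valgrind, with 0 as the
   default value returned when running on a real CPU.
   VG_USERREQ__RUNNING_ON_VALGRIND is one of the request codes
   defined further down in this file:
      int under_valgrind =
         (int) VALGRIND_DO_CLIENT_REQUEST_EXPR(
                  0, VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);
*/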
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
from the compiled code (analogous to NDEBUG's effects on
assert()) */
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
(_zzq_default)
#else /* ! NVALGRIND */
/* The following defines the magic code sequences which the JITter
spots and handles magically. Don't look too closely at them as
they will rot your brain.
The assembly code sequences for all architectures are in this one
file. This is because this file must be stand-alone, and we don't
want to have multiple files.
For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
value gets put in the return slot, so that everything works when
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
_zzq_arg1..5 request params
The other two macros are used to support function wrapping, and are
a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
guest's NRADDR pseudo-register and whatever other information is
needed to safely call the original from the wrapper: on
ppc64-linux, the R2 value at the divert point is also needed. This
information is abstracted into a user-visible type, OrigFn.
VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
guest, but guarantees that the branch instruction will not be
redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
complete inline asm, since it needs to be combined with more magic
inline asm stuff to be useful.
*/
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
|| (defined(PLAT_x86_win32) && defined(__GNUC__)) \
|| defined(PLAT_x86_solaris)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EDX = client_request ( %EAX ) */ \
"xchgl %%ebx,%%ebx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
"xchgl %%ecx,%%ecx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgl %%edi,%%edi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__)
|| PLAT_x86_solaris */
/* ------------------------- x86-Win32 ------------------------- */
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(_MSC_VER)
#define __SPECIAL_INSTRUCTION_PREAMBLE \
__asm rol edi, 3 __asm rol edi, 13 \
__asm rol edi, 29 __asm rol edi, 19
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
(uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
(uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
(uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
static __inline uintptr_t
valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
uintptr_t _zzq_arg5)
{
volatile uintptr_t _zzq_args[6];
volatile unsigned int _zzq_result;
_zzq_args[0] = (uintptr_t)(_zzq_request);
_zzq_args[1] = (uintptr_t)(_zzq_arg1);
_zzq_args[2] = (uintptr_t)(_zzq_arg2);
_zzq_args[3] = (uintptr_t)(_zzq_arg3);
_zzq_args[4] = (uintptr_t)(_zzq_arg4);
_zzq_args[5] = (uintptr_t)(_zzq_arg5);
__asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
__SPECIAL_INSTRUCTION_PREAMBLE
/* %EDX = client_request ( %EAX ) */
__asm xchg ebx,ebx
__asm mov _zzq_result, edx
}
return _zzq_result;
}
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
__asm xchg ecx,ecx \
__asm mov __addr, eax \
} \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX ERROR
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
__asm xchg edi,edi \
} \
} while (0)
#else
#error Unsupported compiler.
#endif
#endif /* PLAT_x86_win32 */
/* ----------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
|| defined(PLAT_amd64_solaris) \
|| (defined(PLAT_amd64_win64) && defined(__GNUC__))
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_RAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgq %%rdi,%%rdi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris
|| (PLAT_amd64_win64 && __GNUC__) */
/* ------------------------- amd64-Win64 ------------------------- */
#if defined(PLAT_amd64_win64) && !defined(__GNUC__)
#error Unsupported compiler.
#endif /* PLAT_amd64_win64 */
/* ------------------------ ppc32-linux ------------------------ */
#if defined(PLAT_ppc32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \
"rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
#if defined(PLAT_ppc64be_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64be_linux */
#if defined(PLAT_ppc64le_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R12 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */
#if defined(PLAT_arm_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
"mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("mov r3, %1\n\t" /*default*/ \
"mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = client_request ( R4 ) */ \
"orr r10, r10, r10\n\t" \
"mov %0, r3" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "cc","memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = guest_NRADDR */ \
"orr r11, r11, r11\n\t" \
"mov %0, r3" \
: "=r" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R4 */ \
"orr r12, r12, r12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr r9, r9, r9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm_linux */
/* ------------------------ arm64-linux ------------------------- */
#if defined(PLAT_arm64_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \
"ror x12, x12, #51 ; ror x12, x12, #61 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("mov x3, %1\n\t" /*default*/ \
"mov x4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = client_request ( X4 ) */ \
"orr x10, x10, x10\n\t" \
"mov %0, x3" /*result*/ \
: "=r" (_zzq_result) \
: "r" ((unsigned long int)(_zzq_default)), \
"r" (&_zzq_args[0]) \
: "cc","memory", "x3", "x4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = guest_NRADDR */ \
"orr x11, x11, x11\n\t" \
"mov %0, x3" \
: "=r" (__addr) \
: \
: "cc", "memory", "x3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir X8 */ \
"orr x12, x12, x12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr x9, x9, x9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm64_linux */
/* ------------------------ s390x-linux ------------------------ */
#if defined(PLAT_s390x_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
* code. This detection is implemented in platform specific toIR.c
* (e.g. VEX/priv/guest_s390_decoder.c).
*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"lr 15,15\n\t" \
"lr 1,1\n\t" \
"lr 2,2\n\t" \
"lr 3,3\n\t"
#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(/* r2 = args */ \
"lgr 2,%1\n\t" \
/* r3 = default */ \
"lgr 3,%2\n\t" \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CLIENT_REQUEST_CODE \
/* results = r3 */ \
"lgr %0, 3\n\t" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "2", "3", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__GET_NR_CONTEXT_CODE \
"lgr %0, 3\n\t" \
: "=a" (__addr) \
: \
: "cc", "3", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_R1 \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CALL_NO_REDIR_CODE
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__VEX_INJECT_IR_CODE); \
} while (0)
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ---------------- */
#if defined(PLAT_mips32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
/* .word 0x342
* .word 0x742
* .word 0xC2
* .word 0x4C2*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"srl $0, $0, 13\n\t" \
"srl $0, $0, 29\n\t" \
"srl $0, $0, 3\n\t" \
"srl $0, $0, 19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* T3 = client_request ( T4 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%t9 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips32_linux */
/* ------------------------- mips64-linux ---------------- */
#if defined(PLAT_mips64_linux)
typedef
struct {
unsigned long nraddr; /* where's the code? */
}
OrigFn;
/* dsll $0,$0, 3
* dsll $0,$0, 13
* dsll $0,$0, 29
* dsll $0,$0, 19*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
"dsll $0,$0,29 ; dsll $0,$0,19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = client_request ( $12 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11"); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir $25 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips64_linux */
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
/* ------------------------------------------------------------------ */
/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
/* ugly. It's the least-worst tradeoff I can think of. */
/* ------------------------------------------------------------------ */
/* This section defines magic (a.k.a. appalling-hack) macros for doing
guaranteed-no-redirection calls, so as to get from function
wrappers to the functions they are wrapping. The whole point is to
construct standard call sequences, but to do the call itself with a
special no-redirect call pseudo-instruction that the JIT
understands and handles specially. This section is long and
repetitious, and I can't see a way to make it shorter.
The naming scheme is as follows:
CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
'W' stands for "word" and 'v' for "void". Hence there are
different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
and for each, the possibility of returning a word-typed result, or
no result.
*/
/* Use these to write the name of your wrapper. NOTE: duplicates
VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
the default behaviour equivalence class tag "0000" into the name.
See pub_tool_redir.h for details -- normally you don't need to
think about this, though. */
/* Use an extra level of macroisation so as to ensure the soname/fnname
args are fully macro-expanded before pasting them together. */
#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
/* Use this macro from within a wrapper function to collect the
context (address and possibly other info) of the original function.
Once you have that you can then use it in one of the CALL_FN_
macros. The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
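/* For example, here is a sketch of a complete wrapper (following the
   pattern documented in the Valgrind manual) for a function
      int foo ( int x, int y )
   in the main executable, for which the special soname "NONE" is
   used. The wrapper collects the original function's context and
   then calls onwards to it:
   int I_WRAP_SONAME_FNNAME_ZU(NONE, foo) ( int x, int y )
   {
      int    result;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);
      CALL_FN_W_WW(result, fn, x, y);
      return result;
   }
*/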
/* Also provide end-user facilities for function replacement, rather
than wrapping. A replacement function differs from a wrapper in
that it has no way to get hold of the original function being
called, and hence no way to call onwards to it. In a replacement
function, VALGRIND_GET_ORIG_FN always returns zero. */
#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
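/* For example, a sketch of a replacement (as opposed to a wrapper)
   for the same hypothetical foo; calls to foo are diverted here and
   the original is never reached:
   int I_REPLACE_SONAME_FNNAME_ZU(NONE, foo) ( int x, int y )
   {
      return x + y;
   }
*/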
/* Derivatives of the main macros below, for calling functions
returning void. */
#define CALL_FN_v_v(fnptr) \
do { volatile unsigned long _junk; \
CALL_FN_W_v(_junk,fnptr); } while (0)
#define CALL_FN_v_W(fnptr, arg1) \
do { volatile unsigned long _junk; \
CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
#define CALL_FN_v_WW(fnptr, arg1,arg2) \
do { volatile unsigned long _junk; \
CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
do { volatile unsigned long _junk; \
CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
do { volatile unsigned long _junk; \
CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
do { volatile unsigned long _junk; \
CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
do { volatile unsigned long _junk; \
CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
do { volatile unsigned long _junk; \
CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
|| defined(PLAT_x86_solaris)
/* These regs are trashed by the hidden call. No need to mention eax,
as gcc can already see that; mentioning it also causes gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
#define VALGRIND_ALIGN_STACK \
"movl %%esp,%%edi\n\t" \
"andl $0xfffffff0,%%esp\n\t"
#define VALGRIND_RESTORE_STACK \
"movl %%edi,%%esp\n\t"
/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 48(%%eax)\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */
/* ---------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
|| defined(PLAT_amd64_solaris)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
"rdi", "r8", "r9", "r10", "r11"
/* This is all pretty complex. It's so as to make stack unwinding
work reliably. See bug 243270. The basic problem is the sub and
add of 128 of %rsp in all of the following macros. If gcc believes
the CFA is in %rsp, then unwinding may fail, because what's at the
CFA is not what gcc "expected" when it constructs the CFIs for the
places where the macros are instantiated.
But we can't just add a CFI annotation to increase the CFA offset
by 128, to match the sub of 128 from %rsp, because we don't know
whether gcc has chosen %rsp as the CFA at that point, or whether it
has chosen some other register (eg, %rbp). In the latter case,
adding a CFI annotation to change the CFA offset is simply wrong.
So the solution is to get hold of the CFA using
__builtin_dwarf_cfa(), put it in a known register, and add a
CFI annotation to say what the register is. We choose %rbp for
this (perhaps perversely), because:
(1) %rbp is already subject to unwinding. If a new register was
chosen then the unwinder would have to unwind it in all stack
traces, which is expensive, and
(2) %rbp is already subject to precise exception updates in the
JIT. If a new register was chosen, we'd have to have precise
exceptions for it too, which reduces performance of the
generated code.
However .. one extra complication. We can't just whack the result
of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
list of trashed registers at the end of the inline assembly
fragments; gcc won't allow %rbp to appear in that list. Hence
instead we need to stash %rbp in %r15 for the duration of the asm,
and say that %r15 is trashed instead. gcc seems happy to go with
that.
Oh .. and this all needs to be conditionalised so that it is
unchanged from before this commit, when compiled with older gccs
that don't support __builtin_dwarf_cfa. Furthermore, since
this header file is freestanding, it has to be independent of
config.h, and so the following conditionalisation cannot depend on
configure time checks.
Although it's not clear from
'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
this expression excludes Darwin.
.cfi directives in Darwin assembly appear to be completely
different and I haven't investigated how they work.
For even more entertainment value, note we have to use the
completely undocumented __builtin_dwarf_cfa(), which appears to
really compute the CFA, whereas __builtin_frame_address(0) claims
to but actually doesn't. See
https://bugs.kde.org/show_bug.cgi?id=243270#c47
*/
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
# define __FRAME_POINTER \
,"r"(__builtin_dwarf_cfa())
# define VALGRIND_CFI_PROLOGUE \
"movq %%rbp, %%r15\n\t" \
"movq %2, %%rbp\n\t" \
".cfi_remember_state\n\t" \
".cfi_def_cfa rbp, 0\n\t"
# define VALGRIND_CFI_EPILOGUE \
"movq %%r15, %%rbp\n\t" \
".cfi_restore_state\n\t"
#else
# define __FRAME_POINTER
# define VALGRIND_CFI_PROLOGUE
# define VALGRIND_CFI_EPILOGUE
#endif
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
#define VALGRIND_ALIGN_STACK \
"movq %%rsp,%%r14\n\t" \
"andq $0xfffffffffffffff0,%%rsp\n\t"
#define VALGRIND_RESTORE_STACK \
"movq %%r14,%%rsp\n\t"
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
macros. In order not to trash the stack redzone, we need to drop
%rsp by 128 before the hidden call, and restore afterwards. The
nastiness is that it is only by luck that the stack still appears
to be unwindable during the hidden call - since then the behaviour
of any routine using this macro does not match what the CFI data
says. Sigh.
Why is this important? Imagine that a wrapper has a stack
allocated local, and passes to the hidden call, a pointer to it.
Because gcc does not know about the hidden call, it may allocate
that local in the redzone. Unfortunately the hidden call may then
trash it before it comes to use it. So we must step clear of the
redzone, for the duration of the hidden call, to make it safe.
Probably the same problem afflicts the other redzone-style ABIs too
(ppc64-linux); but for those, the stack is
self-describing (none of this CFI nonsense), so at least messing
with the stack pointer doesn't give a danger of non-unwindable
stack. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 96(%%rax)\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
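/* Usage sketch (illustrative, not part of the original header): a
   function wrapper fetches the original function with
   VALGRIND_GET_ORIG_FN and forwards through the CALL_FN_ macro
   matching the argument count, e.g.

      int I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, foo) ( int x, int y )
      {
         int    r;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_WW(r, fn, x, y);
         return r;
      }
*/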
/* ------------------------ ppc32-linux ------------------------ */
#if defined(PLAT_ppc32_linux)
/* This is useful for finding out about the on-stack stuff:
extern int f9 ( int,int,int,int,int,int,int,int,int );
extern int f10 ( int,int,int,int,int,int,int,int,int,int );
extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
int g9 ( void ) {
return f9(11,22,33,44,55,66,77,88,99);
}
int g10 ( void ) {
return f10(11,22,33,44,55,66,77,88,99,110);
}
int g11 ( void ) {
return f11(11,22,33,44,55,66,77,88,99,110,121);
}
int g12 ( void ) {
return f12(11,22,33,44,55,66,77,88,99,110,121,132);
}
*/
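/* (One way to use the functions above: compile the g* bodies with
   "gcc -S" for the target and read the generated assembly to see
   which stack offsets the ABI assigns to args 9..12.) */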
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, as gcc may not keep the
   stack pointer aligned if it doesn't realise calls are being
   made to other functions. */
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save sp in r28 (callee-saved) */ \
      "rlwinm 1,1,0,0,27\n\t" /* clear low 4 bits: 16-align sp */
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
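/* Why 8(1) above: in the ppc32 SVR4 ABI the first two words of a
   frame hold the back chain and the LR save word, so the parameter
   save area for stacked arguments begins at offset 8 from the new
   sp (a reading of the stores above, not an upstream comment). */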
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg11 */ \
"lwz 3,44(11)\n\t" \
"stw 3,16(1)\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
_argvec[12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg12 */ \
"lwz 3,48(11)\n\t" \
"stw 3,20(1)\n\t" \
/* arg11 */ \
"lwz 3,44(11)\n\t" \
"stw 3,16(1)\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc32_linux */
/* ----------------------- ppc64be-linux ----------------------- */
#if defined(PLAT_ppc64be_linux)
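/* Note (not an upstream comment): unlike the ppc32 clobber list,
   the one below omits "r2" -- the TOC pointer is saved and
   restored explicitly by each CALL_FN_ macro in this section. */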
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, as gcc may not keep the
   stack pointer aligned if it doesn't realise calls are being
   made to other functions. */
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save sp in r28 (callee-saved) */ \
      "rldicr 1,1,0,59\n\t" /* clear low 4 bits: 16-align sp */
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
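/* Layout of the argument block, as read from the code below (the
   asm is handed &_argvec[2], so negative offsets reach the TOC
   slots):
      -16(r11) : _argvec[0], scratch: caller's r2 saved here
       -8(r11) : _argvec[1], _orig.r2, the callee's TOC pointer
        0(r11) : _argvec[2], _orig.nraddr, the call target
        8(r11) : arg1, 16(r11) : arg2, and so on. */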
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+0]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+1]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+2]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+3]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+4]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+5]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+6]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+7]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+8]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+9]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
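/* Why 112(1) above: the ELFv1 frame header is 48 bytes and the
   parameter save area mirrors r3..r10 in its first 8 doublewords
   (offsets 48..111), so the first genuinely stacked argument,
   arg9, lands at 48 + 8*8 = 112 (derived from the code, not an
   upstream comment). */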
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+10]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+11]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg11 */ \
"ld 3,88(11)\n\t" \
"std 3,128(1)\n\t" \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+12]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg12 */ \
"ld 3,96(11)\n\t" \
"std 3,136(1)\n\t" \
/* arg11 */ \
"ld 3,88(11)\n\t" \
"std 3,128(1)\n\t" \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc64be_linux */
/* ------------------------ ppc64le-linux ------------------------ */
#if defined(PLAT_ppc64le_linux)
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, as gcc may not keep the
   stack pointer aligned if it doesn't realise calls are being
   made to other functions. */
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save sp in r28 (callee-saved) */ \
      "rldicr 1,1,0,59\n\t" /* clear low 4 bits: 16-align sp */
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
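/* ELFv2 difference worth noting (an observation about the code
   below, not an upstream comment): the target address travels in
   r12 rather than r11, because the ELFv2 global entry point
   computes the callee's TOC from r12; the explicit TOC
   save/restore around the call is otherwise as on ppc64be. */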
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+0]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+1]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+2]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+3]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+4]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+5]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+6]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+7]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+8]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+9]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
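/* Why 96(1) above: the ELFv2 frame header is 32 bytes (smaller
   than ELFv1's 48), so with the 8 register-passed doublewords
   mirrored at offsets 32..95, arg9 lands at 32 + 8*8 = 96
   (derived from the code, not an upstream comment). */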
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+10]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+11]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg11 */ \
"ld 3,88(12)\n\t" \
"std 3,112(1)\n\t" \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+12]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg12 */ \
"ld 3,96(12)\n\t" \
"std 3,120(1)\n\t" \
/* arg11 */ \
"ld 3,88(12)\n\t" \
"std 3,112(1)\n\t" \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */
#if defined(PLAT_arm_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r12", "r14"
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, as gcc may not keep the
   stack pointer aligned if it doesn't realise calls are being
   made to other functions. */
/* This is a bit tricky.  We store the original stack pointer in r10
   as it is callee-saved.  gcc doesn't allow the use of r11 for some
   reason.  Also, we can't directly "bic" the stack pointer in thumb
   mode since r13 isn't an allowed register number in that context.
   So use r4 as a temporary, since that is about to get trashed
   anyway, just after each use of this macro.  The side effect is
   that we must be careful about any future changes, since
   VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
#define VALGRIND_ALIGN_STACK \
      "mov r10, sp\n\t" /* save sp in r10 (callee-saved) */ \
      "mov r4, sp\n\t" \
      "bic r4, r4, #7\n\t" /* clear low 3 bits: 8-align */ \
      "mov sp, r4\n\t"
#define VALGRIND_RESTORE_STACK \
"mov sp, r10\n\t"
/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
long) == 4. */
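/* AAPCS recap, as reflected in the loads below: args 1..4 travel
   in r0..r3 (offsets #4..#16 into _argvec), and anything beyond
   that is pushed onto the stack just before the call. */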
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"push {r0, r1} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"push {r0, r1, r2} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"push {r0, r1, r2, r3} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"push {r0, r1} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"ldr r2, [%1, #48] \n\t" \
"push {r0, r1, r2} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_arm_linux */
/* ------------------------ arm64-linux ------------------------ */
#if defined(PLAT_arm64_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \
"x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \
"x18", "x19", "x20", "x30", \
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \
"v26", "v27", "v28", "v29", "v30", "v31"
/* x21 is callee-saved, so we can use it to save and restore SP around
the hidden call. */
#define VALGRIND_ALIGN_STACK \
"mov x21, sp\n\t" \
"bic sp, x21, #15\n\t"
#define VALGRIND_RESTORE_STACK \
"mov sp, x21\n\t"
/* These CALL_FN_ macros assume that on arm64-linux,
sizeof(unsigned long) == 8. */
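/* Illustrative usage sketch (added for exposition; the wrapped
   function name and soname below are hypothetical, while OrigFn,
   VALGRIND_GET_ORIG_FN and I_WRAP_SONAME_FNNAME_ZU are the real
   valgrind.h wrapping API these macros are designed to serve):

      int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZd0, foo) ( int x )
      {
         int    r;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);   // fn.nraddr becomes _argvec[0]
         CALL_FN_W_W(r, fn, x);      // arg1 -> x0, target addr -> x8
         return r;
      }
*/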
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x20 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x20 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x30 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1, #88] \n\t" \
"str x8, [sp, #16] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11, \
arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x30 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1, #88] \n\t" \
"str x8, [sp, #16] \n\t" \
"ldr x8, [%1, #96] \n\t" \
"str x8, [sp, #24] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_arm64_linux */
/* ------------------------- s390x-linux ------------------------- */
#if defined(PLAT_s390x_linux)
/* Similar workaround as amd64 (see above), but we use r11 as frame
pointer and save the old r11 in r7. r11 might be used for
argvec, therefore we copy argvec in r1 since r1 is clobbered
after the call anyway. */
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
# define __FRAME_POINTER \
,"d"(__builtin_dwarf_cfa())
# define VALGRIND_CFI_PROLOGUE \
".cfi_remember_state\n\t" \
"lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
"lgr 7,11\n\t" \
"lgr 11,%2\n\t" \
".cfi_def_cfa r11, 0\n\t"
# define VALGRIND_CFI_EPILOGUE \
"lgr 11, 7\n\t" \
".cfi_restore_state\n\t"
#else
# define __FRAME_POINTER
# define VALGRIND_CFI_PROLOGUE \
"lgr 1,%1\n\t"
# define VALGRIND_CFI_EPILOGUE
#endif
/* Nb: On s390 the stack pointer is properly aligned *at all times*
according to the s390 GCC maintainer. (The ABI specification is not
precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
VALGRIND_RESTORE_STACK are not defined here. */
/* These regs are trashed by the hidden call. Note that we overwrite
r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
function a proper return address. All others are ABI defined call
clobbers. */
#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
"f0","f1","f2","f3","f4","f5","f6","f7"
/* Nb: Although r11 is modified in the asm snippets below (inside
VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
two reasons:
(1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
modified
(2) GCC will complain that r11 cannot appear inside a clobber section,
when compiled with -O -fno-omit-frame-pointer
*/
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 1, 0(1)\n\t" /* target->r1 */ \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
/* The call abi has the arguments in r2-r6 and stack */
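/* Summary of the marshalling done by the asm below (added comment,
   derived from the code): _argvec[1]..[5] are loaded into r2..r6,
   and _argvec[6] onwards are copied with mvc into the stack
   parameter area starting at 160(r15), just above the 160-byte
   register save area the s390x ABI reserves for the callee. */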
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-168\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,168\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-176\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,176\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7, arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-184\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,184\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7, arg8, arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-192\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,192\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7, arg8, arg9, arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-200\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,200\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7, arg8, arg9, arg10, arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-208\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"mvc 200(8,15), 88(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,208\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-216\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"mvc 200(8,15), 88(1)\n\t" \
"mvc 208(8,15), 96(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"lgr %0, 2\n\t" \
"aghi 15,216\n\t" \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ----------------------- */
#if defined(PLAT_mips32_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
"$25", "$31"
/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
long) == 4. */
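/* Summary of the o32 marshalling done by the asm below (added
   comment, derived from the code): _argvec[1]..[4] go in $4..$7
   (a0..a3), further arguments are stored into the outgoing frame
   from 16($29) upwards, and the target address is placed in $25
   (t9), as position-independent callees expect.  $28 (gp) and
   $31 (ra) are saved and restored around the hidden call. */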
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16\n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" /* arg1*/ \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 24\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 24 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 32\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"nop\n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 32 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 32\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 32 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 40\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 40 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 40\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 40 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 48\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 48 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 48\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 44(%1) \n\t" \
"sw $4, 40($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 48 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 56\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 44(%1) \n\t" \
"sw $4, 40($29) \n\t" \
"lw $4, 48(%1) \n\t" \
"sw $4, 44($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 56 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_mips32_linux */
/* ------------------------- mips64-linux ------------------------- */
#if defined(PLAT_mips64_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
"$25", "$31"
/* These CALL_FN_ macros assume that on mips64-linux,
sizeof(long long) == 8. */
#define MIPS64_LONG2REG_CAST(x) ((long long)(long)(x))
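/* MIPS64_LONG2REG_CAST narrows to (long) and then widens to
   (long long), so a 32-bit value arrives sign-extended to 64 bits,
   matching the n64 convention of keeping 32-bit values
   sign-extended in registers.  Worked example (illustrative):

      int x = -1;
      // MIPS64_LONG2REG_CAST(x) == (long long)(long)(-1)
      //                         == -1LL, i.e. 0xFFFFFFFFFFFFFFFF
*/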
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[1]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
__asm__ volatile( \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[2]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" /* arg1*/ \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[3]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[4]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[5]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[6]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[7]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[8]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[9]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[10]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
__asm__ volatile( \
"dsubu $29, $29, 8\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 8\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[11]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
__asm__ volatile( \
"dsubu $29, $29, 16\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 16\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[12]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
_argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
__asm__ volatile( \
"dsubu $29, $29, 24\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 88(%1)\n\t" \
"sd $4, 16($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 24\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[13]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
_argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
_argvec[12] = MIPS64_LONG2REG_CAST(arg12); \
__asm__ volatile( \
"dsubu $29, $29, 32\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 88(%1)\n\t" \
"sd $4, 16($29)\n\t" \
"ld $4, 96(%1)\n\t" \
"sd $4, 24($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 32\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#endif /* PLAT_mips64_linux */
/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
/* */
/* ------------------------------------------------------------------ */
/* Some request codes. There are many more of these, but most are not
exposed to end-user view. These are the public ones, all of the
form 0x1000 + small_number.
Core ones are in the range 0x00000000--0x0000ffff. The non-public
ones start at 0x2000.
*/
/* These macros are used by tools -- they must be public, but don't
embed them into other programs. */
#define VG_USERREQ_TOOL_BASE(a,b) \
((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
relevant group. */
typedef
enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
/* These allow any function to be called from the simulated
CPU but run on the real CPU. Nb: the first arg passed to
the function is always the ThreadId of the running
thread! So CLIENT_CALL0 actually requires a 1 arg
function, etc. */
VG_USERREQ__CLIENT_CALL0 = 0x1101,
VG_USERREQ__CLIENT_CALL1 = 0x1102,
VG_USERREQ__CLIENT_CALL2 = 0x1103,
VG_USERREQ__CLIENT_CALL3 = 0x1104,
/* Can be useful in regression testing suites -- eg. can
send Valgrind's output to /dev/null and still count
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
/* Allows the client program and/or gdbserver to execute a monitor
command. */
VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
VG_USERREQ__MEMPOOL_FREE = 0x1306,
VG_USERREQ__MEMPOOL_TRIM = 0x1307,
VG_USERREQ__MOVE_MEMPOOL = 0x1308,
VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
/* Allow printfs to valgrind log. */
/* The first two pass the va_list argument by value, which
assumes it is the same size as or smaller than a UWord,
which generally isn't the case. Hence are deprecated.
The second two pass the vargs by reference and so are
immune to this problem. */
/* both :: char* fmt, va_list vargs (DEPRECATED) */
VG_USERREQ__PRINTF = 0x1401,
VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
/* both :: char* fmt, va_list* vargs */
VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
/* Stack support. */
VG_USERREQ__STACK_REGISTER = 0x1501,
VG_USERREQ__STACK_DEREGISTER = 0x1502,
VG_USERREQ__STACK_CHANGE = 0x1503,
/* Wine support */
VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
/* Querying of debug info. */
VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
/* Disable/enable error reporting level. Takes a single
Word arg which is the delta to this thread's error
disablement indicator. Hence 1 disables or further
disables errors, and -1 moves back towards enablement.
Other values are not allowed. */
VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
/* Some requests used for Valgrind internal, such as
self-test or self-hosting. */
/* Initialise IR injection */
VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901,
/* Used by Inner Valgrind to inform Outer Valgrind where to
find the list of inner guest threads */
VG_USERREQ__INNER_THREADS = 0x1902
} Vg_ClientRequest;
#if !defined(__GNUC__)
# define __extension__ /* */
#endif
/* Returns the number of Valgrinds this code is running under. That
is, 0 if running natively, 1 if running under Valgrind, 2 if
running under Valgrind which is running under another Valgrind,
etc. */
#define RUNNING_ON_VALGRIND \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
VG_USERREQ__RUNNING_ON_VALGRIND, \
                                    0, 0, 0, 0, 0)
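/* Usage sketch (hypothetical client code, compiled out): gate a
   workaround on whether the program is being simulated. */
#if 0
static unsigned example_valgrind_depth(void)
{
   /* 0 when native, 1 under Valgrind, 2 under Valgrind-on-Valgrind... */
   unsigned depth = RUNNING_ON_VALGRIND;
   return depth;
}
#endif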
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
_qzz_len - 1]. Useful if you are debugging a JITter or some such,
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \
_qzz_addr, _qzz_len, 0, 0, 0)
#define VALGRIND_INNER_THREADS(_qzz_addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \
_qzz_addr, 0, 0, 0, 0)
/* These requests are for getting Valgrind itself to print something.
Possibly with a backtrace. This is a really ugly hack. The return value
is the number of characters printed, excluding the "**<pid>** " part at the
start and the backtrace (if present). */
#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
/* Modern GCC will optimize the static routine out if unused,
   and the unused attribute will suppress warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
#endif
static int
#if defined(_MSC_VER)
__inline
#endif
VALGRIND_PRINTF(const char *format, ...)
{
#if defined(NVALGRIND)
(void)format;
return 0;
#else /* NVALGRIND */
#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(uintptr_t)format,
(uintptr_t)&vargs,
0, 0, 0);
#else
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(unsigned long)format,
(unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
return (int)_qzz_res;
#endif /* NVALGRIND */
}
#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
#endif
static int
#if defined(_MSC_VER)
__inline
#endif
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
#if defined(NVALGRIND)
(void)format;
return 0;
#else /* NVALGRIND */
#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(uintptr_t)format,
(uintptr_t)&vargs,
0, 0, 0);
#else
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(unsigned long)format,
(unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
return (int)_qzz_res;
#endif /* NVALGRIND */
}
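/* Usage sketch (hypothetical, compiled out): both printf-style requests
   expand to (almost) nothing when running natively, so they can stay in
   production builds. */
#if 0
static void example_valgrind_printf(int iteration)
{
   VALGRIND_PRINTF("entering iteration %d\n", iteration);
   if (iteration < 0)
      VALGRIND_PRINTF_BACKTRACE("negative iteration: %d\n", iteration);
}
#endif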
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitrary function.
Note that the current ThreadId is inserted as the first argument.
So this call:
VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
requires f to have this signature:
Word f(Word tid, Word arg1, Word arg2)
where "Word" is a word-sized type.
Note that these client requests are not entirely reliable. For example,
if you call a function with them that subsequently calls printf(),
there's a high chance Valgrind will crash. Generally, your prospects of
these working are made higher if the called function does not refer to
any global variables, and does not refer to any libc or other functions
(printf et al). Any kind of entanglement with libc or dynamic linking is
likely to have a bad outcome, for tricky reasons which we've grappled
with a lot in the past.
*/
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL0, \
_qyy_fn, \
0, 0, 0, 0)
#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL1, \
_qyy_fn, \
_qyy_arg1, 0, 0, 0)
#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL2, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, 0, 0)
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL3, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, \
_qyy_arg3, 0)
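/* Usage sketch (hypothetical, compiled out): note the extra leading
   ThreadId parameter the called function must accept, as described
   above; 'long' stands in here for the word-sized type. */
#if 0
static long example_add(long tid, long a, long b)
{
   (void)tid;   /* ThreadId inserted automatically by Valgrind */
   return a + b;
}
static void example_non_simd_call(void)
{
   long sum = VALGRIND_NON_SIMD_CALL2(example_add, 20, 22);
   (void)sum;   /* 42 under Valgrind, 0 (the default) natively */
}
#endif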
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
0 /* default return */, \
VG_USERREQ__COUNT_ERRORS, \
0, 0, 0, 0, 0)
/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
when heap blocks are allocated in order to give accurate results. This
happens automatically for the standard allocator functions such as
malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
delete[], etc.
But if your program uses a custom allocator, this doesn't automatically
happen, and Valgrind will not do as well. For example, if you allocate
   superblocks with mmap() and then allocate chunks of the superblocks, all
Valgrind's observations will be at the mmap() level and it won't know that
the chunks should be considered separate entities. In Memcheck's case,
that means you probably won't get heap block overrun detection (because
there won't be redzones marked as unaddressable) and you definitely won't
get any leak detection.
The following client requests allow a custom allocator to be annotated so
that it can be handled accurately by Valgrind.
VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
by a malloc()-like function. For Memcheck (an illustrative case), this
does two things:
- It records that the block has been allocated. This means any addresses
within the block mentioned in error messages will be
identified as belonging to the block. It also means that if the block
isn't freed it will be detected by the leak checker.
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
each block. Adding redzones is recommended as it makes it much more likely
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
for example, if you have a function my_alloc() which calls
internal_alloc(), and the client request is put inside internal_alloc(),
stack traces relating to the heap block will contain entries for both
my_alloc() and internal_alloc(), which is probably not what you want.
For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
custom blocks from within a heap block, B, that has been allocated with
malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
-- the custom blocks will take precedence.
VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
Memcheck, it does two things:
- It records that the block has been deallocated. This assumes that the
block was annotated as having been allocated via
VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- It marks the block as being unaddressable.
VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
heap block is deallocated.
VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
Memcheck, it does four things:
- It records that the size of a block has been changed. This assumes that
the block was annotated as having been allocated via
VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- If the block shrunk, it marks the freed memory as being unaddressable.
- If the block grew, it marks the new area as undefined and defines a red
zone past the end of the new block.
- The V-bits of the overlap between the old and the new block are preserved.
VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
and before deallocation of the old block.
In many cases, these three client requests will not be enough to get your
allocator working well with Memcheck. More specifically, if your allocator
writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
will be necessary to mark the memory as addressable just before the zeroing
occurs, otherwise you'll get a lot of invalid write errors. For example,
you'll need to do this if your allocator recycles freed blocks, but it
zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
Alternatively, if your allocator reuses freed blocks for allocator-internal
data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
Really, what's happening is a blurring of the lines between the client
program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
memory should be considered unaddressable to the client program, but the
allocator knows more than the rest of the client program and so may be able
to safely access it. Extra client requests are necessary for Valgrind to
understand the distinction between the allocator and the rest of the
program.
Ignored if addr == 0.
*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \
addr, sizeB, rzB, is_zeroed, 0)
/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
Ignored if addr == 0.
*/
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \
addr, oldSizeB, newSizeB, rzB, 0)
/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
Ignored if addr == 0.
*/
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \
addr, rzB, 0, 0, 0)
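/* Sketch of annotating a custom allocator (hypothetical bump allocator
   on a static arena, compiled out; rzB of 0 assumes the allocator adds
   no redzones of its own, and alignment handling is elided). */
#if 0
static char example_arena[4096];
static size_t example_used;
static void *my_alloc(size_t n)
{
   void *p = &example_arena[example_used];
   example_used += n;
   VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
   return p;
}
static void my_free(void *p)
{
   /* The arena itself recycles nothing in this sketch. */
   VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
}
#endif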
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
pool, rzB, is_zeroed, 0, 0)
/* Create a memory pool with some flags specifying extended behaviour.
When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used
by the application as superblocks to dole out MALLOC_LIKE blocks using
   VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a two-level
   pool: the first level is the blocks described by VALGRIND_MEMPOOL_ALLOC.
   The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK.
   Note that the association between the pool and the second level blocks
   is implicit: second level blocks will be located inside first level
   blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag
   for such two-level pools, as otherwise valgrind will detect overlapping
memory blocks, and will abort execution (e.g. during leak search).
Such a meta pool can also be marked as an 'auto free' pool using the flag
VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the
VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE
will automatically free the second level blocks that are contained
inside the first level block freed with VALGRIND_MEMPOOL_FREE.
In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls
to VALGRIND_FREELIKE_BLOCK for all the second level blocks included
in the first level block.
Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag
without the VALGRIND_MEMPOOL_METAPOOL flag.
*/
#define VALGRIND_MEMPOOL_AUTO_FREE 1
#define VALGRIND_MEMPOOL_METAPOOL 2
#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
pool, rzB, is_zeroed, flags, 0)
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \
pool, 0, 0, 0, 0)
/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \
pool, addr, size, 0, 0)
/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \
pool, addr, 0, 0, 0)
/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
pool, addr, size, 0, 0)
/* Tell the tool that the pool previously anchored at poolA has moved to
   anchor address poolB. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
poolA, poolB, 0, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \
pool, addrA, addrB, size, 0)
/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS(pool) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_EXISTS, \
pool, 0, 0, 0, 0)
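/* Mempool lifecycle sketch (hypothetical, compiled out): 'superblock'
   is assumed to be one large region the application carves chunks
   from; error handling is elided. */
#if 0
static void example_mempool(char *superblock, size_t size)
{
   VALGRIND_CREATE_MEMPOOL(superblock, /*rzB*/0, /*is_zeroed*/0);
   /* Hand out a 64-byte chunk carved from the superblock... */
   char *chunk = superblock;
   VALGRIND_MEMPOOL_ALLOC(superblock, chunk, 64);
   /* ...and later return it to the pool. */
   VALGRIND_MEMPOOL_FREE(superblock, chunk);
   VALGRIND_DESTROY_MEMPOOL(superblock);
   (void)size;
}
#endif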
/* Mark a piece of memory as being a stack. Returns a stack id.
start is the lowest addressable stack byte, end is the highest
addressable stack byte. */
#define VALGRIND_STACK_REGISTER(start, end) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_REGISTER, \
start, end, 0, 0, 0)
/* Unmark the piece of memory associated with a stack id as being a
stack. */
#define VALGRIND_STACK_DEREGISTER(id) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \
id, 0, 0, 0, 0)
/* Change the start and end address of the stack id.
start is the new lowest addressable stack byte, end is the new highest
addressable stack byte. */
#define VALGRIND_STACK_CHANGE(id, start, end) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \
id, start, end, 0, 0)
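/* Stack registration sketch (hypothetical coroutine stack, compiled
   out): start must be the lowest addressable byte, end the highest. */
#if 0
static void example_register_stack(char *stk, size_t sz)
{
   unsigned id = VALGRIND_STACK_REGISTER(stk, stk + sz - 1);
   /* ... run a coroutine or signal handler on stk ... */
   VALGRIND_STACK_DEREGISTER(id);
}
#endif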
/* Load PDB debug info for Wine PE image_map. */
#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
fd, ptr, total_size, delta, 0)
/* Map a code address to a source file name and line number. buf64
must point to a 64-byte buffer in the caller's address space. The
result will be dumped in there and is guaranteed to be zero
terminated. If no info is found, the first byte is set to zero. */
#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MAP_IP_TO_SRCLOC, \
addr, buf64, 0, 0, 0)
/* Disable error reporting for this thread. Behaves in a stack like
way, so you can safely call this multiple times provided that
VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
to re-enable reporting. The first call of this macro disables
reporting. Subsequent calls have no effect except to increase the
number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
reporting. Child threads do not inherit this setting from their
parents -- they are always created with reporting enabled. */
#define VALGRIND_DISABLE_ERROR_REPORTING \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
1, 0, 0, 0, 0)
/* Re-enable error reporting, as per comments on
VALGRIND_DISABLE_ERROR_REPORTING. */
#define VALGRIND_ENABLE_ERROR_REPORTING \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
-1, 0, 0, 0, 0)
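/* Suppression-scoping sketch (hypothetical, compiled out): the calls
   must be balanced and only affect the calling thread. */
#if 0
static int example_quiet_read(volatile int *shared)
{
   VALGRIND_DISABLE_ERROR_REPORTING;
   int snapshot = *shared;   /* deliberately unsynchronized read */
   VALGRIND_ENABLE_ERROR_REPORTING;
   return snapshot;
}
#endif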
/* Execute a monitor command from the client program.
If a connection is opened with GDB, the output will be sent
according to the output mode set for vgdb.
If no connection is opened, output will go to the log output.
Returns 1 if command not recognised, 0 otherwise. */
#define VALGRIND_MONITOR_COMMAND(command) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
command, 0, 0, 0, 0)
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
#endif /* __VALGRIND_H */
/* ==== NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind/drd.h ==== */
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (drd.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of DRD, a Valgrind tool for verification of
multithreaded programs.
Copyright (C) 2006-2017 Bart Van Assche <[email protected]>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(drd.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __VALGRIND_DRD_H
#define __VALGRIND_DRD_H
#include "valgrind.h"
/** Obtain the thread ID assigned by Valgrind's core. */
#define DRD_GET_VALGRIND_THREADID \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID, \
0, 0, 0, 0, 0)
/** Obtain the thread ID assigned by DRD. */
#define DRD_GET_DRD_THREADID \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DRD_GET_DRD_THREAD_ID, \
0, 0, 0, 0, 0)
/** Tell DRD not to complain about data races for the specified variable. */
#define DRD_IGNORE_VAR(x) ANNOTATE_BENIGN_RACE_SIZED(&(x), sizeof(x), "")
/** Tell DRD to no longer ignore data races for the specified variable. */
#define DRD_STOP_IGNORING_VAR(x) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_FINISH_SUPPRESSION, \
&(x), sizeof(x), 0, 0, 0)
/**
* Tell DRD to trace all memory accesses for the specified variable
* until the memory that was allocated for the variable is freed.
*/
#define DRD_TRACE_VAR(x) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_TRACE_ADDR, \
&(x), sizeof(x), 0, 0, 0)
/**
* Tell DRD to stop tracing memory accesses for the specified variable.
*/
#define DRD_STOP_TRACING_VAR(x) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_STOP_TRACE_ADDR, \
&(x), sizeof(x), 0, 0, 0)
/**
* @defgroup RaceDetectionAnnotations Data race detection annotations.
*
 * @see See also the source file <a href="http://code.google.com/p/data-race-test/source/browse/trunk/dynamic_annotations/dynamic_annotations.h">dynamic_annotations.h</a>
* in the ThreadSanitizer project.
*/
/*@{*/
#ifndef __HELGRIND_H
/**
* Tell DRD to insert a happens-before mark. addr is the address of an object
* that is not a pthread synchronization object.
*/
#define ANNOTATE_HAPPENS_BEFORE(addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE, \
addr, 0, 0, 0, 0)
/**
* Tell DRD that the memory accesses executed after this annotation will
* happen after all memory accesses performed before all preceding
* ANNOTATE_HAPPENS_BEFORE(addr). addr is the address of an object that is not
* a pthread synchronization object. Inserting a happens-after annotation
* before any other thread has passed by a happens-before annotation for the
* same address is an error.
*/
#define ANNOTATE_HAPPENS_AFTER(addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER, \
addr, 0, 0, 0, 0)
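/* Usage sketch (hypothetical, compiled out): a lock-free hand-off
   through an atomic pointer 'slot'. The annotations tell DRD that the
   release store orders the payload writes before the acquiring reads;
   the __atomic_* builtins are a GCC/Clang assumption. */
#if 0
static void *slot;
static void example_publish(void *payload)
{
   ANNOTATE_HAPPENS_BEFORE(&slot);
   __atomic_store_n(&slot, payload, __ATOMIC_RELEASE);
}
static void *example_consume(void)
{
   void *p = __atomic_load_n(&slot, __ATOMIC_ACQUIRE);
   if (p)
      ANNOTATE_HAPPENS_AFTER(&slot);
   return p;
}
#endif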
#else /* __HELGRIND_H */
#undef ANNOTATE_CONDVAR_LOCK_WAIT
#undef ANNOTATE_CONDVAR_WAIT
#undef ANNOTATE_CONDVAR_SIGNAL
#undef ANNOTATE_CONDVAR_SIGNAL_ALL
#undef ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
#undef ANNOTATE_PUBLISH_MEMORY_RANGE
#undef ANNOTATE_BARRIER_INIT
#undef ANNOTATE_BARRIER_WAIT_BEFORE
#undef ANNOTATE_BARRIER_WAIT_AFTER
#undef ANNOTATE_BARRIER_DESTROY
#undef ANNOTATE_PCQ_CREATE
#undef ANNOTATE_PCQ_DESTROY
#undef ANNOTATE_PCQ_PUT
#undef ANNOTATE_PCQ_GET
#undef ANNOTATE_BENIGN_RACE
#undef ANNOTATE_BENIGN_RACE_SIZED
#undef ANNOTATE_IGNORE_READS_BEGIN
#undef ANNOTATE_IGNORE_READS_END
#undef ANNOTATE_IGNORE_WRITES_BEGIN
#undef ANNOTATE_IGNORE_WRITES_END
#undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN
#undef ANNOTATE_IGNORE_READS_AND_WRITES_END
#undef ANNOTATE_NEW_MEMORY
#undef ANNOTATE_TRACE_MEMORY
#undef ANNOTATE_THREAD_NAME
#endif /* __HELGRIND_H */
/**
* Tell DRD that waiting on the condition variable at address cv has succeeded
* and a lock on the mutex at address mtx is now held. Since DRD always inserts
 * a happens-before relation between the pthread_cond_signal() or
* pthread_cond_broadcast() call that wakes up a pthread_cond_wait() or
* pthread_cond_timedwait() call and the woken up thread, this macro has been
* defined such that it has no effect.
*/
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, mtx) do { } while(0)
/**
* Tell DRD that the condition variable at address cv is about to be signaled.
*/
#define ANNOTATE_CONDVAR_SIGNAL(cv) do { } while(0)
/**
* Tell DRD that the condition variable at address cv is about to be signaled.
*/
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) do { } while(0)
/**
* Tell DRD that waiting on condition variable at address cv succeeded and that
* the memory operations performed after this annotation should be considered
* to happen after the matching ANNOTATE_CONDVAR_SIGNAL(cv). Since this is the
* default behavior of DRD, this macro and the macro above have been defined
* such that they have no effect.
*/
#define ANNOTATE_CONDVAR_WAIT(cv) do { } while(0)
/**
* Tell DRD to consider the memory operations that happened before a mutex
* unlock event and after the subsequent mutex lock event on the same mutex as
* ordered. This is how DRD always behaves, so this macro has been defined
* such that it has no effect.
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mtx) do { } while(0)
/** Deprecated -- don't use this annotation. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mtx) do { } while(0)
/**
* Tell DRD to handle the specified memory range like a pure happens-before
* detector would do. Since this is how DRD always behaves, this annotation
* has been defined such that it has no effect.
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(addr, size) do { } while(0)
/** Deprecated -- don't use this annotation. */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(addr, size) do { } while(0)
/** Deprecated -- don't use this annotation. */
#define ANNOTATE_SWAP_MEMORY_RANGE(addr, size) do { } while(0)
#ifndef __HELGRIND_H
/** Tell DRD that a reader-writer lock object has been initialized. */
#define ANNOTATE_RWLOCK_CREATE(rwlock) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE, \
rwlock, 0, 0, 0, 0);
/** Tell DRD that a reader-writer lock object has been destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(rwlock) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY, \
rwlock, 0, 0, 0, 0);
/**
* Tell DRD that a reader-writer lock has been acquired. is_w == 1 means that
* a write lock has been obtained, is_w == 0 means that a read lock has been
* obtained.
*/
#define ANNOTATE_RWLOCK_ACQUIRED(rwlock, is_w) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED, \
rwlock, is_w, 0, 0, 0)
#endif /* __HELGRIND_H */
/**
* Tell DRD that a reader lock has been acquired on a reader-writer
* synchronization object.
*/
#define ANNOTATE_READERLOCK_ACQUIRED(rwlock) ANNOTATE_RWLOCK_ACQUIRED(rwlock, 0)
/**
* Tell DRD that a writer lock has been acquired on a reader-writer
* synchronization object.
*/
#define ANNOTATE_WRITERLOCK_ACQUIRED(rwlock) ANNOTATE_RWLOCK_ACQUIRED(rwlock, 1)
#ifndef __HELGRIND_H
/**
* Tell DRD that a reader-writer lock is about to be released. is_w == 1 means
* that a write lock is about to be released, is_w == 0 means that a read lock
* is about to be released.
*/
#define ANNOTATE_RWLOCK_RELEASED(rwlock, is_w) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED, \
rwlock, is_w, 0, 0, 0);
#endif /* __HELGRIND_H */
/**
* Tell DRD that a reader lock is about to be released.
*/
#define ANNOTATE_READERLOCK_RELEASED(rwlock) ANNOTATE_RWLOCK_RELEASED(rwlock, 0)
/**
* Tell DRD that a writer lock is about to be released.
*/
#define ANNOTATE_WRITERLOCK_RELEASED(rwlock) ANNOTATE_RWLOCK_RELEASED(rwlock, 1)
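/* Sketch of annotating a home-grown reader-writer lock (hypothetical
   my_rwlock type, compiled out; the actual locking protocol is elided
   and only the DRD-visible calls are shown). */
#if 0
struct my_rwlock { int state; };
static void my_rwlock_init(struct my_rwlock *l)
{
   l->state = 0;
   ANNOTATE_RWLOCK_CREATE(l);
}
static void my_rwlock_wrlock(struct my_rwlock *l)
{
   /* ... acquire exclusive ownership ... */
   ANNOTATE_WRITERLOCK_ACQUIRED(l);
}
static void my_rwlock_wrunlock(struct my_rwlock *l)
{
   ANNOTATE_WRITERLOCK_RELEASED(l);
   /* ... release exclusive ownership ... */
}
#endif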
/** Tell DRD that a semaphore object is going to be initialized. */
#define ANNOTATE_SEM_INIT_PRE(sem, value) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE, \
sem, value, 0, 0, 0);
/** Tell DRD that a semaphore object has been destroyed. */
#define ANNOTATE_SEM_DESTROY_POST(sem) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST, \
sem, 0, 0, 0, 0);
/** Tell DRD that a semaphore is going to be acquired. */
#define ANNOTATE_SEM_WAIT_PRE(sem) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE, \
sem, 0, 0, 0, 0)
/** Tell DRD that a semaphore has been acquired. */
#define ANNOTATE_SEM_WAIT_POST(sem) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST, \
sem, 0, 0, 0, 0)
/** Tell DRD that a semaphore is going to be released. */
#define ANNOTATE_SEM_POST_PRE(sem) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE, \
sem, 0, 0, 0, 0)
/*
* Report that a barrier has been initialized with a given barrier count. The
* third argument specifies whether or not reinitialization is allowed, that
* is, whether or not it is allowed to call barrier_init() several times
* without calling barrier_destroy().
*/
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \
"ANNOTATE_BARRIER_INIT", barrier, \
count, reinitialization_allowed, 0)
/* Report that a barrier has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \
"ANNOTATE_BARRIER_DESTROY", \
barrier, 0, 0, 0)
/* Report that the calling thread is about to start waiting for a barrier. */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \
"ANNOTATE_BARRIER_WAIT_BEFORE", \
barrier, 0, 0, 0)
/* Report that the calling thread has just finished waiting for a barrier. */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \
"ANNOTATE_BARRIER_WAIT_AFTER", \
barrier, 0, 0, 0)
/**
* Tell DRD that a FIFO queue has been created. The abbreviation PCQ stands for
* <em>producer-consumer</em>.
*/
#define ANNOTATE_PCQ_CREATE(pcq) do { } while(0)
/** Tell DRD that a FIFO queue has been destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) do { } while(0)
/**
* Tell DRD that an element has been added to the FIFO queue at address pcq.
*/
#define ANNOTATE_PCQ_PUT(pcq) do { } while(0)
/**
* Tell DRD that an element has been removed from the FIFO queue at address pcq,
* and that DRD should insert a happens-before relationship between the memory
* accesses that occurred before the corresponding ANNOTATE_PCQ_PUT(pcq)
* annotation and the memory accesses after this annotation. Correspondence
* between PUT and GET annotations happens in FIFO order. Since locking
* of the queue is needed anyway to add elements to or to remove elements from
* the queue, for DRD all four FIFO annotations are defined as no-ops.
*/
#define ANNOTATE_PCQ_GET(pcq) do { } while(0)
/**
* Tell DRD that data races at the specified address are expected and must not
* be reported.
*/
#define ANNOTATE_BENIGN_RACE(addr, descr) \
ANNOTATE_BENIGN_RACE_SIZED(addr, sizeof(*addr), descr)
/* Same as ANNOTATE_BENIGN_RACE(addr, descr), but applies to
the memory range [addr, addr + size). */
#define ANNOTATE_BENIGN_RACE_SIZED(addr, size, descr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_SUPPRESSION, \
addr, size, 0, 0, 0)
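/* Usage sketch (hypothetical, compiled out): silence reports on an
   intentionally racy statistics counter without affecting detection
   elsewhere. */
#if 0
static unsigned long example_hits;
static void example_suppress_counter(void)
{
   ANNOTATE_BENIGN_RACE(&example_hits, "approximate hit counter");
}
#endif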
/** Tell DRD to ignore all reads performed by the current thread. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \
0, 0, 0, 0, 0);
/** Tell DRD to no longer ignore the reads performed by the current thread. */
#define ANNOTATE_IGNORE_READS_END() \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \
1, 0, 0, 0, 0);
/** Tell DRD to ignore all writes performed by the current thread. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \
0, 0, 0, 0, 0)
/** Tell DRD to no longer ignore the writes performed by the current thread. */
#define ANNOTATE_IGNORE_WRITES_END() \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \
1, 0, 0, 0, 0)
/** Tell DRD to ignore all memory accesses performed by the current thread. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { ANNOTATE_IGNORE_READS_BEGIN(); ANNOTATE_IGNORE_WRITES_BEGIN(); } while(0)
/**
* Tell DRD to no longer ignore the memory accesses performed by the current
* thread.
*/
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { ANNOTATE_IGNORE_READS_END(); ANNOTATE_IGNORE_WRITES_END(); } while(0)
/**
 * Tell DRD that size bytes starting at addr have been allocated by a custom
* memory allocator.
*/
#define ANNOTATE_NEW_MEMORY(addr, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_CLEAN_MEMORY, \
addr, size, 0, 0, 0)
/** Ask DRD to report every access to the specified address. */
#define ANNOTATE_TRACE_MEMORY(addr) DRD_TRACE_VAR(*(char*)(addr))
/**
* Tell DRD to assign the specified name to the current thread. This name will
* be used in error messages printed by DRD.
*/
#define ANNOTATE_THREAD_NAME(name) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_SET_THREAD_NAME, \
name, 0, 0, 0, 0)
/*@}*/
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end.
*/
enum {
/* Ask the DRD tool to discard all information about memory accesses */
/* and client objects for the specified range. This client request is */
/* binary compatible with the similarly named Helgrind client request. */
VG_USERREQ__DRD_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),
/* args: Addr, SizeT. */
/* Ask the DRD tool the thread ID assigned by Valgrind. */
VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID = VG_USERREQ_TOOL_BASE('D','R'),
/* args: none. */
/* Ask the DRD tool the thread ID assigned by DRD. */
VG_USERREQ__DRD_GET_DRD_THREAD_ID,
/* args: none. */
/* To tell the DRD tool to suppress data race detection on the */
/* specified address range. */
VG_USERREQ__DRD_START_SUPPRESSION,
/* args: start address, size in bytes */
/* To tell the DRD tool no longer to suppress data race detection on */
/* the specified address range. */
VG_USERREQ__DRD_FINISH_SUPPRESSION,
/* args: start address, size in bytes */
/* To ask the DRD tool to trace all accesses to the specified range. */
VG_USERREQ__DRD_START_TRACE_ADDR,
/* args: Addr, SizeT. */
/* To ask the DRD tool to stop tracing accesses to the specified range. */
VG_USERREQ__DRD_STOP_TRACE_ADDR,
/* args: Addr, SizeT. */
/* Tell DRD whether or not to record memory loads in the calling thread. */
VG_USERREQ__DRD_RECORD_LOADS,
/* args: Bool. */
/* Tell DRD whether or not to record memory stores in the calling thread. */
VG_USERREQ__DRD_RECORD_STORES,
/* args: Bool. */
/* Set the name of the thread that performs this client request. */
VG_USERREQ__DRD_SET_THREAD_NAME,
/* args: null-terminated character string. */
/* Tell DRD that a DRD annotation has not yet been implemented. */
VG_USERREQ__DRD_ANNOTATION_UNIMP,
/* args: char*. */
/* Tell DRD that a user-defined semaphore synchronization object
* is about to be created. */
VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE,
/* args: Addr, UInt value. */
/* Tell DRD that a user-defined semaphore synchronization object
* has been destroyed. */
VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST,
/* args: Addr. */
/* Tell DRD that a user-defined semaphore synchronization
* object is going to be acquired (semaphore wait). */
VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE,
/* args: Addr. */
/* Tell DRD that a user-defined semaphore synchronization
* object has been acquired (semaphore wait). */
VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST,
/* args: Addr. */
/* Tell DRD that a user-defined semaphore synchronization
* object is about to be released (semaphore post). */
VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE,
/* args: Addr. */
/* Tell DRD to ignore the inter-thread ordering introduced by a mutex. */
VG_USERREQ__DRD_IGNORE_MUTEX_ORDERING,
/* args: Addr. */
/* Tell DRD that a user-defined reader-writer synchronization object
* has been created. */
VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 14,
/* args: Addr. */
/* Tell DRD that a user-defined reader-writer synchronization object
* is about to be destroyed. */
VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 15,
/* args: Addr. */
/* Tell DRD that a lock on a user-defined reader-writer synchronization
* object has been acquired. */
VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 17,
/* args: Addr, Int is_rw. */
/* Tell DRD that a lock on a user-defined reader-writer synchronization
* object is about to be released. */
VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 18,
/* args: Addr, Int is_rw. */
/* Tell DRD that a Helgrind annotation has not yet been implemented. */
VG_USERREQ__HELGRIND_ANNOTATION_UNIMP
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 32,
/* args: char*. */
/* Tell DRD to insert a happens-before annotation. */
VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 33,
/* args: Addr. */
/* Tell DRD to insert a happens-after annotation. */
VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER
= VG_USERREQ_TOOL_BASE('H','G') + 256 + 34,
/* args: Addr. */
};
/**
* @addtogroup RaceDetectionAnnotations
*/
/*@{*/
#ifdef __cplusplus
/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads.
Instead of doing
ANNOTATE_IGNORE_READS_BEGIN();
... = x;
ANNOTATE_IGNORE_READS_END();
one can use
... = ANNOTATE_UNPROTECTED_READ(x); */
template <typename T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T& x) {
ANNOTATE_IGNORE_READS_BEGIN();
const T result = x;
ANNOTATE_IGNORE_READS_END();
return result;
}
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
static class static_var##_annotator \
{ \
public: \
static_var##_annotator() \
{ \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
#static_var ": " description); \
} \
} the_##static_var##_annotator; \
}
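/* Usage sketch (hypothetical, compiled out): the annotator object runs
   during static initialization, before main(). */
#if 0
static int g_approx_count;
ANNOTATE_BENIGN_RACE_STATIC(g_approx_count, "approximate counter, races benign")
#endif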
#endif
/*@}*/
#endif /* __VALGRIND_DRD_H */
/* ==== NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/core/valgrind/pmemcheck.h ==== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2015, Intel Corporation */
#ifndef __PMEMCHECK_H
#define __PMEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__PMC_REGISTER_PMEM_MAPPING = VG_USERREQ_TOOL_BASE('P','C'),
VG_USERREQ__PMC_REGISTER_PMEM_FILE,
VG_USERREQ__PMC_REMOVE_PMEM_MAPPING,
VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING,
VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS,
VG_USERREQ__PMC_DO_FLUSH,
VG_USERREQ__PMC_DO_FENCE,
VG_USERREQ__PMC_RESERVED1, /* Do not use. */
VG_USERREQ__PMC_WRITE_STATS,
VG_USERREQ__PMC_RESERVED2, /* Do not use. */
VG_USERREQ__PMC_RESERVED3, /* Do not use. */
VG_USERREQ__PMC_RESERVED4, /* Do not use. */
VG_USERREQ__PMC_RESERVED5, /* Do not use. */
VG_USERREQ__PMC_RESERVED7, /* Do not use. */
VG_USERREQ__PMC_RESERVED8, /* Do not use. */
VG_USERREQ__PMC_RESERVED9, /* Do not use. */
VG_USERREQ__PMC_RESERVED10, /* Do not use. */
VG_USERREQ__PMC_SET_CLEAN,
/* transaction support */
VG_USERREQ__PMC_START_TX,
VG_USERREQ__PMC_START_TX_N,
VG_USERREQ__PMC_END_TX,
VG_USERREQ__PMC_END_TX_N,
VG_USERREQ__PMC_ADD_TO_TX,
VG_USERREQ__PMC_ADD_TO_TX_N,
VG_USERREQ__PMC_REMOVE_FROM_TX,
VG_USERREQ__PMC_REMOVE_FROM_TX_N,
VG_USERREQ__PMC_ADD_THREAD_TO_TX_N,
VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N,
VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,
VG_USERREQ__PMC_RESERVED6, /* Do not use. */
VG_USERREQ__PMC_EMIT_LOG,
} Vg_PMemCheckClientRequest;
/* Client-code macros to manipulate pmem mappings */
/** Register a persistent memory mapping region */
#define VALGRIND_PMC_REGISTER_PMEM_MAPPING(_qzz_addr, _qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REGISTER_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Register a persistent memory file */
#define VALGRIND_PMC_REGISTER_PMEM_FILE(_qzz_desc, _qzz_addr_base, \
_qzz_size, _qzz_offset) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REGISTER_PMEM_FILE, \
(_qzz_desc), (_qzz_addr_base), (_qzz_size), \
(_qzz_offset), 0)
/** Remove a persistent memory mapping region */
#define VALGRIND_PMC_REMOVE_PMEM_MAPPING(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Check if the given range is a registered persistent memory mapping */
#define VALGRIND_PMC_CHECK_IS_PMEM_MAPPING(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Print registered persistent memory mappings */
#define VALGRIND_PMC_PRINT_PMEM_MAPPINGS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, \
0, 0, 0, 0, 0)
/** Register a CLFLUSH-like operation */
#define VALGRIND_PMC_DO_FLUSH(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_DO_FLUSH, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Register an SFENCE */
#define VALGRIND_PMC_DO_FENCE \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_DO_FENCE, \
0, 0, 0, 0, 0)
/** Write tool stats */
#define VALGRIND_PMC_WRITE_STATS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_WRITE_STATS, \
0, 0, 0, 0, 0)
/** Emit user log */
#define VALGRIND_PMC_EMIT_LOG(_qzz_emit_log) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_EMIT_LOG, \
(_qzz_emit_log), 0, 0, 0, 0)
/** Set a region of persistent memory as clean */
#define VALGRIND_PMC_SET_CLEAN(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_SET_CLEAN, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Support for transactions */
/** Start an implicit persistent memory transaction */
#define VALGRIND_PMC_START_TX \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_START_TX, \
0, 0, 0, 0, 0)
/** Start an explicit persistent memory transaction */
#define VALGRIND_PMC_START_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_START_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** End an implicit persistent memory transaction */
#define VALGRIND_PMC_END_TX \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_END_TX, \
0, 0, 0, 0, 0)
/** End an explicit persistent memory transaction */
#define VALGRIND_PMC_END_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_END_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Add a persistent memory region to the implicit transaction */
#define VALGRIND_PMC_ADD_TO_TX(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_TO_TX, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Add a persistent memory region to an explicit transaction */
#define VALGRIND_PMC_ADD_TO_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_TO_TX_N, \
(_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0)
/** Remove a persistent memory region from the implicit transaction */
#define VALGRIND_PMC_REMOVE_FROM_TX(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_FROM_TX, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Remove a persistent memory region from an explicit transaction */
#define VALGRIND_PMC_REMOVE_FROM_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_FROM_TX_N, \
(_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0)
/** Add the current thread to an explicit persistent memory transaction */
#define VALGRIND_PMC_ADD_THREAD_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Remove the current thread from an explicit persistent memory transaction */
#define VALGRIND_PMC_REMOVE_THREAD_FROM_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Add a persistent memory region to the global transaction ignore list */
#define VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,\
(_qzz_addr), (_qzz_len), 0, 0, 0)
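/* End-to-end sketch (hypothetical buffer assumed to live in persistent
   memory, compiled out): stores made inside a transaction must be
   added to it, then flushed and fenced to become persistent. */
#if 0
static void example_tx_store(char *pmem_buf, size_t len)
{
   VALGRIND_PMC_REGISTER_PMEM_MAPPING(pmem_buf, len);
   VALGRIND_PMC_START_TX;
   VALGRIND_PMC_ADD_TO_TX(pmem_buf, len);
   pmem_buf[0] = 1;   /* the tracked store */
   VALGRIND_PMC_DO_FLUSH(pmem_buf, len);
   VALGRIND_PMC_DO_FENCE;
   VALGRIND_PMC_END_TX;
   VALGRIND_PMC_REMOVE_PMEM_MAPPING(pmem_buf, len);
}
#endif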
#endif
/* ==== NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/benchmarks/clo_vec.hpp ==== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* clo_vec.hpp -- command line options vector declarations
*/
#include "queue.h"
#include <cstdlib>
struct clo_vec_args {
PMDK_TAILQ_ENTRY(clo_vec_args) next;
void *args;
};
struct clo_vec_alloc {
PMDK_TAILQ_ENTRY(clo_vec_alloc) next;
void *ptr;
};
struct clo_vec_value {
PMDK_TAILQ_ENTRY(clo_vec_value) next;
void *ptr;
};
struct clo_vec_vlist {
PMDK_TAILQ_HEAD(valueshead, clo_vec_value) head;
size_t nvalues;
};
struct clo_vec {
size_t size;
PMDK_TAILQ_HEAD(argshead, clo_vec_args) args;
size_t nargs;
PMDK_TAILQ_HEAD(allochead, clo_vec_alloc) allocs;
size_t nallocs;
};
struct clo_vec *clo_vec_alloc(size_t size);
void clo_vec_free(struct clo_vec *clovec);
void *clo_vec_get_args(struct clo_vec *clovec, size_t i);
int clo_vec_add_alloc(struct clo_vec *clovec, void *ptr);
int clo_vec_memcpy(struct clo_vec *clovec, size_t off, size_t size, void *ptr);
int clo_vec_memcpy_list(struct clo_vec *clovec, size_t off, size_t size,
struct clo_vec_vlist *list);
struct clo_vec_vlist *clo_vec_vlist_alloc(void);
void clo_vec_vlist_free(struct clo_vec_vlist *list);
void clo_vec_vlist_add(struct clo_vec_vlist *list, void *ptr, size_t size);
/* ==== NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/benchmarks/pmem_flush.cpp ==== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pmem_flush.cpp -- benchmark implementation for pmem_persist and pmem_msync
*/
#include <cassert>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <libpmem.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#define PAGE_4K ((uintptr_t)1 << 12)
#define PAGE_2M ((uintptr_t)1 << 21)
/*
* align_addr -- round addr down to given boundary
*/
static void *
align_addr(void *addr, uintptr_t align)
{
return (char *)((uintptr_t)addr & ~(align - 1));
}
/*
* align_len -- increase len by the amount we gain when we round addr down
*/
static size_t
align_len(size_t len, void *addr, uintptr_t align)
{
return len + ((uintptr_t)addr & (align - 1));
}
/*
* roundup_len -- increase len by the amount we gain when we round addr down,
* then round up to the nearest multiple of 4K
*/
static size_t
roundup_len(size_t len, void *addr, uintptr_t align)
{
return (align_len(len, addr, align) + align - 1) & ~(align - 1);
}
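/*
 * Worked example (illustrative): for addr == (void *)0x1003,
 * align == PAGE_4K (0x1000) and len == 10:
 * - align_addr(addr, PAGE_4K) -> 0x1000 (round down to the page base)
 * - align_len(10, addr, PAGE_4K) -> 10 + 3 == 13 (cover the bytes
 * gained by rounding the address down)
 * - roundup_len(10, addr, PAGE_4K) -> (13 + 0xfff) & ~0xfff == 0x1000
 * (one full page)
 */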
/*
* pmem_args -- benchmark specific arguments
*/
struct pmem_args {
char *operation; /* msync, dummy_msync, persist, ... */
char *mode; /* stat, seq, rand */
bool no_warmup; /* don't do warmup */
};
/*
* pmem_bench -- benchmark context
*/
struct pmem_bench {
uint64_t *offsets; /* write offsets */
size_t n_offsets; /* number of elements in offsets array */
size_t fsize; /* The size of the allocated PMEM */
struct pmem_args *pargs; /* prog_args structure */
void *pmem_addr; /* PMEM base address */
size_t pmem_len; /* length of PMEM mapping */
void *invalid_addr; /* invalid pages */
void *nondirty_addr; /* non-dirty pages */
void *pmem_addr_aligned; /* PMEM pages - 2M aligned */
void *invalid_addr_aligned; /* invalid pages - 2M aligned */
void *nondirty_addr_aligned; /* non-dirty pages - 2M aligned */
/* the actual benchmark operation */
int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len);
};
/*
* mode_seq -- if copy mode is sequential, returns index of a chunk.
*/
static uint64_t
mode_seq(struct pmem_bench *pmb, uint64_t index)
{
return index;
}
/*
* mode_stat -- if mode is static, the offset is always 0
*/
static uint64_t
mode_stat(struct pmem_bench *pmb, uint64_t index)
{
return 0;
}
/*
* mode_rand -- if mode is random, returns index of a random chunk
*/
static uint64_t
mode_rand(struct pmem_bench *pmb, uint64_t index)
{
return rand() % pmb->n_offsets;
}
/*
* operation_mode -- the mode of the copy process
*
* * static - write always the same chunk,
* * sequential - write chunk by chunk,
* * random - write to chunks selected randomly.
*/
struct op_mode {
const char *mode;
uint64_t (*func_mode)(struct pmem_bench *pmb, uint64_t index);
};
static struct op_mode modes[] = {
{"stat", mode_stat},
{"seq", mode_seq},
{"rand", mode_rand},
};
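/*
* Illustration (the rand sequence is of course arbitrary): with
* n_offsets = 4 the offsets array built from each mode looks like:
* stat -> 0, 0, 0, 0
* seq -> 0, 1, 2, 3
* rand -> e.g. 2, 0, 3, 1
*/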
#define MODES (sizeof(modes) / sizeof(modes[0]))
/*
* parse_op_mode -- parses command line "--mode"
* and returns proper operation mode index.
*/
static int
parse_op_mode(const char *arg)
{
for (unsigned i = 0; i < MODES; i++) {
if (strcmp(arg, modes[i].mode) == 0)
return i;
}
return -1;
}
/*
* flush_noop -- dummy flush, does nothing
*/
static int
flush_noop(struct pmem_bench *pmb, void *addr, size_t len)
{
return 0;
}
/*
* flush_persist -- flush data to persistence using pmem_persist()
*/
static int
flush_persist(struct pmem_bench *pmb, void *addr, size_t len)
{
pmem_persist(addr, len);
return 0;
}
/*
* flush_persist_4K -- always flush entire 4K page(s) using pmem_persist()
*/
static int
flush_persist_4K(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_4K);
len = roundup_len(len, addr, PAGE_4K);
pmem_persist(ptr, len);
return 0;
}
/*
* flush_persist_2M -- always flush entire 2M page(s) using pmem_persist()
*/
static int
flush_persist_2M(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_2M);
len = roundup_len(len, addr, PAGE_2M);
pmem_persist(ptr, len);
return 0;
}
/*
* flush_msync -- flush data to persistence using pmem_msync()
*/
static int
flush_msync(struct pmem_bench *pmb, void *addr, size_t len)
{
pmem_msync(addr, len);
return 0;
}
/*
* flush_msync_async -- emulate dummy msync() using MS_ASYNC flag
*/
static int
flush_msync_async(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_4K);
len = align_len(len, addr, PAGE_4K);
msync(ptr, len, MS_ASYNC);
return 0;
}
/*
* flush_msync_0 -- emulate dummy msync() using zero length
*/
static int
flush_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_4K);
(void)len;
msync(ptr, 0, MS_SYNC);
return 0;
}
/*
* flush_persist_4K_msync_0 -- emulate msync() that only flushes CPU cache
*
* Do flushing in user space (4K pages) + dummy syscall.
*/
static int
flush_persist_4K_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_4K);
len = roundup_len(len, addr, PAGE_4K);
pmem_persist(ptr, len);
msync(ptr, 0, MS_SYNC);
return 0;
}
/*
* flush_persist_2M_msync_0 -- emulate msync() that only flushes CPU cache
*
* Do flushing in user space (2M pages) + dummy syscall.
*/
static int
flush_persist_2M_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_2M);
len = roundup_len(len, addr, PAGE_2M);
pmem_persist(ptr, len);
msync(ptr, 0, MS_SYNC);
return 0;
}
/*
* flush_msync_err -- emulate dummy msync() using invalid flags
*/
static int
flush_msync_err(struct pmem_bench *pmb, void *addr, size_t len)
{
void *ptr = align_addr(addr, PAGE_4K);
len = align_len(len, addr, PAGE_4K);
msync(ptr, len, MS_SYNC | MS_ASYNC);
return 0;
}
/*
* flush_msync_nodirty -- call msync() on non-dirty pages
*/
static int
flush_msync_nodirty(struct pmem_bench *pmb, void *addr, size_t len)
{
uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned;
uptr += (uintptr_t)pmb->nondirty_addr_aligned;
void *ptr = align_addr((void *)uptr, PAGE_4K);
len = align_len(len, (void *)uptr, PAGE_4K);
pmem_msync(ptr, len);
return 0;
}
/*
* flush_msync_invalid -- emulate dummy msync() using invalid address
*/
static int
flush_msync_invalid(struct pmem_bench *pmb, void *addr, size_t len)
{
uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned;
uptr += (uintptr_t)pmb->invalid_addr_aligned;
void *ptr = align_addr((void *)uptr, PAGE_4K);
len = align_len(len, (void *)uptr, PAGE_4K);
pmem_msync(ptr, len);
return 0;
}
struct op {
const char *opname;
int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len);
};
static struct op ops[] = {
{"noop", flush_noop},
{"persist", flush_persist},
{"persist_4K", flush_persist_4K},
{"persist_2M", flush_persist_2M},
{"msync", flush_msync},
{"msync_0", flush_msync_0},
{"msync_err", flush_msync_err},
{"persist_4K_msync_0", flush_persist_4K_msync_0},
{"persist_2M_msync_0", flush_persist_2M_msync_0},
{"msync_async", flush_msync_async},
{"msync_nodirty", flush_msync_nodirty},
{"msync_invalid", flush_msync_invalid},
};
#define NOPS (sizeof(ops) / sizeof(ops[0]))
/*
* parse_op_type -- parses command line "--operation" argument
* and returns proper operation type.
*/
static int
parse_op_type(const char *arg)
{
for (unsigned i = 0; i < NOPS; i++) {
if (strcmp(arg, ops[i].opname) == 0)
return i;
}
return -1;
}
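/*
* Dispatch sketch (pmb, addr and len stand for an already prepared context
* and store):
*
* int op = parse_op_type("persist_4K");
* if (op != -1)
* ops[op].func_op(pmb, addr, len);
*
* which ends up calling flush_persist_4K().
*/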
/*
* pmem_flush_init -- benchmark initialization
*
* Parses command line arguments, allocates persistent memory, and maps it.
*/
static int
pmem_flush_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
size_t file_size = 0;
int flags = 0;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
uint64_t (*func_mode)(struct pmem_bench * pmb, uint64_t index);
auto *pmb = (struct pmem_bench *)malloc(sizeof(struct pmem_bench));
assert(pmb != nullptr);
pmb->pargs = (struct pmem_args *)args->opts;
assert(pmb->pargs != nullptr);
int i = parse_op_type(pmb->pargs->operation);
if (i == -1) {
fprintf(stderr, "wrong operation: %s\n", pmb->pargs->operation);
goto err_free_pmb;
}
pmb->func_op = ops[i].func_op;
pmb->n_offsets = args->n_ops_per_thread * args->n_threads;
pmb->fsize = pmb->n_offsets * args->dsize + (2 * PAGE_2M);
/* round up to 2M boundary */
pmb->fsize = (pmb->fsize + PAGE_2M - 1) & ~(PAGE_2M - 1);
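/*
* e.g. n_offsets = 1000 and dsize = 64: 64000 B of data plus two 2M
* pages of slack is 4258304 B, rounded up to 6291456 B (3 * PAGE_2M)
*/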
i = parse_op_mode(pmb->pargs->mode);
if (i == -1) {
fprintf(stderr, "wrong mode: %s\n", pmb->pargs->mode);
goto err_free_pmb;
}
func_mode = modes[i].func_mode;
/* populate offsets array */
assert(pmb->n_offsets != 0);
pmb->offsets = (uint64_t *)malloc(pmb->n_offsets * sizeof(*pmb->offsets));
assert(pmb->offsets != nullptr);
for (size_t i = 0; i < pmb->n_offsets; ++i)
pmb->offsets[i] = func_mode(pmb, i);
if (type != TYPE_DEVDAX) {
file_size = pmb->fsize;
flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL;
}
/* create a pmem file and memory map it */
pmb->pmem_addr = pmem_map_file(args->fname, file_size, flags,
args->fmode, &pmb->pmem_len, nullptr);
if (pmb->pmem_addr == nullptr) {
perror("pmem_map_file");
goto err_free_pmb;
}
pmb->nondirty_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (pmb->nondirty_addr == MAP_FAILED) {
perror("mmap(1)");
goto err_unmap1;
}
pmb->invalid_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (pmb->invalid_addr == MAP_FAILED) {
perror("mmap(2)");
goto err_unmap2;
}
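/*
* Unmap the region right away: the range is then (almost certainly) no
* longer backed, which is what the msync_invalid operation needs to
* exercise msync() on an invalid address.
*/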
munmap(pmb->invalid_addr, pmb->fsize);
pmb->pmem_addr_aligned =
(void *)(((uintptr_t)pmb->pmem_addr + PAGE_2M - 1) &
~(PAGE_2M - 1));
pmb->nondirty_addr_aligned =
(void *)(((uintptr_t)pmb->nondirty_addr + PAGE_2M - 1) &
~(PAGE_2M - 1));
pmb->invalid_addr_aligned =
(void *)(((uintptr_t)pmb->invalid_addr + PAGE_2M - 1) &
~(PAGE_2M - 1));
pmembench_set_priv(bench, pmb);
if (!pmb->pargs->no_warmup) {
size_t off;
for (off = 0; off < pmb->fsize - PAGE_2M; off += PAGE_4K) {
*(int *)((char *)pmb->pmem_addr_aligned + off) = 0;
*(int *)((char *)pmb->nondirty_addr_aligned + off) = 0;
}
}
return 0;
err_unmap2:
munmap(pmb->nondirty_addr, pmb->fsize);
err_unmap1:
pmem_unmap(pmb->pmem_addr, pmb->pmem_len);
err_free_pmb:
free(pmb);
return -1;
}
/*
* pmem_flush_exit -- benchmark cleanup
*/
static int
pmem_flush_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench);
pmem_unmap(pmb->pmem_addr, pmb->pmem_len);
munmap(pmb->nondirty_addr, pmb->fsize);
free(pmb->offsets);
free(pmb);
return 0;
}
/*
* pmem_flush_operation -- actual benchmark operation
*/
static int
pmem_flush_operation(struct benchmark *bench, struct operation_info *info)
{
auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench);
size_t op_idx = info->index;
assert(op_idx < pmb->n_offsets);
uint64_t chunk_idx = pmb->offsets[op_idx];
void *addr =
(char *)pmb->pmem_addr_aligned + chunk_idx * info->args->dsize;
/* store + flush */
*(int *)addr = *(int *)addr + 1;
pmb->func_op(pmb, addr, info->args->dsize);
return 0;
}
/* structure to define command line arguments */
static struct benchmark_clo pmem_flush_clo[3];
/* Stores information about benchmark. */
static struct benchmark_info pmem_flush_bench;
CONSTRUCTOR(pmem_flush_constructor)
void
pmem_flush_constructor(void)
{
pmem_flush_clo[0].opt_short = 'o';
pmem_flush_clo[0].opt_long = "operation";
pmem_flush_clo[0].descr = "Operation type - persist,"
" msync, ...";
pmem_flush_clo[0].type = CLO_TYPE_STR;
pmem_flush_clo[0].off = clo_field_offset(struct pmem_args, operation);
pmem_flush_clo[0].def = "noop";
pmem_flush_clo[1].opt_short = 0;
pmem_flush_clo[1].opt_long = "mode";
pmem_flush_clo[1].descr = "mode - stat, seq or rand";
pmem_flush_clo[1].type = CLO_TYPE_STR;
pmem_flush_clo[1].off = clo_field_offset(struct pmem_args, mode);
pmem_flush_clo[1].def = "stat";
pmem_flush_clo[2].opt_short = 'w';
pmem_flush_clo[2].opt_long = "no-warmup";
pmem_flush_clo[2].descr = "Don't do warmup";
pmem_flush_clo[2].type = CLO_TYPE_FLAG;
pmem_flush_clo[2].off = clo_field_offset(struct pmem_args, no_warmup);
pmem_flush_bench.name = "pmem_flush";
pmem_flush_bench.brief = "Benchmark for pmem_msync() "
"and pmem_persist()";
pmem_flush_bench.init = pmem_flush_init;
pmem_flush_bench.exit = pmem_flush_exit;
pmem_flush_bench.multithread = true;
pmem_flush_bench.multiops = true;
pmem_flush_bench.operation = pmem_flush_operation;
pmem_flush_bench.measure_time = true;
pmem_flush_bench.clos = pmem_flush_clo;
pmem_flush_bench.nclos = ARRAY_SIZE(pmem_flush_clo);
pmem_flush_bench.opts_size = sizeof(struct pmem_args);
pmem_flush_bench.rm_file = true;
pmem_flush_bench.allow_poolset = false;
REGISTER_BENCHMARK(pmem_flush_bench);
}
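/*
* Example invocation (a sketch; the path and numbers are arbitrary):
*
* $ pmembench pmem_flush -f /mnt/pmem/testfile -o persist --mode seq \
* -t 4 -n 100000 -d 64
*
* i.e. 4 threads, 100000 64-byte stores each, flushed with pmem_persist()
* at sequentially increasing offsets.
*/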
| 13,147 | 23.303142 | 77 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/shadow/pmdk-sd/src/benchmarks/pmembench.cpp
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* pmembench.cpp -- main source file for benchmark framework
*/
#include <cassert>
#include <cerrno>
#include <cfloat>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <dirent.h>
#include <err.h>
#include <getopt.h>
#include <linux/limits.h>
#include <sched.h>
#include <sys/wait.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "benchmark_worker.hpp"
#include "clo.hpp"
#include "clo_vec.hpp"
#include "config_reader.hpp"
#include "file.h"
#include "libpmempool.h"
#include "mmap.h"
#include "os.h"
#include "os_thread.h"
#include "queue.h"
#include "scenario.hpp"
#include "set.h"
#include "util.h"
#ifndef _WIN32
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#endif
/* average time required to get a current time from the system */
unsigned long long Get_time_avg;
#define MIN_EXE_TIME_E 0.5
/*
* struct pmembench -- main context
*/
struct pmembench {
int argc;
char **argv;
struct scenario *scenario;
struct clo_vec *clovec;
bool override_clos;
};
/*
* struct benchmark -- benchmark's context
*/
struct benchmark {
PMDK_LIST_ENTRY(benchmark) next;
struct benchmark_info *info;
void *priv;
struct benchmark_clo *clos;
size_t nclos;
size_t args_size;
};
/*
* struct bench_list -- list of available benchmarks
*/
struct bench_list {
PMDK_LIST_HEAD(benchmarks_head, benchmark) head;
bool initialized;
};
/*
* struct benchmark_opts -- arguments for pmembench
*/
struct benchmark_opts {
bool help;
bool version;
const char *file_name;
};
static struct version_s {
unsigned major;
unsigned minor;
} version = {1, 0};
/* benchmarks list initialization */
static struct bench_list benchmarks;
/* common arguments for benchmarks */
static struct benchmark_clo pmembench_clos[13];
/* list of arguments for pmembench */
static struct benchmark_clo pmembench_opts[2];
CONSTRUCTOR(pmembench_constructor)
void
pmembench_constructor(void)
{
pmembench_opts[0].opt_short = 'h';
pmembench_opts[0].opt_long = "help";
pmembench_opts[0].descr = "Print help";
pmembench_opts[0].type = CLO_TYPE_FLAG;
pmembench_opts[0].off = clo_field_offset(struct benchmark_opts, help);
pmembench_opts[0].ignore_in_res = true;
pmembench_opts[1].opt_short = 'v';
pmembench_opts[1].opt_long = "version";
pmembench_opts[1].descr = "Print version";
pmembench_opts[1].type = CLO_TYPE_FLAG;
pmembench_opts[1].off =
clo_field_offset(struct benchmark_opts, version);
pmembench_opts[1].ignore_in_res = true;
pmembench_clos[0].opt_short = 'h';
pmembench_clos[0].opt_long = "help";
pmembench_clos[0].descr = "Print help for single benchmark";
pmembench_clos[0].type = CLO_TYPE_FLAG;
pmembench_clos[0].off = clo_field_offset(struct benchmark_args, help);
pmembench_clos[0].ignore_in_res = true;
pmembench_clos[1].opt_short = 't';
pmembench_clos[1].opt_long = "threads";
pmembench_clos[1].type = CLO_TYPE_UINT;
pmembench_clos[1].descr = "Number of working threads";
pmembench_clos[1].off =
clo_field_offset(struct benchmark_args, n_threads);
pmembench_clos[1].def = "1";
pmembench_clos[1].type_uint.size =
clo_field_size(struct benchmark_args, n_threads);
pmembench_clos[1].type_uint.base = CLO_INT_BASE_DEC;
pmembench_clos[1].type_uint.min = 1;
pmembench_clos[1].type_uint.max = UINT_MAX;
pmembench_clos[2].opt_short = 'n';
pmembench_clos[2].opt_long = "ops-per-thread";
pmembench_clos[2].type = CLO_TYPE_UINT;
pmembench_clos[2].descr = "Number of operations per thread";
pmembench_clos[2].off =
clo_field_offset(struct benchmark_args, n_ops_per_thread);
pmembench_clos[2].def = "1";
pmembench_clos[2].type_uint.size =
clo_field_size(struct benchmark_args, n_ops_per_thread);
pmembench_clos[2].type_uint.base = CLO_INT_BASE_DEC;
pmembench_clos[2].type_uint.min = 1;
pmembench_clos[2].type_uint.max = ULLONG_MAX;
pmembench_clos[3].opt_short = 'd';
pmembench_clos[3].opt_long = "data-size";
pmembench_clos[3].type = CLO_TYPE_UINT;
pmembench_clos[3].descr = "IO data size";
pmembench_clos[3].off = clo_field_offset(struct benchmark_args, dsize);
pmembench_clos[3].def = "1";
pmembench_clos[3].type_uint.size =
clo_field_size(struct benchmark_args, dsize);
pmembench_clos[3].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
pmembench_clos[3].type_uint.min = 1;
pmembench_clos[3].type_uint.max = ULONG_MAX;
pmembench_clos[4].opt_short = 'f';
pmembench_clos[4].opt_long = "file";
pmembench_clos[4].type = CLO_TYPE_STR;
pmembench_clos[4].descr = "File name";
pmembench_clos[4].off = clo_field_offset(struct benchmark_args, fname);
pmembench_clos[4].def = "/mnt/pmem/testfile";
pmembench_clos[4].ignore_in_res = true;
pmembench_clos[5].opt_short = 'm';
pmembench_clos[5].opt_long = "fmode";
pmembench_clos[5].type = CLO_TYPE_UINT;
pmembench_clos[5].descr = "File mode";
pmembench_clos[5].off = clo_field_offset(struct benchmark_args, fmode);
pmembench_clos[5].def = "0666";
pmembench_clos[5].ignore_in_res = true;
pmembench_clos[5].type_uint.size =
clo_field_size(struct benchmark_args, fmode);
pmembench_clos[5].type_uint.base = CLO_INT_BASE_OCT;
pmembench_clos[5].type_uint.min = 0;
pmembench_clos[5].type_uint.max = ULONG_MAX;
pmembench_clos[6].opt_short = 's';
pmembench_clos[6].opt_long = "seed";
pmembench_clos[6].type = CLO_TYPE_UINT;
pmembench_clos[6].descr = "PRNG seed";
pmembench_clos[6].off = clo_field_offset(struct benchmark_args, seed);
pmembench_clos[6].def = "0";
pmembench_clos[6].type_uint.size =
clo_field_size(struct benchmark_args, seed);
pmembench_clos[6].type_uint.base = CLO_INT_BASE_DEC;
pmembench_clos[6].type_uint.min = 0;
pmembench_clos[6].type_uint.max = ~0;
pmembench_clos[7].opt_short = 'r';
pmembench_clos[7].opt_long = "repeats";
pmembench_clos[7].type = CLO_TYPE_UINT;
pmembench_clos[7].descr = "Number of repeats of scenario";
pmembench_clos[7].off =
clo_field_offset(struct benchmark_args, repeats);
pmembench_clos[7].def = "1";
pmembench_clos[7].type_uint.size =
clo_field_size(struct benchmark_args, repeats);
pmembench_clos[7].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
pmembench_clos[7].type_uint.min = 1;
pmembench_clos[7].type_uint.max = ULONG_MAX;
pmembench_clos[8].opt_short = 'F';
pmembench_clos[8].opt_long = "thread-affinity";
pmembench_clos[8].descr = "Set worker threads CPU affinity mask";
pmembench_clos[8].type = CLO_TYPE_FLAG;
pmembench_clos[8].off =
clo_field_offset(struct benchmark_args, thread_affinity);
pmembench_clos[8].def = "false";
/*
* XXX: add a link to the blog post about optimal affinity
* once it is published
*/
pmembench_clos[9].opt_short = 'I';
pmembench_clos[9].opt_long = "affinity-list";
pmembench_clos[9].descr =
"Set affinity mask as a list of CPUs separated by semicolon";
pmembench_clos[9].type = CLO_TYPE_STR;
pmembench_clos[9].off =
clo_field_offset(struct benchmark_args, affinity_list);
pmembench_clos[9].def = "";
pmembench_clos[9].ignore_in_res = true;
pmembench_clos[10].opt_long = "main-affinity";
pmembench_clos[10].descr = "Set affinity for main thread";
pmembench_clos[10].type = CLO_TYPE_INT;
pmembench_clos[10].off =
clo_field_offset(struct benchmark_args, main_affinity);
pmembench_clos[10].def = "-1";
pmembench_clos[10].ignore_in_res = false;
pmembench_clos[10].type_int.size =
clo_field_size(struct benchmark_args, main_affinity);
pmembench_clos[10].type_int.base = CLO_INT_BASE_DEC;
pmembench_clos[10].type_int.min = (-1);
pmembench_clos[10].type_int.max = LONG_MAX;
pmembench_clos[11].opt_short = 'e';
pmembench_clos[11].opt_long = "min-exe-time";
pmembench_clos[11].type = CLO_TYPE_UINT;
pmembench_clos[11].descr = "Minimal execution time in seconds";
pmembench_clos[11].off =
clo_field_offset(struct benchmark_args, min_exe_time);
pmembench_clos[11].def = "0";
pmembench_clos[11].type_uint.size =
clo_field_size(struct benchmark_args, min_exe_time);
pmembench_clos[11].type_uint.base = CLO_INT_BASE_DEC;
pmembench_clos[11].type_uint.min = 0;
pmembench_clos[11].type_uint.max = ULONG_MAX;
pmembench_clos[12].opt_short = 'p';
pmembench_clos[12].opt_long = "dynamic-poolset";
pmembench_clos[12].type = CLO_TYPE_FLAG;
pmembench_clos[12].descr =
"Allow benchmark to create poolset and reuse files";
pmembench_clos[12].off =
clo_field_offset(struct benchmark_args, is_dynamic_poolset);
pmembench_clos[12].ignore_in_res = true;
}
/*
* pmembench_get_priv -- return private structure of benchmark
*/
void *
pmembench_get_priv(struct benchmark *bench)
{
return bench->priv;
}
/*
* pmembench_set_priv -- set private structure of benchmark
*/
void
pmembench_set_priv(struct benchmark *bench, void *priv)
{
bench->priv = priv;
}
/*
* pmembench_register -- register benchmark
*/
int
pmembench_register(struct benchmark_info *bench_info)
{
assert(bench_info->name && bench_info->brief);
struct benchmark *bench = (struct benchmark *)calloc(1, sizeof(*bench));
assert(bench != nullptr);
bench->info = bench_info;
if (!benchmarks.initialized) {
PMDK_LIST_INIT(&benchmarks.head);
benchmarks.initialized = true;
}
PMDK_LIST_INSERT_HEAD(&benchmarks.head, bench, next);
return 0;
}
/*
* pmembench_get_info -- return structure with information about benchmark
*/
struct benchmark_info *
pmembench_get_info(struct benchmark *bench)
{
return bench->info;
}
/*
* pmembench_release_clos -- release CLO structure
*/
static void
pmembench_release_clos(struct benchmark *bench)
{
free(bench->clos);
}
/*
* pmembench_merge_clos -- merge benchmark's CLOs with common CLOs
*/
static void
pmembench_merge_clos(struct benchmark *bench)
{
size_t size = sizeof(struct benchmark_args);
size_t pb_nclos = ARRAY_SIZE(pmembench_clos);
size_t nclos = pb_nclos;
size_t i;
if (bench->info->clos) {
size += bench->info->opts_size;
nclos += bench->info->nclos;
}
auto *clos = (struct benchmark_clo *)calloc(
nclos, sizeof(struct benchmark_clo));
assert(clos != nullptr);
memcpy(clos, pmembench_clos, pb_nclos * sizeof(struct benchmark_clo));
if (bench->info->clos) {
memcpy(&clos[pb_nclos], bench->info->clos,
bench->info->nclos * sizeof(struct benchmark_clo));
for (i = 0; i < bench->info->nclos; i++) {
clos[pb_nclos + i].off += sizeof(struct benchmark_args);
}
}
bench->clos = clos;
bench->nclos = nclos;
bench->args_size = size;
}
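/*
* Layout sketch of the merged argument structure:
*
* [ struct benchmark_args | benchmark-specific opts ]
*
* The benchmark's own CLO offsets were relative to its opts structure,
* hence the sizeof(struct benchmark_args) shift above.
*/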
/*
* pmembench_run_worker -- run worker with benchmark operation
*/
static int
pmembench_run_worker(struct benchmark *bench, struct worker_info *winfo)
{
benchmark_time_get(&winfo->beg);
for (size_t i = 0; i < winfo->nops; i++) {
if (bench->info->operation(bench, &winfo->opinfo[i]))
return -1;
benchmark_time_get(&winfo->opinfo[i].end);
}
benchmark_time_get(&winfo->end);
return 0;
}
/*
* pmembench_print_header -- print header of benchmark's results
*/
static void
pmembench_print_header(struct pmembench *pb, struct benchmark *bench,
struct clo_vec *clovec)
{
if (pb->scenario) {
printf("%s: %s [%" PRIu64 "]%s%s%s\n", pb->scenario->name,
bench->info->name, clovec->nargs,
pb->scenario->group ? " [group: " : "",
pb->scenario->group ? pb->scenario->group : "",
pb->scenario->group ? "]" : "");
} else {
printf("%s [%" PRIu64 "]\n", bench->info->name, clovec->nargs);
}
printf("total-avg[sec];"
"ops-per-second[1/sec];"
"total-max[sec];"
"total-min[sec];"
"total-median[sec];"
"total-std-dev[sec];"
"latency-avg[nsec];"
"latency-min[nsec];"
"latency-max[nsec];"
"latency-std-dev[nsec];"
"latency-pctl-50.0%%[nsec];"
"latency-pctl-99.0%%[nsec];"
"latency-pctl-99.9%%[nsec]");
size_t i;
for (i = 0; i < bench->nclos; i++) {
if (!bench->clos[i].ignore_in_res) {
printf(";%s", bench->clos[i].opt_long);
}
}
if (bench->info->print_bandwidth)
printf(";bandwidth[MiB/s]");
if (bench->info->print_extra_headers)
bench->info->print_extra_headers();
printf("\n");
}
/*
* pmembench_print_results -- print benchmark's results
*/
static void
pmembench_print_results(struct benchmark *bench, struct benchmark_args *args,
struct total_results *res)
{
printf("%f;%f;%f;%f;%f;%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64
";%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64,
res->total.avg, res->nopsps, res->total.max, res->total.min,
res->total.med, res->total.std_dev, res->latency.avg,
res->latency.min, res->latency.max, res->latency.std_dev,
res->latency.pctl50_0p, res->latency.pctl99_0p,
res->latency.pctl99_9p);
size_t i;
for (i = 0; i < bench->nclos; i++) {
if (!bench->clos[i].ignore_in_res)
printf(";%s",
benchmark_clo_str(&bench->clos[i], args,
bench->args_size));
}
if (bench->info->print_bandwidth)
printf(";%f", res->nopsps * args->dsize / 1024 / 1024);
if (bench->info->print_extra_values)
bench->info->print_extra_values(bench, args, res);
printf("\n");
}
/*
* pmembench_parse_clo -- parse command line arguments for benchmark
*/
static int
pmembench_parse_clo(struct pmembench *pb, struct benchmark *bench,
struct clo_vec *clovec)
{
if (!pb->scenario) {
return benchmark_clo_parse(pb->argc, pb->argv, bench->clos,
bench->nclos, clovec);
}
if (pb->override_clos) {
/*
* Use only ARRAY_SIZE(pmembench_clos) clos - these are the
* general clos and are placed at the beginning of the
* clos array.
*/
int ret = benchmark_override_clos_in_scenario(
pb->scenario, pb->argc, pb->argv, bench->clos,
ARRAY_SIZE(pmembench_clos));
/* reset for the next benchmark in the config file */
optind = 1;
if (ret)
return ret;
}
return benchmark_clo_parse_scenario(pb->scenario, bench->clos,
bench->nclos, clovec);
}
/*
* pmembench_parse_affinity -- parse affinity list
*/
static int
pmembench_parse_affinity(const char *list, char **saveptr)
{
char *str = nullptr;
char *end;
int cpu = 0;
if (*saveptr) {
str = strtok(nullptr, ";");
if (str == nullptr) {
/* end of list - we have to start over */
free(*saveptr);
*saveptr = nullptr;
}
}
if (!*saveptr) {
*saveptr = strdup(list);
if (*saveptr == nullptr) {
perror("strdup");
return -1;
}
str = strtok(*saveptr, ";");
if (str == nullptr)
goto err;
}
if ((str == nullptr) || (*str == '\0'))
goto err;
cpu = strtol(str, &end, 10);
if (*end != '\0')
goto err;
return cpu;
err:
errno = EINVAL;
perror("pmembench_parse_affinity");
free(*saveptr);
*saveptr = nullptr;
return -1;
}
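/*
* Example: for list = "0;2;4" successive calls return 0, 2 and 4; the next
* call frees the internal copy in *saveptr and starts over at 0, so worker
* threads get CPUs assigned round-robin from the list.
*/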
/*
* pmembench_init_workers -- init benchmark's workers
*/
static int
pmembench_init_workers(struct benchmark_worker **workers,
struct benchmark *bench, struct benchmark_args *args)
{
unsigned i;
int ncpus = 0;
char *saveptr = nullptr;
int ret = 0;
if (args->thread_affinity) {
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus <= 0)
return -1;
}
for (i = 0; i < args->n_threads; i++) {
workers[i] = benchmark_worker_alloc();
if (args->thread_affinity) {
int cpu;
os_cpu_set_t cpuset;
if (args->affinity_list &&
*args->affinity_list != '\0') {
cpu = pmembench_parse_affinity(
args->affinity_list, &saveptr);
if (cpu == -1) {
ret = -1;
goto end;
}
} else {
cpu = (int)i;
}
assert(ncpus > 0);
cpu %= ncpus;
os_cpu_zero(&cpuset);
os_cpu_set(cpu, &cpuset);
errno = os_thread_setaffinity_np(&workers[i]->thread,
sizeof(os_cpu_set_t),
&cpuset);
if (errno) {
perror("os_thread_setaffinity_np");
ret = -1;
goto end;
}
}
workers[i]->info.index = i;
workers[i]->info.nops = args->n_ops_per_thread;
workers[i]->info.opinfo = (struct operation_info *)calloc(
args->n_ops_per_thread, sizeof(struct operation_info));
size_t j;
for (j = 0; j < args->n_ops_per_thread; j++) {
workers[i]->info.opinfo[j].worker = &workers[i]->info;
workers[i]->info.opinfo[j].args = args;
workers[i]->info.opinfo[j].index = j;
}
workers[i]->bench = bench;
workers[i]->args = args;
workers[i]->func = pmembench_run_worker;
workers[i]->init = bench->info->init_worker;
workers[i]->exit = bench->info->free_worker;
if (benchmark_worker_init(workers[i])) {
fprintf(stderr,
"thread number %u initialization failed\n", i);
ret = -1;
goto end;
}
}
end:
free(saveptr);
return ret;
}
/*
* results_store -- store results of a single repeat
*/
static void
results_store(struct bench_results *res, struct benchmark_worker **workers,
unsigned nthreads, size_t nops)
{
for (unsigned i = 0; i < nthreads; i++) {
res->thres[i]->beg = workers[i]->info.beg;
res->thres[i]->end = workers[i]->info.end;
for (size_t j = 0; j < nops; j++) {
res->thres[i]->end_op[j] =
workers[i]->info.opinfo[j].end;
}
}
}
/*
* compare_time -- compare time values
*/
static int
compare_time(const void *p1, const void *p2)
{
const auto *t1 = (const benchmark_time_t *)p1;
const auto *t2 = (const benchmark_time_t *)p2;
return benchmark_time_compare(t1, t2);
}
/*
* compare_doubles -- comparing function used for sorting
*/
static int
compare_doubles(const void *a1, const void *b1)
{
const auto *a = (const double *)a1;
const auto *b = (const double *)b1;
return (*a > *b) - (*a < *b);
}
/*
* compare_uint64t -- comparing function used for sorting
*/
static int
compare_uint64t(const void *a1, const void *b1)
{
const auto *a = (const uint64_t *)a1;
const auto *b = (const uint64_t *)b1;
return (*a > *b) - (*a < *b);
}
/*
* results_alloc -- prepare structure to store all benchmark results
*/
static struct total_results *
results_alloc(struct benchmark_args *args)
{
struct total_results *total =
(struct total_results *)malloc(sizeof(*total));
assert(total != nullptr);
total->nrepeats = args->repeats;
total->nthreads = args->n_threads;
total->nops = args->n_ops_per_thread;
total->res = (struct bench_results *)malloc(args->repeats *
sizeof(*total->res));
assert(total->res != nullptr);
for (size_t i = 0; i < args->repeats; i++) {
struct bench_results *res = &total->res[i];
assert(args->n_threads != 0);
res->thres = (struct thread_results **)malloc(
args->n_threads * sizeof(*res->thres));
assert(res->thres != nullptr);
for (size_t j = 0; j < args->n_threads; j++) {
res->thres[j] = (struct thread_results *)malloc(
sizeof(*res->thres[j]) +
args->n_ops_per_thread *
sizeof(benchmark_time_t));
assert(res->thres[j] != nullptr);
}
}
return total;
}
/*
* results_free -- release results structure
*/
static void
results_free(struct total_results *total)
{
for (size_t i = 0; i < total->nrepeats; i++) {
for (size_t j = 0; j < total->nthreads; j++)
free(total->res[i].thres[j]);
free(total->res[i].thres);
}
free(total->res);
free(total);
}
/*
* get_total_results -- return results of all repeats of scenario
*/
static void
get_total_results(struct total_results *tres)
{
assert(tres->nrepeats != 0);
assert(tres->nthreads != 0);
assert(tres->nops != 0);
/* reset results */
memset(&tres->total, 0, sizeof(tres->total));
memset(&tres->latency, 0, sizeof(tres->latency));
tres->total.min = DBL_MAX;
tres->total.max = DBL_MIN;
tres->latency.min = UINT64_MAX;
tres->latency.max = 0;
/* allocate helper arrays */
benchmark_time_t *tbeg =
(benchmark_time_t *)malloc(tres->nthreads * sizeof(*tbeg));
assert(tbeg != nullptr);
benchmark_time_t *tend =
(benchmark_time_t *)malloc(tres->nthreads * sizeof(*tend));
assert(tend != nullptr);
auto *totals = (double *)malloc(tres->nrepeats * sizeof(double));
assert(totals != nullptr);
/* estimate total penalty of getting time from the system */
benchmark_time_t Tget;
unsigned long long nsecs = tres->nops * Get_time_avg;
benchmark_time_set(&Tget, nsecs);
for (size_t i = 0; i < tres->nrepeats; i++) {
struct bench_results *res = &tres->res[i];
/* get start and end timestamps of each worker */
for (size_t j = 0; j < tres->nthreads; j++) {
tbeg[j] = res->thres[j]->beg;
tend[j] = res->thres[j]->end;
}
/* sort start and end timestamps */
qsort(tbeg, tres->nthreads, sizeof(benchmark_time_t),
compare_time);
qsort(tend, tres->nthreads, sizeof(benchmark_time_t),
compare_time);
/* calculating time interval between start and end time */
benchmark_time_t Tbeg = tbeg[0];
benchmark_time_t Tend = tend[tres->nthreads - 1];
benchmark_time_t Ttot_ove;
benchmark_time_diff(&Ttot_ove, &Tbeg, &Tend);
/*
* subtract time used for getting the current time from the
* system
*/
benchmark_time_t Ttot;
benchmark_time_diff(&Ttot, &Tget, &Ttot_ove);
double Stot = benchmark_time_get_secs(&Ttot);
if (Stot > tres->total.max)
tres->total.max = Stot;
if (Stot < tres->total.min)
tres->total.min = Stot;
tres->total.avg += Stot;
totals[i] = Stot;
}
/* median */
qsort(totals, tres->nrepeats, sizeof(double), compare_doubles);
if (tres->nrepeats % 2) {
tres->total.med = totals[tres->nrepeats / 2];
} else {
double m1 = totals[tres->nrepeats / 2];
double m2 = totals[tres->nrepeats / 2 - 1];
tres->total.med = (m1 + m2) / 2.0;
}
/* total average time */
tres->total.avg /= (double)tres->nrepeats;
/* number of operations per second */
tres->nopsps =
(double)tres->nops * (double)tres->nthreads / tres->total.avg;
/* std deviation of total time */
for (size_t i = 0; i < tres->nrepeats; i++) {
double dev = (totals[i] - tres->total.avg);
dev *= dev;
tres->total.std_dev += dev;
}
tres->total.std_dev = sqrt(tres->total.std_dev / tres->nrepeats);
/* latency */
for (size_t i = 0; i < tres->nrepeats; i++) {
struct bench_results *res = &tres->res[i];
for (size_t j = 0; j < tres->nthreads; j++) {
struct thread_results *thres = res->thres[j];
benchmark_time_t *beg = &thres->beg;
for (size_t o = 0; o < tres->nops; o++) {
benchmark_time_t lat;
benchmark_time_diff(&lat, beg,
&thres->end_op[o]);
uint64_t nsecs = benchmark_time_get_nsecs(&lat);
/* min, max latency */
if (nsecs > tres->latency.max)
tres->latency.max = nsecs;
if (nsecs < tres->latency.min)
tres->latency.min = nsecs;
tres->latency.avg += nsecs;
beg = &thres->end_op[o];
}
}
}
/* average latency */
size_t count = tres->nrepeats * tres->nthreads * tres->nops;
assert(count > 0);
tres->latency.avg /= count;
auto *ntotals = (uint64_t *)calloc(count, sizeof(uint64_t));
assert(ntotals != nullptr);
count = 0;
/* std deviation of latency and percentiles */
for (size_t i = 0; i < tres->nrepeats; i++) {
struct bench_results *res = &tres->res[i];
for (size_t j = 0; j < tres->nthreads; j++) {
struct thread_results *thres = res->thres[j];
benchmark_time_t *beg = &thres->beg;
for (size_t o = 0; o < tres->nops; o++) {
benchmark_time_t lat;
benchmark_time_diff(&lat, beg,
&thres->end_op[o]);
uint64_t nsecs = benchmark_time_get_nsecs(&lat);
uint64_t dev = (nsecs - tres->latency.avg);
dev *= dev;
tres->latency.std_dev += dev;
beg = &thres->end_op[o];
ntotals[count] = nsecs;
++count;
}
}
}
tres->latency.std_dev = sqrt(tres->latency.std_dev / count);
/* find 50%, 99.0% and 99.9% percentiles */
qsort(ntotals, count, sizeof(uint64_t), compare_uint64t);
uint64_t p50_0 = count * 50 / 100;
uint64_t p99_0 = count * 99 / 100;
uint64_t p99_9 = count * 999 / 1000;
tres->latency.pctl50_0p = ntotals[p50_0];
tres->latency.pctl99_0p = ntotals[p99_0];
tres->latency.pctl99_9p = ntotals[p99_9];
free(ntotals);
free(totals);
free(tend);
free(tbeg);
}
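/*
* Percentile index sketch: with count = nrepeats * nthreads * nops = 2000
* sorted latencies, the reported values are ntotals[1000] (50.0%),
* ntotals[1980] (99.0%) and ntotals[1998] (99.9%).
*/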
/*
* pmembench_print_args -- print arguments for one benchmark
*/
static void
pmembench_print_args(struct benchmark_clo *clos, size_t nclos)
{
struct benchmark_clo clo;
for (size_t i = 0; i < nclos; i++) {
clo = clos[i];
if (clo.opt_short != 0)
printf("\t-%c,", clo.opt_short);
else
printf("\t");
printf("\t--%-15s\t\t%s", clo.opt_long, clo.descr);
if (clo.type != CLO_TYPE_FLAG)
printf(" [default: %s]", clo.def);
if (clo.type == CLO_TYPE_INT) {
if (clo.type_int.min != LONG_MIN)
printf(" [min: %" PRId64 "]", clo.type_int.min);
if (clo.type_int.max != LONG_MAX)
printf(" [max: %" PRId64 "]", clo.type_int.max);
} else if (clo.type == CLO_TYPE_UINT) {
if (clo.type_uint.min != 0)
printf(" [min: %" PRIu64 "]",
clo.type_uint.min);
if (clo.type_uint.max != ULONG_MAX)
printf(" [max: %" PRIu64 "]",
clo.type_uint.max);
}
printf("\n");
}
}
/*
* pmembench_print_help_single -- prints help for single benchmark
*/
static void
pmembench_print_help_single(struct benchmark *bench)
{
struct benchmark_info *info = bench->info;
printf("%s\n%s\n", info->name, info->brief);
printf("\nArguments:\n");
size_t nclos = sizeof(pmembench_clos) / sizeof(struct benchmark_clo);
pmembench_print_args(pmembench_clos, nclos);
if (info->clos == nullptr)
return;
pmembench_print_args(info->clos, info->nclos);
}
/*
* pmembench_print_usage -- print usage of framework
*/
static void
pmembench_print_usage()
{
printf("Usage: $ pmembench [-h|--help] [-v|--version]"
"\t[<benchmark>[<args>]]\n");
printf("\t\t\t\t\t\t[<config>[<scenario>]]\n");
printf("\t\t\t\t\t\t[<config>[<scenario>[<common_args>]]]\n");
}
/*
* pmembench_print_version -- print version of framework
*/
static void
pmembench_print_version()
{
printf("Benchmark framework - version %u.%u\n", version.major,
version.minor);
}
/*
* pmembench_print_examples -- print examples of using the framework
*/
static void
pmembench_print_examples()
{
printf("\nExamples:\n");
printf("$ pmembench <benchmark_name> <args>\n");
printf(" # runs benchmark of name <benchmark> with arguments <args>\n");
printf("or\n");
printf("$ pmembench <config_file>\n");
printf(" # runs all scenarios from config file\n");
printf("or\n");
printf("$ pmembench [<benchmark_name>] [-h|--help [-v|--version]\n");
printf(" # prints help\n");
printf("or\n");
printf("$ pmembench <config_file> <name_of_scenario>\n");
printf(" # runs the specified scenario from config file\n");
printf("$ pmembench <config_file> <name_of_scenario_1> "
"<name_of_scenario_2> <common_args>\n");
printf(" # runs the specified scenarios from config file and overwrites"
" the given common_args from the config file\n");
}
/*
* pmembench_print_help -- print help for framework
*/
static void
pmembench_print_help()
{
pmembench_print_version();
pmembench_print_usage();
printf("\nCommon arguments:\n");
size_t nclos = sizeof(pmembench_opts) / sizeof(struct benchmark_clo);
pmembench_print_args(pmembench_opts, nclos);
printf("\nAvaliable benchmarks:\n");
struct benchmark *bench = nullptr;
PMDK_LIST_FOREACH(bench, &benchmarks.head, next)
printf("\t%-20s\t\t%s\n", bench->info->name, bench->info->brief);
printf("\n$ pmembench <benchmark> --help to print detailed information"
" about benchmark arguments\n");
pmembench_print_examples();
}
/*
* pmembench_get_bench -- searching benchmarks by name
*/
static struct benchmark *
pmembench_get_bench(const char *name)
{
struct benchmark *bench;
PMDK_LIST_FOREACH(bench, &benchmarks.head, next)
{
if (strcmp(name, bench->info->name) == 0)
return bench;
}
return nullptr;
}
/*
* pmembench_parse_opts -- parse arguments for framework
*/
static int
pmembench_parse_opts(struct pmembench *pb)
{
int ret = 0;
int argc = ++pb->argc;
char **argv = --pb->argv;
struct benchmark_opts *opts = nullptr;
struct clo_vec *clovec;
size_t size, n_clos;
size = sizeof(struct benchmark_opts);
n_clos = ARRAY_SIZE(pmembench_opts);
clovec = clo_vec_alloc(size);
assert(clovec != nullptr);
if (benchmark_clo_parse(argc, argv, pmembench_opts, n_clos, clovec)) {
ret = -1;
goto out;
}
opts = (struct benchmark_opts *)clo_vec_get_args(clovec, 0);
if (opts == nullptr) {
ret = -1;
goto out;
}
if (opts->help)
pmembench_print_help();
if (opts->version)
pmembench_print_version();
out:
clo_vec_free(clovec);
return ret;
}
/*
* pmembench_remove_file -- remove file or directory if exists
*/
static int
pmembench_remove_file(const char *path)
{
int ret = 0;
os_stat_t status;
char *tmp;
int exists = util_file_exists(path);
if (exists < 0)
return -1;
if (!exists)
return 0;
if (os_stat(path, &status) != 0)
return 0;
if (!(status.st_mode & S_IFDIR))
return pmempool_rm(path, 0);
struct dir_handle it;
struct file_info info;
if (util_file_dir_open(&it, path)) {
return -1;
}
while (util_file_dir_next(&it, &info) == 0) {
if (strcmp(info.filename, ".") == 0 ||
strcmp(info.filename, "..") == 0)
continue;
tmp = (char *)malloc(strlen(path) + strlen(info.filename) + 2);
if (tmp == nullptr)
return -1;
sprintf(tmp, "%s" OS_DIR_SEP_STR "%s", path, info.filename);
ret = info.is_dir ? pmembench_remove_file(tmp)
: util_unlink(tmp);
free(tmp);
if (ret != 0) {
util_file_dir_close(&it);
return ret;
}
}
util_file_dir_close(&it);
return util_file_dir_remove(path);
}
/*
* pmembench_single_repeat -- runs a single repeat of the benchmark
*/
static int
pmembench_single_repeat(struct benchmark *bench, struct benchmark_args *args,
struct bench_results *res)
{
int ret = 0;
if (args->main_affinity != -1) {
os_cpu_set_t cpuset;
os_cpu_zero(&cpuset);
os_thread_t self;
os_thread_self(&self);
os_cpu_set(args->main_affinity, &cpuset);
errno = os_thread_setaffinity_np(&self, sizeof(os_cpu_set_t),
&cpuset);
if (errno) {
perror("os_thread_setaffinity_np");
return -1;
}
sched_yield();
}
if (bench->info->rm_file && !args->is_dynamic_poolset) {
ret = pmembench_remove_file(args->fname);
if (ret != 0 && errno != ENOENT) {
perror("removing file failed");
return ret;
}
}
if (bench->info->init) {
if (bench->info->init(bench, args)) {
warn("%s: initialization failed", bench->info->name);
return -1;
}
}
assert(bench->info->operation != nullptr);
assert(args->n_threads != 0);
struct benchmark_worker **workers;
workers = (struct benchmark_worker **)malloc(
args->n_threads * sizeof(struct benchmark_worker *));
assert(workers != nullptr);
if ((ret = pmembench_init_workers(workers, bench, args)) != 0) {
goto out;
}
unsigned j;
for (j = 0; j < args->n_threads; j++) {
benchmark_worker_run(workers[j]);
}
for (j = 0; j < args->n_threads; j++) {
benchmark_worker_join(workers[j]);
if (workers[j]->ret != 0) {
ret = workers[j]->ret;
fprintf(stderr, "thread number %u failed\n", j);
}
}
results_store(res, workers, args->n_threads, args->n_ops_per_thread);
for (j = 0; j < args->n_threads; j++) {
benchmark_worker_exit(workers[j]);
free(workers[j]->info.opinfo);
benchmark_worker_free(workers[j]);
}
out:
free(workers);
if (bench->info->exit)
bench->info->exit(bench, args);
return ret;
}
/*
* scale_up_min_exe_time -- scale up the number of operations to obtain an
* execution time not smaller than the assumed minimal execution time
*/
int
scale_up_min_exe_time(struct benchmark *bench, struct benchmark_args *args,
struct total_results **total_results)
{
const double min_exe_time = args->min_exe_time;
struct total_results *total_res = *total_results;
total_res->nrepeats = 1;
do {
/*
* run single benchmark repeat to probe execution time
*/
int ret = pmembench_single_repeat(bench, args,
&total_res->res[0]);
if (ret != 0)
return 1;
get_total_results(total_res);
if (min_exe_time < total_res->total.min + MIN_EXE_TIME_E)
break;
/*
* scale up number of operations to get assumed minimal
* execution time
*/
args->n_ops_per_thread = (size_t)(
(double)args->n_ops_per_thread *
(min_exe_time + MIN_EXE_TIME_E) / total_res->total.min);
results_free(total_res);
*total_results = results_alloc(args);
assert(*total_results != nullptr);
total_res = *total_results;
total_res->nrepeats = 1;
} while (1);
total_res->nrepeats = args->repeats;
return 0;
}
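/*
* Scaling example: with --min-exe-time=5 and a probe repeat that ran
* n_ops_per_thread = 1000 in total.min = 0.5 s, the next probe uses
* 1000 * (5 + 0.5) / 0.5 = 11000 operations per thread.
*/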
/*
* is_absolute_path_to_directory -- checks if passed argument is absolute
* path to directory
*/
static bool
is_absolute_path_to_directory(const char *path)
{
os_stat_t sb;
return util_is_absolute_path(path) && os_stat(path, &sb) == 0 &&
S_ISDIR(sb.st_mode);
}
/*
* pmembench_run -- runs one benchmark. Parses arguments and performs
* specific functions.
*/
static int
pmembench_run(struct pmembench *pb, struct benchmark *bench)
{
enum file_type type;
char old_wd[PATH_MAX];
int ret = 0;
struct benchmark_args *args = nullptr;
struct total_results *total_res = nullptr;
struct latency *stats = nullptr;
double *workers_times = nullptr;
struct clo_vec *clovec = nullptr;
assert(bench->info != nullptr);
pmembench_merge_clos(bench);
/*
* Check if PMEMBENCH_DIR env var is set and change
* the working directory accordingly.
*/
char *wd = os_getenv("PMEMBENCH_DIR");
if (wd != nullptr) {
/* get current dir name */
if (getcwd(old_wd, PATH_MAX) == nullptr) {
perror("getcwd");
ret = -1;
goto out_release_clos;
}
os_stat_t stat_buf;
if (os_stat(wd, &stat_buf) != 0) {
perror("os_stat");
ret = -1;
goto out_release_clos;
}
if (!S_ISDIR(stat_buf.st_mode)) {
warn("PMEMBENCH_DIR is not a directory: %s", wd);
ret = -1;
goto out_release_clos;
}
if (chdir(wd)) {
perror("chdir(wd)");
ret = -1;
goto out_release_clos;
}
}
if (bench->info->pre_init) {
if (bench->info->pre_init(bench)) {
warn("%s: pre-init failed", bench->info->name);
ret = -1;
goto out_old_wd;
}
}
clovec = clo_vec_alloc(bench->args_size);
assert(clovec != nullptr);
if (pmembench_parse_clo(pb, bench, clovec)) {
warn("%s: parsing command line arguments failed",
bench->info->name);
ret = -1;
goto out_release_args;
}
args = (struct benchmark_args *)clo_vec_get_args(clovec, 0);
if (args == nullptr) {
warn("%s: parsing command line arguments failed",
bench->info->name);
ret = -1;
goto out_release_args;
}
if (args->help) {
pmembench_print_help_single(bench);
goto out;
}
if (strlen(args->fname) > PATH_MAX) {
warn("Filename too long");
ret = -1;
goto out;
}
type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
ret = -1;
goto out;
}
pmembench_print_header(pb, bench, clovec);
size_t args_i;
for (args_i = 0; args_i < clovec->nargs; args_i++) {
args = (struct benchmark_args *)clo_vec_get_args(clovec,
args_i);
if (args == nullptr) {
warn("%s: parsing command line arguments failed",
bench->info->name);
ret = -1;
goto out;
}
args->opts = (void *)((uintptr_t)args +
sizeof(struct benchmark_args));
if (args->is_dynamic_poolset) {
if (!bench->info->allow_poolset) {
fprintf(stderr,
"dynamic poolset not supported\n");
goto out;
}
if (!is_absolute_path_to_directory(args->fname)) {
fprintf(stderr,
"path must be absolute and point to a directory\n");
goto out;
}
} else {
args->is_poolset =
util_is_poolset_file(args->fname) == 1;
if (args->is_poolset) {
if (!bench->info->allow_poolset) {
fprintf(stderr,
"poolset files not supported\n");
goto out;
}
args->fsize = util_poolset_size(args->fname);
if (!args->fsize) {
fprintf(stderr,
"invalid size of poolset\n");
goto out;
}
} else if (type == TYPE_DEVDAX) {
args->fsize = util_file_get_size(args->fname);
if (!args->fsize) {
fprintf(stderr,
"invalid size of device dax\n");
goto out;
}
}
}
unsigned n_threads_copy = args->n_threads;
args->n_threads =
!bench->info->multithread ? 1 : args->n_threads;
size_t n_ops_per_thread_copy = args->n_ops_per_thread;
args->n_ops_per_thread =
!bench->info->multiops ? 1 : args->n_ops_per_thread;
stats = (struct latency *)calloc(args->repeats,
sizeof(struct latency));
assert(stats != nullptr);
workers_times = (double *)calloc(
args->n_threads * args->repeats, sizeof(double));
assert(workers_times != nullptr);
total_res = results_alloc(args);
assert(total_res != nullptr);
unsigned i = 0;
if (args->min_exe_time != 0 && bench->info->multiops) {
ret = scale_up_min_exe_time(bench, args, &total_res);
if (ret != 0)
goto out;
i = 1;
}
for (; i < args->repeats; i++) {
ret = pmembench_single_repeat(bench, args,
&total_res->res[i]);
if (ret != 0)
goto out;
}
get_total_results(total_res);
pmembench_print_results(bench, args, total_res);
args->n_ops_per_thread = n_ops_per_thread_copy;
args->n_threads = n_threads_copy;
results_free(total_res);
free(stats);
free(workers_times);
total_res = nullptr;
stats = nullptr;
workers_times = nullptr;
}
out:
if (total_res)
results_free(total_res);
if (stats)
free(stats);
if (workers_times)
free(workers_times);
out_release_args:
clo_vec_free(clovec);
out_old_wd:
/* restore the original working directory */
if (wd != nullptr) { /* Only if PMEMBENCH_DIR env var was defined */
if (chdir(old_wd)) {
perror("chdir(old_wd)");
ret = -1;
}
}
out_release_clos:
pmembench_release_clos(bench);
return ret;
}
/*
* pmembench_free_benchmarks -- release all benchmarks
*/
static void __attribute__((destructor)) pmembench_free_benchmarks(void)
{
while (!PMDK_LIST_EMPTY(&benchmarks.head)) {
struct benchmark *bench = PMDK_LIST_FIRST(&benchmarks.head);
PMDK_LIST_REMOVE(bench, next);
free(bench);
}
}
/*
* pmembench_run_scenario -- run single benchmark's scenario
*/
static int
pmembench_run_scenario(struct pmembench *pb, struct scenario *scenario)
{
struct benchmark *bench = pmembench_get_bench(scenario->benchmark);
if (nullptr == bench) {
fprintf(stderr, "unknown benchmark: %s\n", scenario->benchmark);
return -1;
}
pb->scenario = scenario;
return pmembench_run(pb, bench);
}
/*
* pmembench_run_scenarios -- run all scenarios
*/
static int
pmembench_run_scenarios(struct pmembench *pb, struct scenarios *ss)
{
struct scenario *scenario;
FOREACH_SCENARIO(scenario, ss)
{
if (pmembench_run_scenario(pb, scenario) != 0)
return -1;
}
return 0;
}
/*
* pmembench_run_config -- run one or all scenarios from config file
*/
static int
pmembench_run_config(struct pmembench *pb, const char *config)
{
struct scenarios *ss = nullptr;
struct config_reader *cr = config_reader_alloc();
assert(cr != nullptr);
int ret = 0;
if ((ret = config_reader_read(cr, config)))
goto out;
if ((ret = config_reader_get_scenarios(cr, &ss)))
goto out;
assert(ss != nullptr);
if (pb->argc == 1) {
if ((ret = pmembench_run_scenarios(pb, ss)) != 0)
goto out_scenarios;
} else {
/* Skip the config file name in cmd line params */
int tmp_argc = pb->argc - 1;
char **tmp_argv = pb->argv + 1;
if (!contains_scenarios(tmp_argc, tmp_argv, ss)) {
/* no scenarios in cmd line arguments - parse params */
pb->override_clos = true;
if ((ret = pmembench_run_scenarios(pb, ss)) != 0)
goto out_scenarios;
} else { /* scenarios in cmd line */
struct scenarios *cmd_ss = scenarios_alloc();
assert(cmd_ss != nullptr);
int parsed_scenarios = clo_get_scenarios(
tmp_argc, tmp_argv, ss, cmd_ss);
if (parsed_scenarios < 0)
goto out_cmd;
/*
* If there are any cmd line args left, treat
* them as config file params override.
*/
if (tmp_argc - parsed_scenarios)
pb->override_clos = true;
/*
* Skip the scenarios in the cmd line,
* pmembench_run_scenarios does not expect them and will
* fail otherwise.
*/
pb->argc -= parsed_scenarios;
pb->argv += parsed_scenarios;
ret = pmembench_run_scenarios(pb, cmd_ss);
out_cmd:
scenarios_free(cmd_ss);
}
}
out_scenarios:
scenarios_free(ss);
out:
config_reader_free(cr);
return ret;
}
int
main(int argc, char *argv[])
{
util_init();
util_mmap_init();
/*
* Parse common command line arguments and
* benchmark's specific ones.
*/
if (argc < 2) {
pmembench_print_usage();
exit(EXIT_FAILURE);
}
int ret = 0;
int fexists;
struct benchmark *bench;
struct pmembench *pb = (struct pmembench *)calloc(1, sizeof(*pb));
assert(pb != nullptr);
Get_time_avg = benchmark_get_avg_get_time();
pb->argc = --argc;
pb->argv = ++argv;
char *bench_name = pb->argv[0];
if (nullptr == bench_name) {
ret = -1;
goto out;
}
fexists = os_access(bench_name, R_OK) == 0;
bench = pmembench_get_bench(bench_name);
if (nullptr != bench)
ret = pmembench_run(pb, bench);
else if (fexists)
ret = pmembench_run_config(pb, bench_name);
else if ((ret = pmembench_parse_opts(pb)) != 0) {
pmembench_print_usage();
goto out;
}
out:
free(pb);
util_mmap_fini();
return ret;
}
#ifdef _MSC_VER
extern "C" {
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
}
#endif
| 41,103 | 24.078707 | 77 |
cpp
|