repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/test/include/test/SFMT-alti.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT-alti.h
*
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
* pseudorandom number generator
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software.
* see LICENSE.txt
*/
#ifndef SFMT_ALTI_H
#define SFMT_ALTI_H
/**
* This function represents the recursion formula in AltiVec and BIG ENDIAN.
* @param a a 128-bit part of the interal state array
* @param b a 128-bit part of the interal state array
* @param c a 128-bit part of the interal state array
* @param d a 128-bit part of the interal state array
* @return output
*/
/*
 * One step of the SFMT recursion, vectorized with AltiVec. Combines four
 * 128-bit words of state (a, b and the two feedback words c, d) into the
 * next 128-bit output word.
 */
JEMALLOC_ALWAYS_INLINE
vector unsigned int vec_recursion(vector unsigned int a,
vector unsigned int b,
vector unsigned int c,
vector unsigned int d) {
/* Per-lane 32-bit shift counts (SFMT parameters SL1/SR1). */
const vector unsigned int sl1 = ALTI_SL1;
const vector unsigned int sr1 = ALTI_SR1;
#ifdef ONLY64
/* 64-bit-oriented variants of the mask and byte-shift permutations. */
const vector unsigned int mask = ALTI_MSK64;
const vector unsigned char perm_sl = ALTI_SL2_PERM64;
const vector unsigned char perm_sr = ALTI_SR2_PERM64;
#else
const vector unsigned int mask = ALTI_MSK;
const vector unsigned char perm_sl = ALTI_SL2_PERM;
const vector unsigned char perm_sr = ALTI_SR2_PERM;
#endif
vector unsigned int v, w, x, y, z;
/*
 * x = a shifted by SL2 bytes, done via vec_perm. The second vec_perm
 * operand is a dummy (the permutation vector reinterpreted); indices
 * >= 16 in the perm table select its bytes as filler.
 * NOTE(review): assumed to match the SFMT 1.3.3 reference tables —
 * verify against the upstream SFMT-alti.h if these are ever touched.
 */
x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
v = a;
y = vec_sr(b, sr1);                               /* b >> SR1, per 32-bit lane */
z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); /* c shifted by SR2 bytes */
w = vec_sl(d, sl1);                               /* d << SL1, per 32-bit lane */
z = vec_xor(z, w);
y = vec_and(y, mask);                             /* apply tempering mask MSK1..4 */
v = vec_xor(v, x);
/* z = (c >> SR2) ^ (d << SL1) ^ ((b >> SR1) & mask) ^ a ^ (a << SL2) */
z = vec_xor(z, y);
z = vec_xor(z, v);
return z;
}
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
int i;
vector unsigned int r, r1, r2;
/* Seed the recursion feedback with the last two 128-bit state words. */
r1 = ctx->sfmt[N - 2].s;
r2 = ctx->sfmt[N - 1].s;
/* First stretch: the partner index i + POS1 is still inside the array. */
for (i = 0; i < N - POS1; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
ctx->sfmt[i].s = r;
/* Slide the two-word feedback window forward. */
r1 = r2;
r2 = r;
}
/* Second stretch: i + POS1 wraps, so read the freshly updated words. */
for (; i < N; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
ctx->sfmt[i].s = r;
r1 = r2;
r2 = r;
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
* @param array an 128-bit array to be filled by pseudorandom numbers.
* @param size number of 128-bit pesudorandom numbers to be generated.
*/
JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
vector unsigned int r, r1, r2;
/* Seed the feedback window from the last two state words.
 * NOTE(review): the code assumes size >= N; smaller sizes would leave
 * state partially stale — confirm against callers. */
r1 = ctx->sfmt[N - 2].s;
r2 = ctx->sfmt[N - 1].s;
/* Outputs 0 .. N-POS1-1: both recursion inputs come from ctx state. */
for (i = 0; i < N - POS1; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
/* Outputs up to N-1: the POS1 partner now comes from the array itself. */
for (; i < N; i++) {
r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
/* main loop */
/* Steady state: both inputs are earlier array outputs. */
for (; i < size - N; i++) {
r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
/* Copy the already-generated portion of the final N outputs back into
 * the internal state (no-op when size >= 2N). */
for (j = 0; j < 2 * N - size; j++) {
ctx->sfmt[j].s = array[j + size - N].s;
}
/* Generate the remaining outputs, mirroring each into the state so the
 * state ends as the last N outputs. */
for (; i < size; i++) {
r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
ctx->sfmt[j++].s = r;
r1 = r2;
r2 = r;
}
}
#ifndef ONLY64
#if defined(__APPLE__)
#define ALTI_SWAP (vector unsigned char) \
(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
#else
#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
#endif
/**
* This function swaps high and low 32-bit of 64-bit integers in user
* specified array.
*
* @param array an 128-bit array to be swaped.
* @param size size of 128-bit array.
*/
JEMALLOC_INLINE void swap(w128_t *array, int size) {
int i;
/* Byte permutation exchanging the two 32-bit halves of each 64-bit lane
 * (see ALTI_SWAP above). */
const vector unsigned char perm = ALTI_SWAP;
for (i = 0; i < size; i++) {
/* Second vec_perm operand is a dummy: all perm indices are < 16, so
 * only bytes of array[i].s are selected. */
array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
}
}
#endif
#endif
| 5,921 | 30.668449 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/test/include/test/SFMT-params86243.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS86243_H
#define SFMT_PARAMS86243_H
#define POS1 366
#define SL1 6
#define SL2 7
#define SR1 19
#define SR2 1
#define MSK1 0xfdbffbffU
#define MSK2 0xbff7ff3fU
#define MSK3 0xfd77efffU
#define MSK4 0xbf9ff3ffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0xe9528d85U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
#define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
#endif /* SFMT_PARAMS86243_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/test/include/test/SFMT-params132049.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS132049_H
#define SFMT_PARAMS132049_H
#define POS1 110
#define SL1 19
#define SL2 1
#define SR1 21
#define SR2 1
#define MSK1 0xffffbb5fU
#define MSK2 0xfb6ebf95U
#define MSK3 0xfffefffaU
#define MSK4 0xcff77fffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xcb520000U
#define PARITY4 0xc7e91c7dU
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
#endif /* SFMT_PARAMS132049_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/msvc_compat/C99/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/mutex.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Platform-selected mutex representation plus lock-order witness. */
struct malloc_mutex_s {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
/* Slim reader/writer lock (Vista+), used in exclusive mode only. */
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
/* Link in the list of mutexes whose initialization is postponed until
 * the init callback runs. */
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
/* Lock-order checking state; see witness.h. */
witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
/*
 * Acquire mutex. A no-op until the process becomes threaded (isthreaded).
 * Witness checks run around the platform lock call: non-ownership is
 * asserted before blocking, and the acquisition is recorded after.
 */
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
/* Record ownership only after the lock is actually held. */
witness_lock(tsdn, &mutex->witness);
}
}
/*
 * Release mutex. Mirror of malloc_mutex_lock(): the witness record is
 * dropped before the platform unlock, while the lock is still held.
 */
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
/* Assert that the calling thread holds mutex (witness-based check). */
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	/* Witness bookkeeping is only maintained once threading is active. */
	if (!isthreaded)
		return;
	witness_assert_owner(tsdn, &mutex->witness);
}
/* Assert that the calling thread does not hold mutex (witness-based). */
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	/* Witness bookkeeping is only maintained once threading is active. */
	if (!isthreaded)
		return;
	witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 4,264 | 27.817568 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ctl.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
ssize_t decay_time;
size_t pactive;
size_t pdirty;
/* The remainder are only populated if config_stats is true. */
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 3,389 | 27.487395 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ql.h | /* List definitions. */
/* ql_head: declare an anonymous list-head struct for elements of a_type. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
/* Static initializer for an empty list head. */
#define ql_head_initializer(a_head) {NULL}
/* ql_elm: per-element linkage; lists are built on the qr ring primitives. */
#define ql_elm(a_type) qr(a_type)
/* List functions. */
/* Reset a list head to empty at runtime. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
/* Initialize an element's linkage (self-linked ring of one). */
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
/* Last element: the ring predecessor of the first, or NULL when empty. */
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
/* Successor of a_elm, or NULL when a_elm is the last element. */
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
/* Predecessor of a_elm, or NULL when a_elm is the first element. */
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
/* Insert a_elm immediately before a_qlelm, fixing up the head pointer. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
/* Insert a_elm immediately after a_qlelm (head cannot change). */
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
/* Insert a_elm at the front of the list. */
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
/* Insert a_elm at the tail: splice before the first element, then make
 * the old first element (a_elm's ring successor) the head again. */
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
/* Remove a_elm; the head advances past it, and a singleton list becomes
 * empty (the second comparison detects the one-element case). */
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
/* Pop the first element (list must be non-empty). */
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
/* Pop the last element (list must be non-empty). */
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
/* Iterate forward/backward over the list; not safe against removal of
 * the current element during iteration. */
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/nstime.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Nanosecond-resolution time value (duration or instant); see struct below. */
typedef struct nstime_s nstime_t;

/* Maximum supported number of seconds (~584 years). */
#define	NSTIME_SEC_MAX	KQU(18446744072)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct nstime_s {
	uint64_t	ns;	/* Total time, in nanoseconds. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/* Initialize from total nanoseconds, or from a (seconds, nanoseconds) pair. */
void	nstime_init(nstime_t *time, uint64_t ns);
void	nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);

/* Accessors: total ns, whole seconds, and sub-second nanosecond remainder. */
uint64_t	nstime_ns(const nstime_t *time);
uint64_t	nstime_sec(const nstime_t *time);
uint64_t	nstime_nsec(const nstime_t *time);

void	nstime_copy(nstime_t *time, const nstime_t *source);
/* Three-way comparison; presumably memcmp-style <0/0/>0 — confirm in nstime.c. */
int	nstime_compare(const nstime_t *a, const nstime_t *b);

/* In-place arithmetic: *time is both the left operand and the destination. */
void	nstime_add(nstime_t *time, const nstime_t *addend);
void	nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void	nstime_imultiply(nstime_t *time, uint64_t multiplier);
void	nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t	nstime_divide(const nstime_t *time, const nstime_t *divisor);

/*
 * JEMALLOC_JET builds expose these as overridable function pointers (so the
 * implementation can be substituted); regular builds declare plain functions.
 */
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool	nstime_monotonic(void);
bool	nstime_update(nstime_t *time);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,738 | 34.489796 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/witness.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
/* Per-thread list of currently owned witnesses (ql linkage via link field). */
typedef ql_head(witness_t) witness_list_t;
/*
 * Tie-break comparator for witnesses of equal rank; a result > 0 is treated
 * as a rank order reversal (see witness_lock()).
 */
typedef int witness_comp_t (const witness_t *, const witness_t *);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define	WITNESS_RANK_OMIT		0U

#define	WITNESS_RANK_INIT		1U
#define	WITNESS_RANK_CTL		1U
#define	WITNESS_RANK_ARENAS		2U

#define	WITNESS_RANK_PROF_DUMP		3U
#define	WITNESS_RANK_PROF_BT2GCTX	4U
#define	WITNESS_RANK_PROF_TDATAS	5U
#define	WITNESS_RANK_PROF_TDATA		6U
#define	WITNESS_RANK_PROF_GCTX		7U

#define	WITNESS_RANK_ARENA		8U
#define	WITNESS_RANK_ARENA_CHUNKS	9U
/*
 * Fixed: previously defined as plain "10" (signed int).  All other ranks
 * carry the U suffix so comparisons happen in unsigned arithmetic, matching
 * witness_rank_t (unsigned); make this one consistent.
 */
#define	WITNESS_RANK_ARENA_NODE_CACHE	10U

#define	WITNESS_RANK_BASE		11U

/* Leaf ranks: no other lock may be acquired while one of these is held. */
#define	WITNESS_RANK_LEAF		0xffffffffU
#define	WITNESS_RANK_ARENA_BIN		WITNESS_RANK_LEAF
#define	WITNESS_RANK_ARENA_HUGE		WITNESS_RANK_LEAF
#define	WITNESS_RANK_DSS		WITNESS_RANK_LEAF
#define	WITNESS_RANK_PROF_ACTIVE	WITNESS_RANK_LEAF
#define	WITNESS_RANK_PROF_DUMP_SEQ	WITNESS_RANK_LEAF
#define	WITNESS_RANK_PROF_GDUMP	WITNESS_RANK_LEAF
#define	WITNESS_RANK_PROF_NEXT_THR_UID	WITNESS_RANK_LEAF
#define	WITNESS_RANK_PROF_THREAD_ACTIVE_INIT	WITNESS_RANK_LEAF

/* Static initializer: named "initializer", given rank, no comparator. */
#define	WITNESS_INITIALIZER(rank)	{"initializer", rank, NULL, {NULL, NULL}}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct witness_s {
	/* Name, used for printing lock order reversal messages. */
	const char		*name;

	/*
	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
	 * must be acquired in order of increasing rank.
	 */
	witness_rank_t		rank;

	/*
	 * If two witnesses are of equal rank and they have the same comp
	 * function pointer, it is called as a last attempt to differentiate
	 * between witnesses of equal rank.
	 */
	witness_comp_t		*comp;

	/* Linkage for thread's currently owned locks. */
	ql_elm(witness_t)	link;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Initialize a witness with its name, rank, and optional tie-break comparator. */
void	witness_init(witness_t *witness, const char *name, witness_rank_t rank,
    witness_comp_t *comp);

/*
 * Failure hooks.  JEMALLOC_JET builds expose each as an overridable function
 * pointer so tests can intercept the error path; regular builds declare
 * plain functions.
 */
#ifdef JEMALLOC_JET
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *witness_lock_error;
#else
void	witness_lock_error(const witness_list_t *witnesses,
    const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *witness_owner_error;
#else
void	witness_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *witness_not_owner_error;
#else
void	witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_lockless_error_t)(const witness_list_t *);
extern witness_lockless_error_t *witness_lockless_error;
#else
void	witness_lockless_error(const witness_list_t *witnesses);
#endif

/* Thread teardown and fork-time bookkeeping hooks. */
void	witnesses_cleanup(tsd_t *tsd);
void	witness_fork_cleanup(tsd_t *tsd);
void	witness_prefork(tsd_t *tsd);
void	witness_postfork_parent(tsd_t *tsd);
void	witness_postfork_child(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
/* Return whether the thread currently owns witness (pointer identity). */
JEMALLOC_INLINE bool
witness_owner(tsd_t *tsd, const witness_t *witness)
{
	witness_list_t *owned;
	witness_t *iter;
	bool found;

	found = false;
	owned = tsd_witnessesp_get(tsd);
	ql_foreach(iter, owned, link) {
		if (iter == witness) {
			found = true;
			break;
		}
	}
	return (found);
}
/* Assert (in debug builds) that the thread owns witness. */
JEMALLOC_INLINE void
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
{

	/* Tracking is active only in debug builds with a non-NULL tsdn. */
	if (!config_debug || tsdn_null(tsdn))
		return;
	/* Witnesses with rank WITNESS_RANK_OMIT are never tracked. */
	if (witness->rank == WITNESS_RANK_OMIT)
		return;
	if (!witness_owner(tsdn_tsd(tsdn), witness))
		witness_owner_error(witness);
}
/* Assert (in debug builds) that the thread does not own witness. */
JEMALLOC_INLINE void
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
{
	tsd_t *tsd;
	witness_list_t *owned;
	witness_t *iter;

	if (!config_debug || tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);
	if (witness->rank == WITNESS_RANK_OMIT)
		return;

	/* Report an error for every matching entry in the owned list. */
	owned = tsd_witnessesp_get(tsd);
	ql_foreach(iter, owned, link) {
		if (iter == witness)
			witness_not_owner_error(witness);
	}
}
/* Assert (in debug builds) that the thread owns no witnesses at all. */
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn)
{
	witness_list_t *owned;

	if (!config_debug || tsdn_null(tsdn))
		return;

	/* A non-empty owned list (non-NULL tail) is a violation. */
	owned = tsd_witnessesp_get(tsdn_tsd(tsdn));
	if (ql_last(owned, link) != NULL)
		witness_lockless_error(owned);
}
/*
 * Record acquisition of the lock tracked by witness: validate rank ordering
 * against the most recently acquired witness, then append witness to the
 * thread's owned-witness list.
 */
JEMALLOC_INLINE void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
	tsd_t *tsd;
	witness_list_t *witnesses;
	witness_t *w;

	/* Witness tracking is compiled/checked only in debug builds. */
	if (!config_debug)
		return;
	if (tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);
	if (witness->rank == WITNESS_RANK_OMIT)
		return;

	/* Acquiring a witness that is already owned is always an error. */
	witness_assert_not_owner(tsdn, witness);

	witnesses = tsd_witnessesp_get(tsd);
	/* Only the most recently acquired witness is checked for ordering. */
	w = ql_last(witnesses, link);
	if (w == NULL) {
		/* No other locks; do nothing. */
	} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
		/* Forking, and relaxed ranking satisfied. */
	} else if (w->rank > witness->rank) {
		/* Not forking, rank order reversal. */
		witness_lock_error(witnesses, witness);
	} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
	    witness->comp || w->comp(w, witness) > 0)) {
		/*
		 * Missing/incompatible comparison function, or comparison
		 * function indicates rank order reversal.
		 */
		witness_lock_error(witnesses, witness);
	}

	/* Record ownership (reached even if the error hook returns). */
	ql_elm_new(witness, link);
	ql_tail_insert(witnesses, witness, link);
}
/* Record release of the lock tracked by witness. */
JEMALLOC_INLINE void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
	tsd_t *tsd;
	witness_list_t *owned;

	if (!config_debug || tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);
	if (witness->rank == WITNESS_RANK_OMIT)
		return;

	/*
	 * Check ownership before removal, rather than relying on
	 * witness_assert_owner() to abort, so that unit tests can exercise
	 * this function's failure mode without causing undefined behavior.
	 */
	if (!witness_owner(tsd, witness)) {
		witness_assert_owner(tsdn, witness);
		return;
	}
	owned = tsd_witnessesp_get(tsd);
	ql_remove(owned, witness, link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 7,051 | 25.411985 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/qr.h | /* Ring definitions. */
/*
 * Intrusive circular doubly-linked ring.  qr(a_type) declares the embedded
 * linkage; every other macro takes the member name of that linkage as
 * a_field.  A detached element is a singleton ring (linked to itself).
 */
#define qr(a_type)							\
struct {								\
	a_type	*qre_next;	/* Ring successor. */			\
	a_type	*qre_prev;	/* Ring predecessor. */			\
}

/* Make a_qr a singleton ring. */
#define qr_new(a_qr, a_field) do {					\
	(a_qr)->a_field.qre_prev = (a_qr);				\
	(a_qr)->a_field.qre_next = (a_qr);				\
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

/* Splice a_qr into the ring immediately before a_qrelm. */
#define qr_before_insert(a_qrelm, a_qr, a_field) do {			\
	(a_qr)->a_field.qre_next = (a_qrelm);				\
	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev;		\
	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr);		\
	(a_qrelm)->a_field.qre_prev = (a_qr);				\
} while (0)

/* Splice a_qr into the ring immediately after a_qrelm. */
#define qr_after_insert(a_qrelm, a_qr, a_field) do {			\
	(a_qr)->a_field.qre_prev = (a_qrelm);				\
	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next;		\
	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr);		\
	(a_qrelm)->a_field.qre_next = (a_qr);				\
} while (0)

/* Merge (or, symmetrically, split) the rings containing a_qr_a and a_qr_b. */
#define qr_meld(a_qr_a, a_qr_b, a_field) do {				\
	void	*t_prev;						\
	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b);	\
	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a);	\
	t_prev = (a_qr_a)->a_field.qre_prev;				\
	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev;	\
	(a_qr_b)->a_field.qre_prev = t_prev;				\
} while (0)

/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need
 * to have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_field)				\
	qr_meld((a_qr_a), (a_qr_b), a_field)

/* Detach a_qr from its ring, leaving it as a singleton ring. */
#define qr_remove(a_qr, a_field) do {					\
	(a_qr)->a_field.qre_prev->a_field.qre_next =			\
	    (a_qr)->a_field.qre_next;					\
	(a_qr)->a_field.qre_next->a_field.qre_prev =			\
	    (a_qr)->a_field.qre_prev;					\
	(a_qr)->a_field.qre_next = (a_qr);				\
	(a_qr)->a_field.qre_prev = (a_qr);				\
} while (0)

/* Iterate forward over every element of the ring, starting at a_qr. */
#define qr_foreach(var, a_qr, a_field)					\
	for ((var) = (a_qr);						\
	    (var) != NULL;						\
	    (var) = (((var)->a_field.qre_next != (a_qr))		\
	    ? (var)->a_field.qre_next : NULL))

/* Iterate backward over every element of the ring, ending at a_qr. */
#define qr_reverse_foreach(var, a_qr, a_field)				\
	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL;	\
	    (var) != NULL;						\
	    (var) = (((var) != (a_qr))					\
	    ? (var)->a_field.qre_prev : NULL))
| 2,259 | 31.285714 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/spin.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct spin_s spin_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct spin_s {
	/* Backoff step: spin_adaptive() waits 2^iteration rounds, then
	 * increments this (capped at 63). */
	unsigned iteration;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void spin_init(spin_t *spin);
void spin_adaptive(spin_t *spin);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
/* Reset spin state to the minimum backoff (2^0 spin iterations). */
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{

	spin->iteration = 0;
}
/*
 * Busy-wait with exponential backoff: spin for 2^iteration rounds of
 * CPU_SPINWAIT, then double the next wait (capped at 2^63 rounds).
 */
JEMALLOC_INLINE void
spin_adaptive(spin_t *spin)
{
	volatile uint64_t busy = 0;

	while (busy < (KQU(1) << spin->iteration)) {
		CPU_SPINWAIT;
		busy++;
	}
	if (spin->iteration < 63)
		spin->iteration++;
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,154 | 21.211538 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/smoothstep.h | /*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 16,061 | 64.02834 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
 * Allocate a size-byte, alignment-aligned chunk via the OS mapping primitive.
 * *zero/*commit communicate zeroing and commit state with the caller.
 * NOTE(review): exact new_addr hint semantics live in chunk_mmap.c — confirm.
 */
void	*chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit);
/* Unmap a chunk; presumably returns true on failure — confirm in chunk_mmap.c. */
bool	chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 789 | 34.909091 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
/* Default log2 of chunk size (2^21 == 2 MiB). */
#define	LG_CHUNK_DEFAULT	21

/* Return the chunk address for allocation address a. */
#define	CHUNK_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define	CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define	CHUNK_CEILING(s)						\
	(((s) + chunksize_mask) & ~chunksize_mask)

/* chunk_hooks_t initializer with all seven hook slots NULL. */
#define	CHUNK_HOOKS_INITIALIZER {					\
	NULL,								\
	NULL,								\
	NULL,								\
	NULL,								\
	NULL,								\
	NULL,								\
	NULL								\
}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Boot-time tunables (see chunk.c for how they are consumed). */
extern size_t		opt_lg_chunk;
extern const char	*opt_dss;

/* Radix tree mapping chunk addresses to their extent nodes. */
extern rtree_t		chunks_rtree;

extern size_t		chunksize;	/* Chunk size in bytes. */
extern size_t		chunksize_mask; /* (chunksize - 1). */
extern size_t		chunk_npages;

extern const chunk_hooks_t	chunk_hooks_default;

/* Get/replace an arena's chunk hook table. */
chunk_hooks_t	chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t	chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
    const chunk_hooks_t *chunk_hooks);

/* Register/deregister a chunk in the chunks_rtree mapping. */
bool	chunk_register(tsdn_t *tsdn, const void *chunk,
    const extent_node_t *node);
void	chunk_deregister(const void *chunk, const extent_node_t *node);

/* Chunk allocation/deallocation entry points. */
void	*chunk_alloc_base(size_t size);
void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
void	*chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
    size_t *sn, bool *zero, bool *commit);
void	chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
    bool committed);
void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
    bool zeroed, bool committed);
bool	chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
    size_t length);

/* One-time module initialization. */
bool	chunk_boot(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
/* Map an address to its extent node via the global chunks radix tree. */
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
	extent_node_t *node;

	node = rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent);
	return (node);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
| 3,196 | 31.622449 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ckh.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
/* Hash function: fills r_hash[2] with two hashes of key (see ckh_string_hash). */
typedef void ckh_hash_t (const void *, size_t[2]);
/* Key equality predicate. */
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
 * one bucket per L1 cache line.
 */
#define	LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
	const void	*key;
	const void	*data;
};

/* Hash table state (nrelocs suggests cuckoo-style relocation — see ckh.c). */
struct ckh_s {
#ifdef CKH_COUNT
	/* Counters used to get an idea of performance. */
	uint64_t	ngrows;
	uint64_t	nshrinks;
	uint64_t	nshrinkfails;
	uint64_t	ninserts;
	uint64_t	nrelocs;
#endif

	/* Used for pseudo-random number generation. */
	uint64_t	prng_state;

	/* Total number of items. */
	size_t		count;

	/*
	 * Minimum and current number of hash table buckets.  There are
	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
	 */
	unsigned	lg_minbuckets;
	unsigned	lg_curbuckets;

	/* Hash and comparison functions. */
	ckh_hash_t	*hash;
	ckh_keycomp_t	*keycomp;

	/* Hash table with 2^lg_curbuckets buckets. */
	ckhc_t		*tab;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
 * NOTE(review): the bool-returning functions appear to follow jemalloc's
 * true-on-failure convention — confirm against ckh.c.
 */
bool	ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void	ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t	ckh_count(ckh_t *ckh);
/* Iteration; presumably *tabind is an opaque cursor — confirm in ckh.c. */
bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);

/* Stock hash/comparison implementations for string and pointer keys. */
void	ckh_string_hash(const void *key, size_t r_hash[2]);
bool	ckh_string_keycomp(const void *k1, const void *k2);
void	ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool	ckh_pointer_keycomp(const void *k1, const void *k2);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 2,648 | 29.448276 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/rtree.h | /*
* This radix tree implementation is tailored to the singular purpose of
* associating metadata with chunks that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;
/*
* RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
/*
* The node allocation callback function's argument is the number of contiguous
* rtree_node_elm_t structures to allocate, and the resulting memory must be
* zeroed.
*/
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/*
 * Radix tree node element.  The union overlays the roles a slot can play:
 * pun is an untyped alias used for atomic reads/writes, child points to a
 * child node (interior levels), and val holds the stored value (leaf level).
 */
struct rtree_node_elm_s {
	union {
		void			*pun;
		rtree_node_elm_t	*child;
		extent_node_t		*val;
	};
};
/* Per-level radix tree metadata, including the lazily created subtree root. */
struct rtree_level_s {
	/*
	 * A non-NULL subtree points to a subtree rooted along the hypothetical
	 * path to the leaf node corresponding to key 0.  Depending on what keys
	 * have been used to store to the tree, an arbitrary combination of
	 * subtree pointers may remain NULL.
	 *
	 * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
	 * This results in a 3-level tree, and the leftmost leaf can be directly
	 * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
	 * 0x00000000) can be accessed via subtrees[1], and the remainder of the
	 * tree can be accessed via subtrees[0].
	 *
	 *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
	 *
	 *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
	 *
	 *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
	 *
	 * This has practical implications on x64, which currently uses only the
	 * lower 47 bits of virtual address space in userland, thus leaving
	 * subtrees[0] unused and avoiding a level of tree traversal.
	 */
	union {
		void			*subtree_pun;	/* Type-punned alias for atomic ops. */
		rtree_node_elm_t	*subtree;
	};
	/* Number of key bits distinguished by this level. */
	unsigned		bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned		cumbits;
};
/* Radix tree root object: node allocator hooks plus per-level metadata. */
struct rtree_s {
	rtree_node_alloc_t	*alloc;
	rtree_node_dalloc_t	*dalloc;
	unsigned		height;
	/*
	 * Precomputed table used to convert from the number of leading 0 key
	 * bits to which subtree level to start at.
	 */
	unsigned		start_level[RTREE_HEIGHT_MAX];
	rtree_level_t		levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
/*
 * Compute the tree level at which traversal for key should begin; keys with
 * more leading zero bits start deeper, via the precomputed start_level table.
 * key == 0 maps directly to the leaf level (rtree->height - 1).
 */
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
	unsigned start_level;
	if (unlikely(key == 0))
		return (rtree->height - 1);
	start_level = rtree->start_level[lg_floor(key) >>
	    LG_RTREE_BITS_PER_LEVEL];
	assert(start_level < rtree->height);
	return (start_level);
}
/*
 * Extract the bits of key that index into the given tree level: shift away
 * the low-order bits not consumed through this level (per cumbits), then mask
 * to this level's width (bits).
 */
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
	return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    rtree->levels[level].cumbits)) & ((ZU(1) <<
	    rtree->levels[level].bits) - 1));
}
/*
 * A node pointer is valid iff it is neither NULL nor the transient
 * RTREE_NODE_INITIALIZING sentinel ((rtree_node_elm_t *)0x1); those are the
 * only two invalid encodings.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
	return (node != NULL && node != RTREE_NODE_INITIALIZING);
}
/*
 * Read elm's child pointer without falling back to hard (allocating)
 * initialization.  If the access is not data-dependent on a prior tree write,
 * re-read atomically in case the plain read was stale.
 */
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
	rtree_node_elm_t *child;
	/* Double-checked read (first read may be stale). */
	child = elm->child;
	if (!dependent && !rtree_node_valid(child))
		child = atomic_read_p(&elm->pun);
	assert(!dependent || child != NULL);
	return (child);
}
/*
 * Read elm's child pointer, lazily initializing it via
 * rtree_child_read_hard() when it is not yet valid (a data-dependent read
 * must already be valid and skips the slow path).
 */
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
    bool dependent)
{
	rtree_node_elm_t *child;
	child = rtree_child_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_node_valid(child)))
		child = rtree_child_read_hard(rtree, elm, level);
	assert(!dependent || child != NULL);
	return (child);
}
/*
 * Read the value stored in leaf element elm; the dependent flag selects
 * between a plain read and an atomic read (rationale below).
 */
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
	if (dependent) {
		/*
		 * Reading a val on behalf of a pointer to a valid allocation is
		 * guaranteed to be a clean read even without synchronization,
		 * because the rtree update became visible in memory before the
		 * pointer came into existence.
		 */
		return (elm->val);
	} else {
		/*
		 * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
		 * dependent on a previous rtree write, which means a stale read
		 * could result if synchronization were omitted here.
		 */
		return (atomic_read_p(&elm->pun));
	}
}
/* Atomically store val into leaf element elm. */
JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{
	atomic_write_p(&elm->pun, val);
}
/*
 * Read the subtree root for level without falling back to hard (allocating)
 * initialization; re-read atomically for non-dependent accesses.
 */
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
	rtree_node_elm_t *subtree;
	/* Double-checked read (first read may be stale). */
	subtree = rtree->levels[level].subtree;
	if (!dependent && unlikely(!rtree_node_valid(subtree)))
		subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
	assert(!dependent || subtree != NULL);
	return (subtree);
}
/*
 * Read the subtree root for level, lazily initializing it via
 * rtree_subtree_read_hard() when it is not yet valid.
 */
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
	rtree_node_elm_t *subtree;
	subtree = rtree_subtree_tryread(rtree, level, dependent);
	if (!dependent && unlikely(!rtree_node_valid(subtree)))
		subtree = rtree_subtree_read_hard(rtree, level);
	assert(!dependent || subtree != NULL);
	return (subtree);
}
/*
 * Look up the value associated with key.  The switch/macro construction
 * unrolls the traversal: the case labels are biased by RTREE_GET_BIAS so they
 * are compile-time constants, execution enters at the computed start level,
 * and each RTREE_GET_SUBTREE case falls through to the next level until
 * RTREE_GET_LEAF returns the value.  A non-dependent lookup returns NULL if
 * any node along the path is missing.
 */
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
	uintptr_t subkey;
	unsigned start_level;
	rtree_node_elm_t *node;
	start_level = rtree_start_level(rtree, key);
	node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS	(RTREE_HEIGHT_MAX - rtree->height)
	switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level)					\
	case level:							\
		assert(level < (RTREE_HEIGHT_MAX-1));			\
		if (!dependent && unlikely(!rtree_node_valid(node)))	\
			return (NULL);					\
		subkey = rtree_subkey(rtree, key, level -		\
		    RTREE_GET_BIAS);					\
		node = rtree_child_tryread(&node[subkey], dependent);	\
		/* Fall through. */
#define RTREE_GET_LEAF(level)						\
	case level:							\
		assert(level == (RTREE_HEIGHT_MAX-1));			\
		if (!dependent && unlikely(!rtree_node_valid(node)))	\
			return (NULL);					\
		subkey = rtree_subkey(rtree, key, level -		\
		    RTREE_GET_BIAS);					\
		/*							\
		 * node is a leaf, so it contains values rather than	\
		 * child pointers.					\
		 */							\
		return (rtree_val_read(rtree, &node[subkey],		\
		    dependent));
#if RTREE_HEIGHT_MAX > 1
	RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
	RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
	RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
	RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
	RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
	RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
	RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
	RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
	RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
	RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
	RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
	RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
	RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
	RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
	RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
#  error Unsupported RTREE_HEIGHT_MAX
#endif
	RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
	default: not_reached();
	}
#undef RTREE_GET_BIAS
	not_reached();
}
/*
 * Associate val with key, creating intermediate tree nodes on demand.
 * Returns true on node allocation failure, false on success.
 */
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
	uintptr_t subkey;
	unsigned i, start_level;
	rtree_node_elm_t *node, *child;
	start_level = rtree_start_level(rtree, key);
	node = rtree_subtree_read(rtree, start_level, false);
	if (node == NULL)
		return (true);
	for (i = start_level; /**/; i++, node = child) {
		subkey = rtree_subkey(rtree, key, i);
		if (i == rtree->height - 1) {
			/*
			 * node is a leaf, so it contains values rather than
			 * child pointers.
			 */
			rtree_val_write(rtree, &node[subkey], val);
			return (false);
		}
		assert(i + 1 < rtree->height);
		child = rtree_child_read(rtree, &node[subkey], i, false);
		if (child == NULL)
			return (true);
	}
	not_reached();
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 10,608 | 27.907357 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/stats.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Statistics for a single thread-cache bin. */
struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
};
/* Per-arena statistics for one small size-class bin. */
struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;
	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;
	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;
	/* Number of tcache fills from this bin. */
	uint64_t	nfills;
	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;
	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;
	/*
	 * Total number of runs reused by extracting them from the runs tree for
	 * this bin's size class.
	 */
	uint64_t	reruns;
	/* Current number of runs in this bin. */
	size_t		curruns;
};
/* Per-arena statistics for one large size class. */
struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;
	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;
	/*
	 * Current number of runs of this size class, including runs currently
	 * cached by tcache.
	 */
	size_t		curruns;
};
/* Per-arena statistics for one huge size class. */
struct malloc_huge_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;
	/* Current number of (multi-)chunk allocations of this size class. */
	size_t		curhchunks;
};
/* Arena-wide allocation statistics. */
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;
	/*
	 * Number of bytes currently retained as a side effect of munmap() being
	 * disabled/bypassed.  Retained bytes are technically mapped (though
	 * always decommitted or purged), but they are excluded from the mapped
	 * statistic (above).
	 */
	size_t		retained;
	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;
	/*
	 * Number of bytes currently mapped purely for metadata purposes, and
	 * number of bytes currently allocated for internal metadata.
	 */
	size_t		metadata_mapped;
	size_t		metadata_allocated; /* Protected via atomic_*_z(). */
	/* Per-size-category statistics. */
	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;
	size_t		allocated_huge;
	uint64_t	nmalloc_huge;
	uint64_t	ndalloc_huge;
	/* One element for each large size class. */
	malloc_large_stats_t	*lstats;
	/* One element for each huge size class. */
	malloc_huge_stats_t	*hstats;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
extern size_t stats_cactive;
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
/* Atomically read the global active-bytes counter. */
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
	return (atomic_read_z(&stats_cactive));
}
/* Add size (a nonzero multiple of the chunk size) to the active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
	assert(size > 0);
	assert((size & chunksize_mask) == 0);
	atomic_add_z(&stats_cactive, size);
}
/* Subtract size (a nonzero multiple of the chunk size) from the active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
	assert(size > 0);
	assert((size & chunksize_mask) == 0);
	atomic_sub_z(&stats_cactive, size);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,028 | 24.39899 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/util.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
#endif
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
#endif
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
#include "jemalloc/internal/assert.h"
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if (unlikely(!(c))) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
unsigned ffs_llu(unsigned long long bitmap);
unsigned ffs_lu(unsigned long bitmap);
unsigned ffs_u(unsigned bitmap);
unsigned ffs_zu(size_t bitmap);
unsigned ffs_u64(uint64_t bitmap);
unsigned ffs_u32(uint32_t bitmap);
uint64_t pow2_ceil_u64(uint64_t x);
uint32_t pow2_ceil_u32(uint32_t x);
size_t pow2_ceil_zu(size_t x);
unsigned lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
/* ffs() for unsigned long long, via the configure-selected implementation. */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap)
{
	return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}
/* ffs() for unsigned long, via the configure-selected implementation. */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap)
{
	return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
/* ffs() for unsigned, via the configure-selected implementation. */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap)
{
	return (JEMALLOC_INTERNAL_FFS(bitmap));
}
/* ffs() for size_t: dispatch to the variant whose width matches size_t. */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap)
{
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
	return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
	return (ffs_lu(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
	return (ffs_llu(bitmap));
#else
#error No implementation for size_t ffs()
#endif
}
/* ffs() for uint64_t: dispatch to whichever integer type is 64 bits wide. */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap)
{
#if LG_SIZEOF_LONG == 3
	return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
	return (ffs_llu(bitmap));
#else
#error No implementation for 64-bit ffs()
#endif
}
/*
 * ffs() for uint32_t.  Requires int to be 32 bits wide; other configurations
 * are rejected at compile time via #error.
 *
 * Fix: the original had a second, unreachable `return (ffs_u(bitmap));`
 * after the #if/#endif block (dead code duplicated below the first return);
 * it has been removed.
 */
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap)
{
#if LG_SIZEOF_INT == 2
	return (ffs_u(bitmap));
#else
#error No implementation for 32-bit ffs()
#endif
}
/*
 * Round x up to the smallest power of 2 that is >= x.  Returns 0 both for
 * x == 0 and when the result would overflow (x > 2^63): the decrement/smear/
 * increment sequence wraps to 0 in those cases, matching the original.
 */
JEMALLOC_INLINE uint64_t
pow2_ceil_u64(uint64_t x)
{
	unsigned shift;

	x--;
	/* Smear the highest set bit into every lower position (1,2,4,...,32). */
	for (shift = 1; shift < 64; shift <<= 1)
		x |= x >> shift;
	x++;
	return (x);
}
/*
 * Round x up to the smallest power of 2 that is >= x (32-bit variant).
 * Returns 0 for x == 0 and when the result would overflow (x > 2^31).
 */
JEMALLOC_INLINE uint32_t
pow2_ceil_u32(uint32_t x)
{
	unsigned shift;

	x--;
	/* Smear the highest set bit into every lower position (1,2,4,8,16). */
	for (shift = 1; shift < 32; shift <<= 1)
		x |= x >> shift;
	x++;
	return (x);
}
/*
 * Compute the smallest power of 2 that is >= x.  Dispatches on pointer width
 * so that size_t maps to the matching fixed-width implementation.
 */
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x)
{
#if (LG_SIZEOF_PTR == 3)
	return (pow2_ceil_u64(x));
#else
	return (pow2_ceil_u32(x));
#endif
}
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/*
 * lg_floor(x) returns floor(log2(x)); x must be nonzero.  x86 variant: the
 * BSR instruction yields the index of the most significant set bit.
 */
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
	size_t ret;
	assert(x != 0);
	asm ("bsr %1, %0"
	    : "=r"(ret) // Outputs.
	    : "r"(x) // Inputs.
	    );
	assert(ret < UINT_MAX);
	return ((unsigned)ret);
}
#elif (defined(_MSC_VER))
/*
 * lg_floor(x) for MSVC: _BitScanReverse{,64} stores the index of the most
 * significant set bit; x must be nonzero.
 */
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
	unsigned long ret;
	assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
	_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
	_BitScanReverse(&ret, x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
	assert(ret < UINT_MAX);
	return ((unsigned)ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
/*
 * lg_floor(x) via __builtin_clz{,l}: the MSB index equals (type width - 1)
 * minus the count of leading zeros; x must be nonzero (clz(0) is undefined).
 */
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
	assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
#  error "Unsupported type size for lg_floor()"
#endif
}
#else
/*
 * Portable lg_floor(x) fallback: smear the highest set bit into all lower
 * positions (x becomes 2^(k+1) - 1 where k = lg_floor(x)), special-case the
 * all-ones value to avoid overflow, then increment to 2^(k+1) and recover k
 * from the bit index via ffs_zu().
 */
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
	assert(x != 0);
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
	x |= (x >> 32);
#endif
	if (x == SIZE_T_MAX)
		return ((8 << LG_SIZEOF_PTR) - 1);
	x++;
	return (ffs_zu(x) - 2);
}
#endif
/* Set the last-error code (SetLastError on Windows, errno elsewhere). */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
	SetLastError(errnum);
#else
	errno = errnum;
#endif
}
/* Get the last-error code (GetLastError on Windows, errno elsewhere). */
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
	return (GetLastError());
#else
	return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 7,458 | 20.746356 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/tcache.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Per-thread tcache enabled state; false/true values cast to/from bool. */
typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;
/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};
/* A single size-class cache bin within a thread cache. */
struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	/*
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack goes to higher address for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are available items and the lowest item will
	 * be allocated first.
	 */
	void		**avail;	/* Stack of available objects. */
};
/* Per-thread allocation cache. */
struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
	ticker_t	gc_ticker;	/* Drives incremental GC. */
	szind_t		next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t	*tcache;	/* Slot in use: the tcache. */
		tcaches_t	*next;		/* Slot free: next free-list entry. */
	};
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
* completely disjoint from this data structure. tcaches starts off as a sparse
* array, so it has no physical memory footprint until individual pages are
* touched. This allows the entire array to be allocated the first time an
* explicit tcache is created without a disproportionate impact on memory usage.
*/
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Flush and tear down the calling thread's tcache. */
JEMALLOC_INLINE void
tcache_flush(void)
{
	tsd_t *tsd;
	cassert(config_tcache);
	tsd = tsd_fetch();
	tcache_cleanup(tsd);
}
/*
 * Return whether tcaching is enabled for the calling thread, lazily resolving
 * the "default" state from opt_tcache (and storing the result) on first query.
 */
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;
	cassert(config_tcache);
	tsd = tsd_fetch();
	tcache_enabled = tsd_tcache_enabled_get(tsd);
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tsd_tcache_enabled_set(tsd, tcache_enabled);
	}
	return ((bool)tcache_enabled);
}
/*
 * Enable or disable tcaching for the calling thread; disabling also flushes
 * and destroys the thread's existing tcache via tcache_cleanup().
 */
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;
	cassert(config_tcache);
	tsd = tsd_fetch();
	tcache_enabled = (tcache_enabled_t)enabled;
	tsd_tcache_enabled_set(tsd, tcache_enabled);
	if (!enabled)
		tcache_cleanup(tsd);
}
/*
 * Return the calling thread's tcache, optionally creating it on demand (only
 * while tsd is in the nominal state).  Returns NULL when tcache support is
 * compiled out.
 */
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
	tcache_t *tcache;
	if (!config_tcache)
		return (NULL);
	tcache = tsd_tcache_get(tsd);
	if (!create)
		return (tcache);
	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
		tcache = tcache_get_hard(tsd);
		tsd_tcache_set(tsd, tcache);
	}
	return (tcache);
}
/*
 * Drive incremental tcache GC: a no-op when TCACHE_GC_INCR is 0, otherwise
 * runs the hard GC path whenever the per-tcache ticker fires.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{
	if (TCACHE_GC_INCR == 0)
		return;
	if (unlikely(ticker_tick(&tcache->gc_ticker)))
		tcache_event_hard(tsd, tcache);
}
/*
 * Pop the most recently cached object from tbin's avail stack.  On an empty
 * bin, sets *tcache_success to false, records low_water = -1, and returns
 * NULL; otherwise sets *tcache_success to true and maintains the low-water
 * mark.  Callers must check *tcache_success, not the return value (see
 * comment below).
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
	void *ret;
	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return (NULL);
	}
	/*
	 * tcache_success (instead of ret) should be checked upon the return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (which is unknown to the
	 * compiler), and eagerly checking ret would cause pipeline stall
	 * (waiting for the cacheline).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;
	if (unlikely((int)tbin->ncached < tbin->low_water))
		tbin->low_water = tbin->ncached;
	return (ret);
}
/*
 * Allocate a small object of size class binind from the thread cache,
 * refilling the bin from the arena on a cache miss.  Handles junk/zero fill
 * and stats/prof accounting, then drives incremental GC.  Returns NULL on
 * refill failure.
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);
		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (tcache_hard_success == false)
			return (NULL);
	}
	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}
	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}
	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += usize;
	tcache_event(tsd, tcache);
	return (ret);
}
/*
 * Allocate a large object of size class binind, preferring the thread
 * cache.  On a cache miss the object is allocated directly from the arena
 * (no refill), and -- as the code below shows -- the stats/prof updates
 * and debug fills are performed only on the cache-hit path; the arena
 * allocation path handles its own accounting.
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

		ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (config_prof && usize == LARGE_MINCLASS) {
			/*
			 * Reset the run's binind so profiling does not treat
			 * this cached LARGE_MINCLASS object as a small run.
			 */
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (likely(!zero)) {
			/* Optional debug fills on the slow path. */
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero))
					memset(ret, 0, usize);
			}
		} else
			memset(ret, 0, usize);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += usize;
	}

	tcache_event(tsd, tcache);
	return (ret);
}
/*
 * Return a small object to its tcache bin, junk-filling it first when
 * opt_junk_free is enabled.  If the bin is at capacity, flush it first
 * (the final argument passed is ncached_max/2) to make room.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	/* Push onto the avail stack (grows downward from avail). */
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
/*
 * Return a large object to the tcache.  size must be page-aligned and in
 * (SMALL_MAXCLASS, tcache_maxclass] (see the asserts); the bin index is
 * recomputed from size.  A full bin is flushed (final argument is
 * ncached_max/2) before the push.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
	szind_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	binind = size2index(size);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_large(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	/* Push onto the avail stack (grows downward from avail). */
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
/*
 * Return the explicitly-managed tcache at index ind in the global tcaches
 * array, lazily creating it on first use from the caller's arena choice.
 * NOTE(review): tcache_create()'s result is stored and returned without a
 * NULL check; presumably callers tolerate a NULL return on OOM -- confirm.
 */
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
		    NULL));
	}
	return (elm->tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 13,576 | 27.887234 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/base.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(tsdn_t *tsdn, size_t size);
void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped);
bool base_boot(void);
void base_prefork(tsdn_t *tsdn);
void base_postfork_parent(tsdn_t *tsdn);
void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 911 | 34.076923 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/bitmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#else /* USE_TREE */
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#endif /* USE_TREE */
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
#ifdef USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
#else /* USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
#endif /* USE_TREE */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
/*
 * Return true iff every logical bit in the bitmap is set.  Because bits
 * are stored inverted (a physical 1 means "logically unset"), the bitmap
 * is full exactly when every group word is zero.
 */
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
#ifdef USE_TREE
	/* The root group summarizes the whole tree: full iff it is 0. */
	return (bitmap[binfo->levels[binfo->nlevels].group_offset - 1] == 0);
#else
	/* Flat representation: any nonzero group has an unset bit. */
	size_t gind;

	for (gind = 0; gind < binfo->ngroups; gind++) {
		if (bitmap[gind] != 0)
			return (false);
	}
	return (true);
#endif
}
/*
 * Return true iff logical bit `bit` is set.  Bits are stored inverted,
 * so the logical value is the complement of the physical bit.
 */
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{

	assert(bit < binfo->nbits);
	return ((bitmap[bit >> LG_BITMAP_GROUP_NBITS] &
	    (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
}
/*
 * Logically set bit `bit` (i.e. clear its physical, inverted bit).  The
 * bit must currently be unset.  In tree mode, when a group becomes all-set
 * (physical 0), the corresponding summary bit in each parent level is
 * cleared in turn, stopping at the first parent that remains nonzero.
 */
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
	size_t goff;
	bitmap_t *gp;
	bitmap_t g;

	assert(bit < binfo->nbits);
	assert(!bitmap_get(bitmap, binfo, bit));
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	gp = &bitmap[goff];
	g = *gp;
	/* The physical bit must be 1 (logically unset) before the flip. */
	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
	/* Propagate group state transitions up the tree. */
	if (g == 0) {
		unsigned i;
		for (i = 1; i < binfo->nlevels; i++) {
			/* This group's index becomes the bit in its parent. */
			bit = goff;
			goff = bit >> LG_BITMAP_GROUP_NBITS;
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			if (g != 0)
				break;
		}
	}
#endif
}
/* sfu: set first unset. */
/*
 * Find the lowest-indexed logically-unset bit, set it, and return its
 * index.  The bitmap must not be full.  In tree mode the search descends
 * from the root summary group; in flat mode it scans groups linearly for
 * the first physical 1 bit.
 */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
	size_t bit;
	bitmap_t g;
	unsigned i;

	assert(!bitmap_full(bitmap, binfo));

#ifdef USE_TREE
	i = binfo->nlevels - 1;
	g = bitmap[binfo->levels[i].group_offset];
	/* ffs_lu() is 1-based; subtract 1 for a 0-based bit index. */
	bit = ffs_lu(g) - 1;
	while (i > 0) {
		i--;
		g = bitmap[binfo->levels[i].group_offset + bit];
		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
	}
#else
	i = 0;
	g = bitmap[0];
	/* Skip all-zero (fully set) groups; one must be nonzero. */
	while ((bit = ffs_lu(g)) == 0) {
		i++;
		g = bitmap[i];
	}
	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
	bitmap_set(bitmap, binfo, bit);
	return (bit);
}
/*
 * Logically unset bit `bit` (i.e. set its physical, inverted bit).  The
 * bit must currently be set.  In tree mode, when a group transitions from
 * all-set (physical 0) to having an unset bit, the corresponding summary
 * bit is restored in each parent level, stopping at the first parent that
 * was already nonzero.
 */
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
	size_t goff;
	bitmap_t *gp;
	bitmap_t g;
	UNUSED bool propagate;

	assert(bit < binfo->nbits);
	assert(bitmap_get(bitmap, binfo, bit));
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	gp = &bitmap[goff];
	g = *gp;
	/* Only a 0 -> nonzero transition needs to be pushed upward. */
	propagate = (g == 0);
	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(!bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
	/* Propagate group state transitions up the tree. */
	if (propagate) {
		unsigned i;
		for (i = 1; i < binfo->nlevels; i++) {
			/* This group's index becomes the bit in its parent. */
			bit = goff;
			goff = bit >> LG_BITMAP_GROUP_NBITS;
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
			propagate = (g == 0);
			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
			    == 0);
			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			if (!propagate)
				break;
		}
	}
#endif /* USE_TREE */
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 7,819 | 27.436364 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ticker.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ticker_s ticker_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ticker_s {
int32_t tick;
int32_t nticks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void ticker_init(ticker_t *ticker, int32_t nticks);
void ticker_copy(ticker_t *ticker, const ticker_t *other);
int32_t ticker_read(const ticker_t *ticker);
bool ticker_ticks(ticker_t *ticker, int32_t nticks);
bool ticker_tick(ticker_t *ticker);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
/* Initialize a countdown ticker with a period of nticks events. */
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{

	/* Record the period, then start the countdown at a full period. */
	ticker->nticks = nticks;
	ticker->tick = nticks;
}
/* Duplicate other's state into ticker. */
JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{

	/* Field-wise copy; ticker_t holds only two plain integers. */
	ticker->tick = other->tick;
	ticker->nticks = other->nticks;
}
/* Return the current countdown value without modifying the ticker. */
JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{

	return (ticker->tick);
}
/*
 * Consume nticks events.  Returns true -- and resets the countdown to a
 * full period -- when fewer than nticks ticks remain (the ticker
 * "fires"); otherwise decrements the countdown and returns false.
 */
JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{

	if (unlikely(ticker->tick < nticks)) {
		ticker->tick = ticker->nticks;
		return (true);
	}
	ticker->tick -= nticks;
	return(false);
}
/* Convenience wrapper: consume a single tick. */
JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{

	return (ticker_ticks(ticker, 1));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,698 | 21.355263 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/prng.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(y) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t prng_state_next_u32(uint32_t state);
uint64_t prng_state_next_u64(uint64_t state);
size_t prng_state_next_zu(size_t state);
uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
bool atomic);
uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
uint64_t prng_range_u64(uint64_t *state, uint64_t range);
size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
/* Advance the 32-bit LCG state by one step (mod 2^32 via wraparound). */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{

	return ((state * PRNG_A_32) + PRNG_C_32);
}
/* Advance the 64-bit LCG state by one step (mod 2^64 via wraparound). */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{

	return ((state * PRNG_A_64) + PRNG_C_64);
}
/*
 * Advance a size_t-width LCG state by one step, choosing the 32- or
 * 64-bit constants to match the platform's pointer size.
 */
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{

#if LG_SIZEOF_PTR == 2
	return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
	return ((state * PRNG_A_64) + PRNG_C_64);
#else
#error Unsupported pointer size
#endif
}
/*
 * Generate a pseudo-random number uniformly distributed in
 * [0, 2^lg_range).  If atomic is true, *state is advanced with a CAS loop
 * so concurrent callers each consume a distinct state transition.  The
 * result is taken from the high bits of the state, which (per the file
 * header) have the longest cycles.
 */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
	uint32_t ret, state1;

	assert(lg_range > 0);
	assert(lg_range <= 32);

	if (atomic) {
		uint32_t state0;

		do {
			state0 = atomic_read_uint32(state);
			state1 = prng_state_next_u32(state0);
		} while (atomic_cas_uint32(state, state0, state1));
	} else {
		state1 = prng_state_next_u32(*state);
		*state = state1;
	}
	ret = state1 >> (32 - lg_range);

	return (ret);
}
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
/*
 * Generate a pseudo-random number uniformly distributed in
 * [0, 2^lg_range), always updating *state non-atomically (see above).
 */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
	uint64_t ret, state1;

	assert(lg_range > 0);
	assert(lg_range <= 64);

	state1 = prng_state_next_u64(*state);
	*state = state1;
	/* Use the high bits; they have the longest cycles. */
	ret = state1 >> (64 - lg_range);

	return (ret);
}
/*
 * Generate a pseudo-random number uniformly distributed in
 * [0, 2^lg_range) using size_t-width state, optionally updating *state
 * with a CAS loop when atomic is true.
 */
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
	size_t ret, state1;

	assert(lg_range > 0);
	/* ZU(1) << (3 + LG_SIZEOF_PTR) is the bit width of size_t. */
	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

	if (atomic) {
		size_t state0;

		do {
			state0 = atomic_read_z(state);
			state1 = prng_state_next_zu(state0);
		} while (atomic_cas_z(state, state0, state1));
	} else {
		state1 = prng_state_next_zu(*state);
		*state = state1;
	}
	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

	return (ret);
}
/*
 * Generate a pseudo-random number uniformly distributed in [0, range) by
 * rejection sampling: draw ceil(lg(range))-bit values and retry until one
 * falls below range.  range must be > 1.
 */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
	uint32_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u32(state, lg_range, atomic);
	} while (ret >= range);

	return (ret);
}
/*
 * Generate a pseudo-random number uniformly distributed in [0, range) by
 * rejection sampling (non-atomic state update).  range must be > 1.
 */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
	uint64_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u64(state, lg_range);
	} while (ret >= range);

	return (ret);
}
/*
 * Generate a pseudo-random number uniformly distributed in [0, range) by
 * rejection sampling, with size_t-width state.  range must be > 1.
 */
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
	size_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	/*
	 * NOTE(review): the 64-bit helpers are used even though range is
	 * size_t; harmless given LG_SIZEOF_PTR is 2 or 3 (size_t <= 64 bits).
	 */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_zu(state, lg_range, atomic);
	} while (ret >= range);

	return (ret);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,087 | 23.461538 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ph.h | /*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With auxiliary twopass list, described in a follow on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
* We can delete from aux list without merging it, but we need \
* to merge if we are dealing with the root node. \
*/ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
| 10,965 | 30.693642 | 86 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/huge.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero);
bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,518 | 41.194444 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/assert.h | /*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
/* Print file/line/expression and abort; only active when config_debug. */
#define assert(e) do { \
	if (unlikely(config_debug && !(e))) { \
		malloc_printf( \
		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
		    __FILE__, __LINE__, #e); \
		abort(); \
	} \
} while (0)
#endif
#ifndef not_reached
/*
 * Report and abort in debug builds when supposedly-unreachable code runs;
 * in all builds, tell the optimizer the point is unreachable.
 */
#define not_reached() do { \
	if (config_debug) { \
		malloc_printf( \
		    "<jemalloc>: %s:%d: Unreachable code reached\n", \
		    __FILE__, __LINE__); \
		abort(); \
	} \
	unreachable(); \
} while (0)
#endif
#ifndef not_implemented
/* Debug-build report-and-abort for unimplemented code paths. */
#define not_implemented() do { \
	if (config_debug) { \
		malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
		    __FILE__, __LINE__); \
		abort(); \
	} \
} while (0)
#endif
#ifndef assert_not_implemented
/* assert() variant that reports "Not implemented" instead of the predicate. */
#define assert_not_implemented(e) do { \
	if (unlikely(config_debug && !(e))) \
		not_implemented(); \
} while (0)
#endif
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/atomic.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
 * Atomic reads are expressed as an atomic add of zero (or NULL for
 * pointers), which returns the current value with the same ordering
 * guarantees as the other atomic operations.
 */
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* All arithmetic functions return the arithmetic result of the atomic
* operation. Some atomic operation APIs return the value prior to mutation, in
* which case the following functions must redundantly compute the result so
* that it can be returned. These functions are normally inlined, so the extra
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* *p = s;
* return (false);
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
/* x86-64 implementation via lock-prefixed instructions. */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	uint64_t t = x;
	/* "lock; xaddq" leaves the pre-add value of *p in t. */
	asm volatile (
	    "lock; xaddq %0, %1;"
	    : "+r" (t), "=m" (*p) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    );
	return (t + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	uint64_t t;
	/* Subtraction is addition of the two's complement. */
	x = (uint64_t)(-(int64_t)x);
	t = x;
	asm volatile (
	    "lock; xaddq %0, %1;"
	    : "+r" (t), "=m" (*p) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    );
	return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	uint8_t success;
	/* "sete" captures ZF, i.e. whether the exchange took effect. */
	asm volatile (
	    "lock; cmpxchgq %4, %0;"
	    "sete %1;"
	    : "=m" (*p), "=a" (success) /* Outputs. */
	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
	/* jemalloc convention: false on success, true on CAS failure. */
	return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	asm volatile (
	    "xchgq %1, %0;" /* Lock is implied by xchgq. */
	    : "=m" (*p), "+r" (x) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
# elif (defined(JEMALLOC_C11ATOMICS))
/* C11 <stdatomic.h> implementation (default seq_cst ordering). */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
	/* atomic_fetch_add() returns the old value; add x for the new one. */
	return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
	return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
	atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
/* FreeBSD <machine/atomic.h> ("ATOMIC9") implementation. */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	/*
	 * atomic_fetchadd_64() doesn't exist, but we only ever use this
	 * function on LP64 systems, so atomic_fetchadd_long() will do.
	 */
	assert(sizeof(uint64_t) == sizeof(unsigned long));
	return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	assert(sizeof(uint64_t) == sizeof(unsigned long));
	return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	assert(sizeof(uint64_t) == sizeof(unsigned long));
	/* atomic_cmpset_long() returns non-zero on success; invert. */
	return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	assert(sizeof(uint64_t) == sizeof(unsigned long));
	atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
/* Darwin libkern OSAtomic implementation. */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	/* OSAtomicAdd64() returns the new (post-add) value. */
	return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	uint64_t o;
	/*The documented OSAtomic*() API does not expose an atomic exchange. */
	/* Emulate it with a read/CAS loop; atomic_cas_uint64() returns true
	 * on failure, so loop until the CAS succeeds. */
	do {
		o = atomic_read_uint64(p);
	} while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
/* Windows Interlocked* implementation. */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	/* InterlockedExchangeAdd64() returns the old value; add x back. */
	return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	uint64_t o;
	o = InterlockedCompareExchange64(p, s, c);
	/* The exchange happened iff the returned old value equals c. */
	return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
/* GCC legacy __sync builtin implementation. */
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
	return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
	return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
	/* __sync_lock_test_and_set() performs an atomic exchange. */
	__sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/* x86/x86-64 implementation via lock-prefixed instructions. */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	uint32_t t = x;
	/* "lock; xaddl" leaves the pre-add value of *p in t. */
	asm volatile (
	    "lock; xaddl %0, %1;"
	    : "+r" (t), "=m" (*p) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    );
	return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	uint32_t t;
	/* Subtraction is addition of the two's complement. */
	x = (uint32_t)(-(int32_t)x);
	t = x;
	asm volatile (
	    "lock; xaddl %0, %1;"
	    : "+r" (t), "=m" (*p) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    );
	return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	uint8_t success;
	/* "sete" captures ZF, i.e. whether the exchange took effect. */
	asm volatile (
	    "lock; cmpxchgl %4, %0;"
	    "sete %1;"
	    : "=m" (*p), "=a" (success) /* Outputs. */
	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
	    : "memory"
	    );
	/* jemalloc convention: false on success, true on CAS failure. */
	return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	asm volatile (
	    "xchgl %1, %0;" /* Lock is implied by xchgl. */
	    : "=m" (*p), "+r" (x) /* Outputs. */
	    : "m" (*p) /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
# elif (defined(JEMALLOC_C11ATOMICS))
/* C11 <stdatomic.h> implementation (default seq_cst ordering). */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
	/* atomic_fetch_add() returns the old value; add x for the new one. */
	return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
	return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
	atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
/* FreeBSD <machine/atomic.h> ("ATOMIC9") implementation. */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	/* atomic_fetchadd_32() returns the old value; add x back. */
	return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	/* atomic_cmpset_32() returns non-zero on success; invert. */
	return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
/* Darwin libkern OSAtomic implementation. */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	/* OSAtomicAdd32() returns the new (post-add) value. */
	return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	uint32_t o;
	/*The documented OSAtomic*() API does not expose an atomic exchange. */
	/* Emulate it with a read/CAS loop; atomic_cas_uint32() returns true
	 * on failure, so loop until the CAS succeeds. */
	do {
		o = atomic_read_uint32(p);
	} while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
/* Windows Interlocked* implementation. */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	/* InterlockedExchangeAdd() returns the old value; add x back. */
	return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	uint32_t o;
	o = InterlockedCompareExchange(p, s, c);
	/* The exchange happened iff the returned old value equals c. */
	return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
/* GCC legacy __sync builtin implementation. */
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
	return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
	return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
	return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
	/* __sync_lock_test_and_set() performs an atomic exchange. */
	__sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
/* Pointer operations. */
/*
 * Pointer operations: delegate to the 64- or 32-bit integer operations
 * according to the compile-time pointer size (LG_SIZEOF_PTR).
 */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
	return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
	return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
	/* Subtraction is addition of the negated (two's complement) value. */
#if (LG_SIZEOF_PTR == 3)
	return ((void *)atomic_add_uint64((uint64_t *)p,
	    (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
	return ((void *)atomic_add_uint32((uint32_t *)p,
	    (uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
#if (LG_SIZEOF_PTR == 3)
	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* size_t operations. */
/*
 * size_t operations: delegate to the 64- or 32-bit integer operations
 * according to the compile-time pointer size (LG_SIZEOF_PTR).
 */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{
	/* Subtraction is addition of the negated (two's complement) value. */
#if (LG_SIZEOF_PTR == 3)
	return ((size_t)atomic_add_uint64((uint64_t *)p,
	    (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
	return ((size_t)atomic_add_uint32((uint32_t *)p,
	    (uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
#if (LG_SIZEOF_PTR == 3)
	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* unsigned operations. */
/*
 * unsigned operations: delegate to the 64- or 32-bit integer operations
 * according to the compile-time int size (LG_SIZEOF_INT).
 */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
	return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
	return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
	/* Subtraction is addition of the negated (two's complement) value. */
#if (LG_SIZEOF_INT == 3)
	return ((unsigned)atomic_add_uint64((uint64_t *)p,
	    (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
	return ((unsigned)atomic_add_uint32((uint32_t *)p,
	    (uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
	/* Returns false on success, true on failure (jemalloc convention). */
#if (LG_SIZEOF_INT == 3)
	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 15,441 | 22.684049 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h | #ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
/* Minimal isblank() replacement for pre-2013 MSVC runtimes: blank
 * characters are exactly space and horizontal tab. */
static int
isblank(int c)
{
	return (c == ' ' || c == '\t');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_H */
| 1,608 | 20.171053 | 68 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/mb.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier. Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
	/* This is a true memory barrier. */
	asm volatile ("pusha;"
	    "xor %%eax,%%eax;"
	    "cpuid;"
	    "popa;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
# else
	/*
	 * This is hopefully enough to keep the compiler from reordering
	 * instructions around this one.
	 */
	asm volatile ("nop;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
/* x86-64: sfence orders prior stores before subsequent stores. */
JEMALLOC_INLINE void
mb_write(void)
{
	asm volatile ("sfence"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__powerpc__)
/* PowerPC: eieio enforces ordering of I/O and memory accesses. */
JEMALLOC_INLINE void
mb_write(void)
{
	asm volatile ("eieio"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__sparc64__)
/* SPARC64: store-store membar. */
JEMALLOC_INLINE void
mb_write(void)
{
	asm volatile ("membar #StoreStore"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__tile__)
/* Tile: use the compiler's full barrier builtin. */
JEMALLOC_INLINE void
mb_write(void)
{
	__sync_synchronize();
}
#else
/*
 * This is much slower than a simple memory barrier, but the semantics of mutex
 * unlock make this work.
 */
JEMALLOC_INLINE void
mb_write(void)
{
	malloc_mutex_t mtx;
	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
	malloc_mutex_lock(TSDN_NULL, &mtx);
	malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 2,738 | 22.612069 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/quarantine.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* A single quarantined allocation: the pointer and its usable size. */
struct quarantine_obj_s {
	void *ptr;
	size_t usize;
};
/* Per-thread quarantine state, backed by a ring buffer of objects. */
struct quarantine_s {
	size_t curbytes;	/* Total bytes currently quarantined. */
	size_t curobjs;		/* Number of objects currently quarantined. */
	size_t first;		/* Ring buffer index of the oldest object. */
#define LG_MAXOBJS_INIT 10
	size_t lg_maxobjs;	/* lg of the ring buffer capacity. */
	quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
/*
 * Allocation-path hook: if the calling thread has no quarantine state yet
 * (tsd_quarantine_get() returns NULL), delegate to the out-of-line work
 * function to set it up.  Only valid when fill/quarantine support is
 * enabled, per the assertion.
 */
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
	tsd_t *tsd;
	assert(config_fill && opt_quarantine);
	tsd = tsd_fetch();
	if (tsd_quarantine_get(tsd) == NULL)
		quarantine_alloc_hook_work(tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,593 | 25.131148 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/valgrind.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
/*
 * Wrappers around the Valgrind client requests; each is a no-op unless
 * the process was detected to be running under Valgrind (in_valgrind).
 */
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
	if (unlikely(in_valgrind)) \
		valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
	if (unlikely(in_valgrind)) \
		valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
	if (unlikely(in_valgrind)) \
		valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) { \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
zero); \
} \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
((ptr) != (old_ptr))
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
(ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
(old_ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(tsdn, ptr); \
\
if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
old_ptr)) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
old_ptr_null(old_ptr)) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
ptr_null(ptr)) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (unlikely(in_valgrind)) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void valgrind_make_mem_noaccess(void *ptr, size_t usize);
void valgrind_make_mem_undefined(void *ptr, size_t usize);
void valgrind_make_mem_defined(void *ptr, size_t usize);
void valgrind_freelike_block(void *ptr, size_t usize);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 4,841 | 36.534884 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/extent.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Arena from which this extent came, if any. */
arena_t *en_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *en_addr;
/* Total region size. */
size_t en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
/*
 * Trivial getters for extent_node_t fields; see the struct definition for
 * field semantics.
 */
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
	return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
	return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
	return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
	return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
	return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
	/* Commit state must not be queried on arena chunks. */
	assert(!node->en_achunk);
	return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
	return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
	return (node->en_prof_tctx);
}
/*
 * Trivial setters for extent_node_t fields; see the struct definition for
 * field semantics.
 */
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
	node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
	node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
	node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
	node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
	node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
	node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
	node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
	node->en_prof_tctx = tctx;
}
/*
 * Initialize all extent_node_t fields from the given values.  The achunk
 * flag always starts false, and the profiling context is only cleared when
 * profiling support is compiled in (config_prof).
 */
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
    size_t sn, bool zeroed, bool committed)
{
	extent_node_arena_set(node, arena);
	extent_node_addr_set(node, addr);
	extent_node_size_set(node, size);
	extent_node_sn_set(node, sn);
	extent_node_zeroed_set(node, zeroed);
	extent_node_committed_set(node, committed);
	extent_node_achunk_set(node, false);
	if (config_prof)
		extent_node_prof_tctx_set(node, NULL);
}
/* Initialize the node's dirty-run and chunk-cache ring linkage to self. */
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
	qr_new(&node->rd, rd_link);
	qr_new(node, cc_link);
}
/* Splice the node into both the runs_dirty and chunks_dirty rings. */
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
	qr_meld(runs_dirty, &node->rd, rd_link);
	qr_meld(chunks_dirty, node, cc_link);
}
/* Remove the node from both dirty rings. */
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
	qr_remove(&node->rd, rd_link);
	qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 6,787 | 24.04797 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk_dss.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
extern const char *dss_prec_names[];
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
void chunk_dss_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,211 | 30.894737 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h | /*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
| 1,669 | 27.793103 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/pages.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,077 | 34.933333 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/prof.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (tdata);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (unlikely(tdata == NULL))
return (true);
if (likely(tdata->bytes_until_sample >= usize)) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{
bool sampled, old_sampled;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 15,844 | 27.914234 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/hash.h | /*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 8,394 | 22.449721 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/tsd.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
tsd_state_nominal,
tsd_state_purgatory,
tsd_state_reincarnated
} tsd_state_t;
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are five macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_types(example_, example_t)
* malloc_tsd_protos(, example_, example_t)
* malloc_tsd_externs(example_, example_t)
* In example.c:
* malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
* malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get(bool init) {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast the
* function argument to (a_type *), then dereference the resulting pointer to
* access fields, e.g.
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = (example_t *)arg;
*
* example->x = 42;
* [...]
* if ([want the cleanup function to be called again])
* example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
*/
/* malloc_tsd_types(). */
/*
 * Declare the wrapper type backing one tsd variable.  The native TLS
 * variants (JEMALLOC_MALLOC_THREAD_CLEANUP and JEMALLOC_TLS) store a_type
 * directly and therefore expand to nothing; the Windows and generic
 * pthreads fallbacks pair the value with an "initialized" flag inside an
 * a_name##tsd_wrapper_t struct.
 */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#endif
/* malloc_tsd_protos(). */
/*
 * Emit prototypes for the generated accessors: the two-phase boot functions
 * (tsd_boot0()/tsd_boot1(), or tsd_boot() for both phases), a booted-state
 * query, and the get/set pair, which operate on (a_type *) rather than
 * a_type so that non-pointer types are supported (see comment above).
 */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(bool init); \
a_attr void \
a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */
/*
 * Declare the backing storage emitted by malloc_tsd_data(), for use when
 * that storage lives in another translation unit.  The variable set depends
 * on which TLS backend was selected at configure time.
 */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
/* __thread TLS with an explicit cleanup registry; no pthread key needed. */
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
/*
 * __thread TLS plus a pthread key -- presumably for destructor-driven
 * cleanup; confirm in malloc_tsd_funcs() for this configuration.
 */
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
/* TLS slot index (TlsAlloc) plus a statically-initialized boot wrapper. */
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
/* Generic pthreads fallback: key, init-recursion bookkeeping, boot wrapper. */
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif
/* malloc_tsd_data(). */
/*
 * Define the storage declared by malloc_tsd_externs().  Every variant starts
 * with a_name##tsd_booted == false; the wrapper-based variants additionally
 * provide a statically-initialized boot wrapper holding a_initializer, and
 * the generic pthreads fallback also defines the init-recursion list head.
 */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
    a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
	ql_head_initializer(blocks), \
	MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
 * Variant 1: compiler TLS plus jemalloc's own thread-exit hook.  Data lives
 * directly in a_name##tsd_tls; a_name##tsd_initialized records whether
 * a_cleanup still has work to do for the current thread.  The cleanup
 * wrapper returns true iff a_cleanup re-armed the slot (by calling tsd_set),
 * requesting another cleanup round.
 */
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
	if (a_name##tsd_initialized) { \
		a_name##tsd_initialized = false; \
		a_cleanup(&a_name##tsd_tls); \
	} \
	return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
/*
 * Variant 2: compiler TLS; a pthread key exists only so the destructor runs
 * at thread exit.  Reads and writes go straight to the __thread variable,
 * and tsd_set additionally points the key at it so the destructor fires.
 */
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
		    0) \
			return (true); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_setspecific(a_name##tsd_tsd, \
		    (void *)(&a_name##tsd_tls))) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
				abort(); \
		} \
	} \
}
#elif (defined(_WIN32))
/*
 * Variant 3: Windows.  No compiler TLS; each value lives in a heap-allocated
 * wrapper reached through a TlsAlloc() slot, and cleanup runs via jemalloc's
 * registered thread-exit hooks.  GetLastError()/SetLastError() bracketing
 * keeps TlsGetValue() from clobbering the caller-visible error code.
 */
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (wrapper == NULL) \
		return (false); \
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			return (true); \
		} \
	} \
	malloc_tsd_dalloc(wrapper); \
	return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (init && unlikely(wrapper == NULL)) { \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			wrapper->initialized = false; \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
	} \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	a_name##tsd_tsd = TlsAlloc(); \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
		return (true); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
		return (NULL); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
}
#else
/*
 * Variant 4: pthreads-only fallback.  Values live in heap-allocated wrappers
 * reached through pthread_getspecific(); tsd_init_check_recursion()/
 * tsd_init_finish() guard against reentrant initialization (the allocator
 * may be re-entered while allocating the wrapper itself).
 */
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			if (pthread_setspecific(a_name##tsd_tsd, \
			    (void *)wrapper)) { \
				malloc_write("<jemalloc>: Error" \
				    " setting TSD for "#a_name"\n"); \
				if (opt_abort) \
					abort(); \
			} \
			return; \
		} \
	} \
	malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (pthread_setspecific(a_name##tsd_tsd, \
	    (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    pthread_getspecific(a_name##tsd_tsd); \
\
	if (init && unlikely(wrapper == NULL)) { \
		tsd_init_block_t block; \
		wrapper = tsd_init_check_recursion( \
		    &a_name##tsd_init_head, &block); \
		if (wrapper) \
		    return (wrapper); \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		block.data = wrapper; \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			wrapper->initialized = false; \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
		tsd_init_finish(&a_name##tsd_init_head, &block); \
	} \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (pthread_key_create(&a_name##tsd_tsd, \
	    a_name##tsd_cleanup_wrapper) != 0) \
		return (true); \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
		return (NULL); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
}
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
/* One pending-initialization record per thread, linked into a
 * tsd_init_head_s so reentrant TSD initialization can be detected. */
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t) link;
	pthread_t thread;
	void *data;
};
/* List of in-flight TSD initializations plus the mutex protecting it. */
struct tsd_init_head_s {
	ql_head(tsd_init_block_t) blocks;
	malloc_mutex_t lock;
};
#endif
/*
 * X-macro listing every TSD field as O(name, type).  TSD_INITIALIZER must
 * list its initializers in exactly this order, preceded by the tsd_state_t
 * state field of struct tsd_s.
 */
#define MALLOC_TSD \
/*  O(name, type) */ \
    O(tcache, tcache_t *) \
    O(thread_allocated, uint64_t) \
    O(thread_deallocated, uint64_t) \
    O(prof_tdata, prof_tdata_t *) \
    O(iarena, arena_t *) \
    O(arena, arena_t *) \
    O(arenas_tdata, arena_tdata_t *) \
    O(narenas_tdata, unsigned) \
    O(arenas_tdata_bypass, bool) \
    O(tcache_enabled, tcache_enabled_t) \
    O(quarantine, quarantine_t *) \
    O(witnesses, witness_list_t) \
    O(witness_fork, bool) \

#define TSD_INITIALIZER { \
    tsd_state_uninitialized, \
    NULL, \
    0, \
    0, \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    0, \
    false, \
    tcache_enabled_default, \
    NULL, \
    ql_head_initializer(witnesses), \
    false \
}
/* The core TSD record: allocator state flag plus one member per MALLOC_TSD
 * entry, stamped out via the O() x-macro. */
struct tsd_s {
	tsd_state_t state;
#define O(n, t) \
	t n;
MALLOC_TSD
#undef O
};

/*
 * Wrapper around tsd_t that makes it possible to avoid implicit conversion
 * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
 * explicitly converted to tsd_t, which is non-nullable.
 */
struct tsdn_s {
	tsd_t tsd;
};

/* Template for freshly created per-thread state. */
static const tsd_t tsd_initializer = TSD_INITIALIZER;

/* Instantiate the wrapper types for the unnamed ("") core TSD. */
malloc_tsd_types(, tsd_t)
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Bootstrap allocation/deallocation used for TSD wrappers (must not recurse
 * into the regular allocator paths). */
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
/* No-op destructor for TSD slots that need no cleanup. */
void malloc_tsd_no_cleanup(void *arg);
/* Register a hook to run at thread exit (thread-cleanup builds only). */
void malloc_tsd_cleanup_register(bool (*f)(void));
/* Two-phase bootstrap of the core tsd_t itself. */
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
/* Recursion-safe lazy initialization helpers (pthreads fallback only). */
void *tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
/* Destructor for the core tsd_t. */
void tsd_cleanup(void *arg);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
/* Prototypes used when the accessors below are compiled out-of-line. */
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t *tsdn_fetch(void);
bool tsdn_null(const tsdn_t *tsdn);
tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
/* Instantiate storage declarations and the full accessor implementations
 * for the unnamed core tsd_t. */
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
/*
 * Fetch this thread's tsd_t, optionally initializing it.  With init false,
 * configurations whose tsd_get() allocates may return NULL instead of
 * allocating.  Also advances the TSD lifecycle state machine.
 */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
	tsd_t *tsd = tsd_get(init);

	if (!init && tsd_get_allocates() && tsd == NULL)
		return (NULL);
	assert(tsd != NULL);

	if (unlikely(tsd->state != tsd_state_nominal)) {
		switch (tsd->state) {
		case tsd_state_uninitialized:
			/* First use on this thread: arm cleanup. */
			tsd->state = tsd_state_nominal;
			tsd_set(tsd);
			break;
		case tsd_state_purgatory:
			/* Accessed again after cleanup already started. */
			tsd->state = tsd_state_reincarnated;
			tsd_set(tsd);
			break;
		default:
			assert(tsd->state == tsd_state_reincarnated);
			break;
		}
	}
	return (tsd);
}
/* Fetch this thread's tsd_t with lazy initialization enabled. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
	tsd_t *tsd = tsd_fetch_impl(true);

	return (tsd);
}
/* View a non-NULL tsd_t as the nullable tsdn_t handle.  tsd_t is the sole
 * member of struct tsdn_s, so only the static type changes. */
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
	tsdn_t *tsdn = (tsdn_t *)tsd;

	return (tsdn);
}
/* True iff this thread's TSD is live and fully usable. */
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
	bool nominal = (tsd->state == tsd_state_nominal);

	return (nominal);
}
/*
 * Stamp out tsd_<field>p_get() / tsd_<field>_get() / tsd_<field>_set()
 * accessors for every MALLOC_TSD entry.  Setters assert that the TSD is in
 * the nominal state.
 */
#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
	return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
	return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
	assert(tsd->state == tsd_state_nominal); \
	tsd->n = n; \
}
MALLOC_TSD
#undef O
/* Like tsd_fetch(), but usable before TSD boot completes: returns a NULL
 * (tsdn_null) handle in that case instead of initializing. */
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
	tsdn_t *tsdn = NULL;

	if (tsd_booted_get())
		tsdn = tsd_tsdn(tsd_fetch_impl(false));
	return (tsdn);
}
/* A NULL handle is the "no thread state available" sentinel. */
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
	bool is_null = (tsdn == NULL);

	return (is_null);
}
/* Convert a handle known to be non-NULL back to its tsd_t (asserts). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
	assert(!tsdn_null(tsdn));
	/* tsdn_t wraps tsd_t as its only member. */
	return (&tsdn->tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 21,743 | 26.593909 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash_helper.c | /*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015-2016, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This is a C++ to C conversion from the ardb project.
* This file started out as:
* https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp
*/
#include "geohash_helper.h"
#include <math.h>
/* M_PI is a POSIX/XSI extension, not part of ISO C; provide a fallback so
 * this file also builds with strictly conforming compilers (-std=c11). */
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define D_R (M_PI / 180.0)
#define R_MAJOR 6378137.0
#define R_MINOR 6356752.3142
#define RATIO (R_MINOR / R_MAJOR)
#define ECCENT (sqrt(1.0 - (RATIO *RATIO)))
#define COM (0.5 * ECCENT)

/// @brief The usual PI/180 constant
const double DEG_TO_RAD = 0.017453292519943295769236907684886;
/// @brief Earth's quadratic mean radius for WGS-84
const double EARTH_RADIUS_IN_METERS = 6372797.560856;

/* North/south extent of the Web-Mercator projection, in meters. */
const double MERCATOR_MAX = 20037726.37;
const double MERCATOR_MIN = -20037726.37;

/* Degree <-> radian conversions. */
static inline double deg_rad(double ang) { return ang * D_R; }
static inline double rad_deg(double ang) { return ang / D_R; }

/* This function is used in order to estimate the step (bits precision)
 * of the 9 search area boxes during radius queries.
 *
 * range_meters: radius of the search area (0 selects maximum precision).
 * lat: latitude of the search center; boxes are widened near the poles.
 * Returns a step clamped to [1, 26] (26*2 = 52 bits of precision). */
uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) {
    if (range_meters == 0) return 26;
    int step = 1;
    while (range_meters < MERCATOR_MAX) {
        range_meters *= 2;
        step++;
    }
    step -= 2; /* Make sure range is included in most of the base cases. */
    /* Wider range towards the poles... Note: it is possible to do better
     * than this approximation by computing the distance between meridians
     * at this latitude, but this does the trick for now. */
    if (lat > 66 || lat < -66) {
        step--;
        if (lat > 80 || lat < -80) step--;
    }
    /* Frame to valid range. */
    if (step < 1) step = 1;
    if (step > 26) step = 26;
    return step;
}
/* Compute the bounding box of the search circle centered at
 * (longitude,latitude) with radius radius_meters.  On success bounds[] holds
 * { min_lon, min_lat, max_lon, max_lat }; returns 0 only when bounds is
 * NULL, 1 otherwise. */
int geohashBoundingBox(double longitude, double latitude, double radius_meters,
                       double *bounds) {
    if (!bounds) return 0;

    /* Latitude spans a plain arc; the longitude span grows by 1/cos(lat)
     * as meridians converge away from the equator. */
    double lat_delta = rad_deg(radius_meters/EARTH_RADIUS_IN_METERS);
    double lon_delta = rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude)));

    bounds[0] = longitude - lon_delta;
    bounds[1] = latitude - lat_delta;
    bounds[2] = longitude + lon_delta;
    bounds[3] = latitude + lat_delta;
    return 1;
}
/* Return a set of areas (center + 8) that are able to cover a range query
 * for the specified position and radius. */
GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) {
    GeoHashRange long_range, lat_range;
    GeoHashRadius radius;
    GeoHashBits hash;
    GeoHashNeighbors neighbors;
    GeoHashArea area;
    double min_lon, max_lon, min_lat, max_lat;
    double bounds[4];
    int steps;

    /* Bounding rectangle of the search circle. */
    geohashBoundingBox(longitude, latitude, radius_meters, bounds);
    min_lon = bounds[0];
    min_lat = bounds[1];
    max_lon = bounds[2];
    max_lat = bounds[3];

    /* Initial precision, then the center cell and its 8 neighbors. */
    steps = geohashEstimateStepsByRadius(radius_meters,latitude);

    geohashGetCoordRange(&long_range,&lat_range);
    geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash);
    geohashNeighbors(&hash,&neighbors);
    geohashDecode(long_range,lat_range,hash,&area);

    /* Check if the step is enough at the limits of the covered area.
     * Sometimes when the search area is near an edge of the
     * area, the estimated step is not small enough, since one of the
     * north / south / west / east square is too near to the search area
     * to cover everything. */
    int decrease_step = 0;
    {
        GeoHashArea north, south, east, west;

        geohashDecode(long_range, lat_range, neighbors.north, &north);
        geohashDecode(long_range, lat_range, neighbors.south, &south);
        geohashDecode(long_range, lat_range, neighbors.east, &east);
        geohashDecode(long_range, lat_range, neighbors.west, &west);

        /* If any adjacent cell's far edge is still inside the radius, the
         * 3x3 grid at this precision cannot cover the whole circle. */
        if (geohashGetDistance(longitude,latitude,longitude,north.latitude.max)
            < radius_meters) decrease_step = 1;
        if (geohashGetDistance(longitude,latitude,longitude,south.latitude.min)
            < radius_meters) decrease_step = 1;
        if (geohashGetDistance(longitude,latitude,east.longitude.max,latitude)
            < radius_meters) decrease_step = 1;
        if (geohashGetDistance(longitude,latitude,west.longitude.min,latitude)
            < radius_meters) decrease_step = 1;
    }

    /* One step lower doubles each cell's extent; re-derive everything. */
    if (steps > 1 && decrease_step) {
        steps--;
        geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash);
        geohashNeighbors(&hash,&neighbors);
        geohashDecode(long_range,lat_range,hash,&area);
    }

    /* Exclude the search areas that are useless: when the center cell
     * already reaches past the bounding box on a side, the neighbors on
     * that side are zeroed so the caller skips them. */
    if (area.latitude.min < min_lat) {
        GZERO(neighbors.south);
        GZERO(neighbors.south_west);
        GZERO(neighbors.south_east);
    }
    if (area.latitude.max > max_lat) {
        GZERO(neighbors.north);
        GZERO(neighbors.north_east);
        GZERO(neighbors.north_west);
    }
    if (area.longitude.min < min_lon) {
        GZERO(neighbors.west);
        GZERO(neighbors.south_west);
        GZERO(neighbors.north_west);
    }
    if (area.longitude.max > max_lon) {
        GZERO(neighbors.east);
        GZERO(neighbors.south_east);
        GZERO(neighbors.north_east);
    }

    radius.hash = hash;
    radius.neighbors = neighbors;
    radius.area = area;
    return radius;
}
/* WGS84 entry point: the generic implementation already works on WGS84
 * coordinates, so simply delegate. */
GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude,
                                           double radius_meters) {
    return geohashGetAreasByRadius(longitude, latitude, radius_meters);
}
/* Left-justify the hash bits into a fixed 52-bit representation so hashes
 * produced with different step values become comparable. */
GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) {
    return hash.bits << (52 - hash.step * 2);
}
/* Calculate distance using haversin great circle distance formula.
 * Arguments are degrees; the result is meters. */
double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) {
    double lat1r = deg_rad(lat1d);
    double lon1r = deg_rad(lon1d);
    double lat2r = deg_rad(lat2d);
    double lon2r = deg_rad(lon2d);

    /* Half-angle sines of the latitude / longitude differences. */
    double u = sin((lat2r - lat1r) / 2);
    double v = sin((lon2r - lon1r) / 2);
    return 2.0 * EARTH_RADIUS_IN_METERS *
           asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v));
}
/* Store the distance between (x1,y1) and (x2,y2) in *distance and report
 * whether it is within radius: 1 if inside (or on the edge), 0 if not. */
int geohashGetDistanceIfInRadius(double x1, double y1,
                                 double x2, double y2, double radius,
                                 double *distance) {
    *distance = geohashGetDistance(x1, y1, x2, y2);
    return (*distance > radius) ? 0 : 1;
}
/* WGS84 alias: coordinates are already WGS84, so delegate directly. */
int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
                                      double y2, double radius,
                                      double *distance) {
    return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance);
}
| 8,623 | 38.559633 | 97 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash.h | /*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GEOHASH_H_
#define GEOHASH_H_
#include <stddef.h>
#include <stdint.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif
/* Convenience predicates: an all-zero hash or range is treated as "unset"
 * throughout the library. */
#define HASHISZERO(r) (!(r).bits && !(r).step)
#define RANGEISZERO(r) (!(r).max && !(r).min)
#define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r))

#define GEO_STEP_MAX 26 /* 26*2 = 52 bits. */

/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */
#define GEO_LAT_MIN -85.05112878
#define GEO_LAT_MAX 85.05112878
#define GEO_LONG_MIN -180
#define GEO_LONG_MAX 180

/* Compass directions used when walking to neighboring cells. */
typedef enum {
    GEOHASH_NORTH = 0,
    GEOHASH_EAST,
    GEOHASH_WEST,
    GEOHASH_SOUTH,
    GEOHASH_SOUTH_WEST,
    GEOHASH_SOUTH_EAST,
    GEOHASH_NORT_WEST,
    GEOHASH_NORT_EAST
} GeoDirection;

/* Interleaved latitude/longitude bits plus the precision (step = bits per
 * coordinate) at which they were produced. */
typedef struct {
    uint64_t bits;
    uint8_t step;
} GeoHashBits;

/* Closed coordinate interval on one axis. */
typedef struct {
    double min;
    double max;
} GeoHashRange;

/* A decoded cell: its hash plus its longitude/latitude extents. */
typedef struct {
    GeoHashBits hash;
    GeoHashRange longitude;
    GeoHashRange latitude;
} GeoHashArea;

/* The eight cells surrounding a given cell. */
typedef struct {
    GeoHashBits north;
    GeoHashBits east;
    GeoHashBits west;
    GeoHashBits south;
    GeoHashBits north_east;
    GeoHashBits south_east;
    GeoHashBits north_west;
    GeoHashBits south_west;
} GeoHashNeighbors;

/*
 * NOTE(review): despite the historical "0:success / -1:failed" comment that
 * used to sit here, the implementations in geohash.c return 1 on success
 * and 0 on failure.
 */
void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range);
int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range,
                  double longitude, double latitude, uint8_t step,
                  GeoHashBits *hash);
int geohashEncodeType(double longitude, double latitude,
                      uint8_t step, GeoHashBits *hash);
int geohashEncodeWGS84(double longitude, double latitude, uint8_t step,
                       GeoHashBits *hash);
int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range,
                  const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area);
int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy);
int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy);
int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy);
int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy);
void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors);
#if defined(__cplusplus)
}
#endif
#endif /* GEOHASH_H_ */
| 4,124 | 33.663866 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash.c | /*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015-2016, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "geohash.h"
/**
* Hashing works like this:
* Divide the world into 4 buckets. Label each one as such:
* -----------------
* | | |
* | | |
* | 0,1 | 1,1 |
* -----------------
* | | |
* | | |
* | 0,0 | 1,0 |
* -----------------
*/
/* Interleave the 32 low bits of x and y into a 64 bit value: bits of x land
 * in the even positions, bits of y in the odd positions.  Classic "binary
 * magic numbers" bit spreading; see:
 * https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN
 */
static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) {
    /* B[i] keeps the surviving bit groups after shifting by S[i]. */
    static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL,
                                 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
                                 0x0000FFFF0000FFFFULL};
    static const unsigned int S[] = {1, 2, 4, 8, 16};
    uint64_t x = xlo;
    uint64_t y = ylo;
    int i;

    /* Spread each input out to every other bit, halving the shift width
     * at each round (16, 8, 4, 2, 1). */
    for (i = 4; i >= 0; i--) {
        x = (x | (x << S[i])) & B[i];
        y = (y | (y << S[i])) & B[i];
    }
    /* x now occupies the even bits; slot y into the odd bits. */
    return x | (y << 1);
}
/* Reverse the interleave process: the even bits of `interleaved` are
 * gathered into the low 32 bits of the result, the odd bits into the high
 * 32 bits.  Derived from http://stackoverflow.com/questions/4909263
 */
static inline uint64_t deinterleave64(uint64_t interleaved) {
    static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL,
                                 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
                                 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
    static const unsigned int S[] = {0, 1, 2, 4, 8, 16};
    uint64_t x = interleaved;      /* even bit positions */
    uint64_t y = interleaved >> 1; /* odd bit positions */
    int i;

    /* Compact the spread-out bits, doubling the shift width each round. */
    for (i = 0; i < 6; i++) {
        x = (x | (x >> S[i])) & B[i];
        y = (y | (y >> S[i])) & B[i];
    }
    return x | (y << 32);
}
/* Fill both ranges with the full coordinate limits supported by the encoder.
 * These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001; the
 * north/south poles themselves cannot be geocoded. */
void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) {
    long_range->min = GEO_LONG_MIN;
    long_range->max = GEO_LONG_MAX;
    lat_range->min = GEO_LAT_MIN;
    lat_range->max = GEO_LAT_MAX;
}
/* Encode (longitude,latitude) into hash->bits at `step` bits of precision
 * per coordinate, relative to the given ranges (usually those returned by
 * geohashGetCoordRange()).  Returns 1 on success, 0 on invalid arguments or
 * out-of-range coordinates.  Callers in this library keep step <=
 * GEO_STEP_MAX (26). */
int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range,
                  double longitude, double latitude, uint8_t step,
                  GeoHashBits *hash) {
    /* Check basic arguments sanity. */
    if (hash == NULL || step > 32 || step == 0 ||
        RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) return 0;

    /* Return an error when trying to index outside the supported
     * constraints. */
    if (longitude > 180 || longitude < -180 ||
        latitude > 85.05112878 || latitude < -85.05112878) return 0;

    hash->bits = 0;
    hash->step = step;

    if (latitude < lat_range->min || latitude > lat_range->max ||
        longitude < long_range->min || longitude > long_range->max) {
        return 0;
    }

    /* Normalize each coordinate to [0,1] within its range. */
    double lat_offset =
        (latitude - lat_range->min) / (lat_range->max - lat_range->min);
    double long_offset =
        (longitude - long_range->min) / (long_range->max - long_range->min);

    /* Convert to fixed point based on the step size.  The shift is done in
     * unsigned 64-bit arithmetic: the guard above admits step up to 32, and
     * (1 << step) on a 32-bit int would be undefined behavior for
     * step >= 31 (C11 6.5.7); the value is identical for all valid steps. */
    lat_offset *= (1ULL << step);
    long_offset *= (1ULL << step);
    hash->bits = interleave64(lat_offset, long_offset);
    return 1;
}
/* Encode using the library's full coordinate limits. */
int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash) {
    GeoHashRange range[2] = { { 0 } };

    geohashGetCoordRange(&range[0], &range[1]);
    return geohashEncode(&range[0], &range[1], longitude, latitude, step, hash);
}
/* WGS84 alias of geohashEncodeType(). */
int geohashEncodeWGS84(double longitude, double latitude, uint8_t step,
                       GeoHashBits *hash) {
    return geohashEncodeType(longitude, latitude, step, hash);
}
/* Decode `hash` into the coordinate cell (`area`) it denotes within the
 * given ranges.  Returns 1 on success, 0 on unset hash / bad arguments. */
int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range,
                  const GeoHashBits hash, GeoHashArea *area) {
    if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) ||
        RANGEISZERO(long_range)) {
        return 0;
    }

    area->hash = hash;
    uint8_t step = hash.step;
    uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */

    double lat_scale = lat_range.max - lat_range.min;
    double long_scale = long_range.max - long_range.min;

    uint32_t ilato = hash_sep;       /* get lat part of deinterleaved hash */
    uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */

    /* divide by 2**step.
     * Then, for 0-1 coordinate, multiply times scale and add
       to the min to get the absolute coordinate. */
    area->latitude.min =
        lat_range.min + (ilato * 1.0 / (1ull << step)) * lat_scale;
    area->latitude.max =
        lat_range.min + ((ilato + 1) * 1.0 / (1ull << step)) * lat_scale;
    area->longitude.min =
        long_range.min + (ilono * 1.0 / (1ull << step)) * long_scale;
    area->longitude.max =
        long_range.min + ((ilono + 1) * 1.0 / (1ull << step)) * long_scale;

    return 1;
}
/* Decode against the library's default coordinate ranges (see
 * geohashGetCoordRange()). */
int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area) {
    GeoHashRange ranges[2] = { { 0 } };

    geohashGetCoordRange(&ranges[0], &ranges[1]);
    return geohashDecode(ranges[0], ranges[1], hash, area);
}
/* WGS84 output is the default representation, so this is a thin alias of
 * geohashDecodeType(). */
int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) {
    const int decoded = geohashDecodeType(hash, area);
    return decoded;
}
int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy) {
if (!xy) return 0;
xy[0] = (area->longitude.min + area->longitude.max) / 2;
xy[1] = (area->latitude.min + area->latitude.max) / 2;
return 1;
}
int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy) {
GeoHashArea area = { { 0 } };
if (!xy || !geohashDecodeType(hash, &area))
return 0;
return geohashDecodeAreaToLongLat(&area, xy);
}
/* WGS84 alias of geohashDecodeToLongLatType(). */
int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy) {
    const int ok = geohashDecodeToLongLatType(hash, xy);
    return ok;
}
/* Shift the longitude (x) component of an interleaved geohash by one cell:
 * d > 0 moves east, d < 0 moves west, d == 0 is a no-op (see
 * geohashNeighbors(), which uses x moves for the east/west neighbors).
 *
 * The 0xaaaa... mask selects the bit positions holding the x component and
 * 0x5555... the complementary (y) positions; 'zz' restricts the
 * complementary mask to the 2*step bits actually in use.  Adding (zz + 1)
 * makes the carry propagate only across x's bit positions, because every
 * in-between position is already saturated by the mask — the standard trick
 * for incrementing one coordinate of a Morton/interleaved code without
 * deinterleaving it. */
static void geohash_move_x(GeoHashBits *hash, int8_t d) {
    if (d == 0)
        return;

    uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL;
    uint64_t y = hash->bits & 0x5555555555555555ULL;

    uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2);

    if (d > 0) {
        x = x + (zz + 1);
    } else {
        /* Decrement variant: pre-set the complementary positions so the
         * subtraction's borrow also skips over them. */
        x = x | zz;
        x = x - (zz + 1);
    }

    /* Drop any carry out of the used bit range, then recombine with the
     * untouched y bits. */
    x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2));
    hash->bits = (x | y);
}
/* Shift the latitude (y) component of an interleaved geohash by one cell:
 * d > 0 moves north, d < 0 moves south, d == 0 is a no-op (see
 * geohashNeighbors(), which uses y moves for the north/south neighbors).
 *
 * Carry-propagation trick: 'zz' is the complementary (x) bit mask limited
 * to the 2*step used bits; adding (zz + 1) to y lets the carry skip over
 * the saturated x positions, and the OR/subtract form does the same for a
 * borrow when decrementing. */
static void geohash_move_y(GeoHashBits *hash, int8_t d) {
    if (d == 0)
        return;

    uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL;
    uint64_t y = hash->bits & 0x5555555555555555ULL;

    uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2);
    if (d > 0) {
        y = y + (zz + 1);
    } else {
        y = y | zz;
        y = y - (zz + 1);
    }

    /* Mask off any overflow out of the used bits and recombine. */
    y &= (0x5555555555555555ULL >> (64 - hash->step * 2));
    hash->bits = (x | y);
}
/* Populate *neighbors with the eight cells surrounding *hash at the same
 * step (precision).  Each neighbor is derived by moving the interleaved
 * longitude (x) and/or latitude (y) component by one cell.
 *
 * Cleanup: the original also issued moves with a zero delta
 * (e.g. geohash_move_y(&neighbors->east, 0)); those are documented no-ops
 * (the move helpers return immediately for d == 0), so the cardinal
 * neighbors need only a single move each. */
void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors) {
    neighbors->east = *hash;
    neighbors->west = *hash;
    neighbors->north = *hash;
    neighbors->south = *hash;
    neighbors->south_east = *hash;
    neighbors->south_west = *hash;
    neighbors->north_east = *hash;
    neighbors->north_west = *hash;

    /* Cardinal neighbors: one move along a single axis. */
    geohash_move_x(&neighbors->east, 1);
    geohash_move_x(&neighbors->west, -1);
    geohash_move_y(&neighbors->south, -1);
    geohash_move_y(&neighbors->north, 1);

    /* Diagonal neighbors: one move along each axis. */
    geohash_move_x(&neighbors->north_west, -1);
    geohash_move_y(&neighbors->north_west, 1);
    geohash_move_x(&neighbors->north_east, 1);
    geohash_move_y(&neighbors->north_east, 1);
    geohash_move_x(&neighbors->south_east, 1);
    geohash_move_y(&neighbors->south_east, -1);
    geohash_move_x(&neighbors->south_west, -1);
    geohash_move_y(&neighbors->south_west, -1);
}
| 10,005 | 32.804054 | 91 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash_helper.h | /*
* Copyright (c) 2013-2014, yinqiwen <[email protected]>
* Copyright (c) 2014, Matt Stancliff <[email protected]>.
* Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GEOHASH_HELPER_HPP_
#define GEOHASH_HELPER_HPP_
#include <math.h>
#include "geohash.h"
#define GZERO(s) s.bits = s.step = 0;
#define GISZERO(s) (!s.bits && !s.step)
#define GISNOTZERO(s) (s.bits || s.step)
typedef uint64_t GeoHashFix52Bits;
typedef uint64_t GeoHashVarBits;
typedef struct {
GeoHashBits hash;
GeoHashArea area;
GeoHashNeighbors neighbors;
} GeoHashRadius;
int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b);
uint8_t geohashEstimateStepsByRadius(double range_meters, double lat);
int geohashBoundingBox(double longitude, double latitude, double radius_meters,
double *bounds);
GeoHashRadius geohashGetAreasByRadius(double longitude,
double latitude, double radius_meters);
GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude,
double radius_meters);
GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude,
double radius_meters);
GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash);
double geohashGetDistance(double lon1d, double lat1d,
double lon2d, double lat2d);
int geohashGetDistanceIfInRadius(double x1, double y1,
double x2, double y2, double radius,
double *distance);
int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
double y2, double radius,
double *distance);
#endif /* GEOHASH_HELPER_HPP_ */
| 3,368 | 45.791667 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/utils/corrupt_rdb.c | /* Trivia program to corrupt an RDB file in order to check the RDB check
* program behavior and effectiveness.
*
* Copyright (C) 2016 Salvatore Sanfilippo.
* This software is released in the 3-clause BSD license. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
int main(int argc, char **argv) {
    /* Corrupt <cycles> random spans of the RDB file given as <filename>
     * by overwriting 1-31 bytes with random garbage at random offsets. */
    struct stat sb;
    int fd, cycles;

    if (argc != 3) {
        fprintf(stderr,"Usage: <filename> <cycles>\n");
        exit(1);
    }

    srand(time(NULL));
    cycles = atoi(argv[2]);
    /* Bug fix: open the file named on the command line; the original
     * hard-coded "dump.rdb", contradicting its own usage string. */
    fd = open(argv[1],O_RDWR);
    if (fd == -1) {
        perror("open");
        exit(1);
    }
    if (fstat(fd,&sb) == -1) {
        perror("fstat");
        exit(1);
    }
    if (sb.st_size == 0) {
        /* rand()%0 would be undefined; nothing to corrupt anyway. */
        fprintf(stderr,"Empty file, nothing to corrupt\n");
        exit(1);
    }

    while(cycles--) {
        unsigned char buf[32];
        unsigned long offset = rand()%sb.st_size;
        int writelen = 1+rand()%31;
        int j;

        for (j = 0; j < writelen; j++) buf[j] = (char)rand();
        if (lseek(fd,offset,SEEK_SET) == (off_t)-1) {
            perror("lseek");
            exit(1);
        }
        printf("Writing %d bytes at offset %lu\n", writelen, offset);
        if (write(fd,buf,writelen) != writelen) {
            perror("write");
            exit(1);
        }
    }
    return 0;
}
| 1,070 | 22.8 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/utils/hashtable/rehashing.c | #include "redis.h"
#include "dict.h"
/* Minimal stand-in for the Redis assertion handler: report the failed
 * expression with its source location, then abort the test program. */
void _redisAssert(char *x, char *y, int l) {
    fprintf(stdout, "ASSERT: %s %s %d\n", x, y, l);
    exit(1);
}
/* Hash an integer key stored directly in the pointer slot: first run the
 * dict library's generic byte-wise hash over the integer, then apply an
 * extra shift/add/xor avalanche pass. */
unsigned int dictKeyHash(const void *keyp) {
    unsigned long h = (unsigned long)keyp;

    h = dictGenHashFunction(&h, sizeof(h));
    /* Integer avalanche rounds. */
    h += ~(h << 15);
    h ^= (h >> 10);
    h += (h << 3);
    h ^= (h >> 6);
    h += ~(h << 11);
    h ^= (h >> 16);
    return h;
}
/* dict key comparator: keys are plain integers stored directly in the
 * pointer slot, so equality is an integer comparison.  'privdata' is
 * required by the dict API but unused here. */
int dictKeyCompare(void *privdata, const void *key1, const void *key2) {
    return (unsigned long)key1 == (unsigned long)key2;
}
/* dict type descriptor wiring the integer-key hash/compare helpers above.
 * Keys and values are not owned by the dict (keys are integers cast into
 * the pointer slot, values are NULL), so no dup/destructor hooks. */
dictType dictTypeTest = {
    dictKeyHash, /* hash function */
    NULL, /* key dup */
    NULL, /* val dup */
    dictKeyCompare, /* key compare */
    NULL, /* key destructor */
    NULL /* val destructor */
};
/* Print one hash table as a line of 0/1 flags (one per bucket: 1 = bucket
 * has at least one entry), or "NULL" when the table is not allocated. */
void showBuckets(dictht ht) {
    int i;

    if (ht.table == NULL) {
        printf("NULL\n");
        return;
    }
    for (i = 0; i < ht.size; i++)
        printf("%c", ht.table[i] != NULL ? '1' : '0');
    printf("\n");
}
/* Dump the dict's state: a rehash progress marker (only while incremental
 * rehashing is in progress, i.e. rehashidx != -1) followed by both bucket
 * arrays. */
void show(dict *d) {
    if (d->rehashidx != -1) {
        int i;
        printf("rhidx: ");
        for (i = 0; i < d->rehashidx; i++) printf(".");
        printf("|\n");
    }
    printf("ht[0]: ");
    showBuckets(d->ht[0]);
    printf("ht[1]: ");
    showBuckets(d->ht[1]);
    printf("\n");
}
/* qsort() comparator ordering an array of entry pointers by numeric
 * address.
 *
 * Bug fix: the original returned (la - lb) — an unsigned difference
 * truncated to int — which reports the wrong sign whenever the distance
 * between the two addresses exceeds INT_MAX.  Compare explicitly instead. */
int sortPointers(const void *a, const void *b) {
    unsigned long la, lb;

    la = (unsigned long)(*(void * const *)a);
    lb = (unsigned long)(*(void * const *)b);
    if (la < lb) return -1;
    if (la > lb) return 1;
    return 0;
}
/* Exercise dictGetSomeKeys() 'times' times against dict 'd' and classify
 * each attempt: a "perfect" run returned exactly the requested number of
 * entries with no duplicates, anything else is an "approximated" run
 * (judging by this test's tolerance, dictGetSomeKeys is best-effort and
 * may return fewer or duplicated entries — confirm against dict.c).
 * Duplicates are detected by sorting the returned entry pointers and
 * scanning adjacent pairs.  Results accumulate into *perfect_run /
 * *approx_run so callers can aggregate across invocations. */
void stressGetKeys(dict *d, int times, int *perfect_run, int *approx_run) {
    int j;

    /* Worst case: we may request (and receive) up to dictSize(d) entries. */
    dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d));
    for (j = 0; j < times; j++) {
        int requested = rand() % (dictSize(d)+1);
        int returned = dictGetSomeKeys(d, des, requested);
        int dup = 0;

        qsort(des,returned,sizeof(dictEntry*),sortPointers);
        if (returned > 1) {
            int i;
            for (i = 0; i < returned-1; i++) {
                if (des[i] == des[i+1]) dup++;
            }
        }

        if (requested == returned && dup == 0) {
            (*perfect_run)++;
        } else {
            (*approx_run)++;
            printf("Requested, returned, duplicated: %d %d %d\n",
                requested, returned, dup);
        }
    }
    zfree(des);
}
#define MAX1 120
#define MAX2 1000
/* Test driver: visualize incremental rehashing while growing and shrinking
 * a dict, then stress dictGetSomeKeys() and summarize the results. */
int main(void) {
    unsigned long n;
    dict *d = dictCreate(&dictTypeTest,NULL);

    srand(time(NULL));

    /* Phase 1: grow, dumping the tables after every insertion. */
    for (n = 0; n < MAX1; n++) {
        dictAdd(d,(void*)n,NULL);
        show(d);
    }
    printf("Size: %d\n", (int)dictSize(d));

    /* Phase 2: shrink, forcing a resize after every deletion. */
    for (n = 0; n < MAX1; n++) {
        dictDelete(d,(void*)n);
        dictResize(d);
        show(d);
    }
    dictRelease(d);

    /* Phase 3: dictGetSomeKeys() sampling quality while growing and
     * shrinking a fresh dict. */
    d = dictCreate(&dictTypeTest,NULL);
    printf("Stress testing dictGetSomeKeys\n");
    int perfect_run = 0, approx_run = 0;
    for (n = 0; n < MAX2; n++) {
        dictAdd(d,(void*)n,NULL);
        stressGetKeys(d,100,&perfect_run,&approx_run);
    }
    for (n = 0; n < MAX2; n++) {
        dictDelete(d,(void*)n);
        dictResize(d);
        stressGetKeys(d,100,&perfect_run,&approx_run);
    }
    printf("dictGetSomeKey, %d perfect runs, %d approximated runs\n",
        perfect_run, approx_run);
    dictRelease(d);

    printf("TEST PASSED!\n");
    return 0;
}
| 3,504 | 23.51049 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/TATP_CP/tableEntries.h | /*
Author: Vaibhav Gogte <[email protected]>
Aasheesh Kolli <[email protected]>
This file defines the table entries used by TATP.
*/
/* TATP SUBSCRIBER table row (one per subscriber).  Plain fixed-size
 * record; the trailing padding widens the struct, presumably to a
 * cache-line-friendly size — confirm against the table allocator. */
struct subscriber_entry {
  unsigned s_id; // Subscriber id
  char sub_nbr[15]; // Subscriber number, s_id in 15 digit string, zeros padded
  short bit_1, bit_2, bit_3, bit_4, bit_5, bit_6, bit_7, bit_8, bit_9, bit_10; // randomly generated values 0/1
  short hex_1, hex_2, hex_3, hex_4, hex_5, hex_6, hex_7, hex_8, hex_9, hex_10; // randomly generated values 0->15
  short byte2_1, byte2_2, byte2_3, byte2_4, byte2_5, byte2_6, byte2_7, byte2_8, byte2_9, byte2_10; // randomly generated values 0->255
  unsigned msc_location; // Randomly generated value 1->((2^32)-1)
  unsigned vlr_location; // Randomly generated value 1->((2^32)-1)
  char padding[40];
};
/* TATP ACCESS_INFO table row.  'valid' presumably flags whether this
 * fixed-array slot holds a live row — verify against the benchmark
 * driver; padding widens the record like the other tables. */
struct access_info_entry {
  unsigned s_id; //Subscriber id
  short ai_type; // Random value 1->4. A subscriber can have a max of 4 and all unique
  short data_1, data_2; // Randomly generated values 0->255
  char data_3[3]; // random 3 char string. All upper case alphabets
  char data_4[5]; // random 5 char string. All upper case alphabets
  bool valid;
  bool padding_1[7];
  char padding_2[4+32];
};
/* TATP SPECIAL_FACILITY table row.  'valid' presumably flags whether this
 * fixed-array slot holds a live row — verify against the benchmark
 * driver. */
struct special_facility_entry {
  unsigned s_id; //Subscriber id
  short sf_type; // Random value 1->4. A subscriber can have a max of 4 and all unique
  short is_active; // 0(15%)/1(85%)
  short error_cntrl; // Randomly generated values 0->255
  short data_a; // Randomly generated values 0->255
  char data_b[5]; // random 5 char string. All upper case alphabets
  char padding_1[7];
  bool valid;
  bool padding_2[4+32];
};
/* TATP CALL_FORWARDING table row, keyed by (s_id, sf_type, start_time)
 * from the SPECIAL_FACILITY table.  'valid' presumably flags a live
 * fixed-array slot — verify against the benchmark driver. */
struct call_forwarding_entry {
  unsigned s_id; // Subscriber id from special facility
  short sf_type; // sf_type from special facility table
  int start_time; // 0 or 8 or 16
  int end_time; // start_time+N, N randomly generated 1->8
  char numberx[15]; // randomly generated 15 digit string
  char padding_1[7];
  bool valid;
  bool padding_2[24];
};
| 1,993 | 35.254545 | 134 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/csv.h | // SPDX-License-Identifier: Apache-2.0
/* Copyright 2020-2021, Intel Corporation */
#pragma once
#include <iostream>
#include <map>
#include <ostream>
#include <set>
#include <string>
/* Simple CSV emitter: accumulates cells addressed by (row id, column name)
 * and prints the whole table to stdout with a stable, sorted column order. */
template <typename IdType>
class CSV {
private:
	/* Hold data in two-dimensional map of strings: data_matrix[row][column]
	 */
	std::map<IdType, std::map<std::string, std::string>> data_matrix;
	/* List of all columns, which is filled during inserts. Needed for
	 * printing header and data in the same order.
	 * */
	std::set<std::string> columns;
	std::string id_name; // header label for the row-id column

public:
	CSV(std::string id_column_name) : id_name(id_column_name){};
	/* Record one cell; remembers the column so it appears in the header. */
	void insert(IdType row, std::string column, std::string data)
	{
		columns.insert(column);
		data_matrix[row][column] = data;
	}
	/* const char* overload: without it a string literal would select the
	 * generic template below and fail (std::to_string has no const char*
	 * overload). */
	void insert(IdType row, std::string column, const char *data)
	{
		insert(row, column, std::string(data));
	}
	/* Numeric values are stringified via std::to_string. */
	template <typename T>
	void insert(IdType row, std::string column, T data)
	{
		insert(row, column, std::to_string(data));
	}
	/* Print header and rows to stdout with CRLF line endings.  Cells never
	 * inserted print as empty strings (note: operator[] default-constructs
	 * them on access). */
	void print()
	{
		// Print first column name
		std::cout << id_name;
		for (auto &column : columns) {
			std::cout << "," << column;
		}
		std::cout << "\r\n" << std::flush;

		for (auto &row : data_matrix) {
			std::cout << row.first;
			for (auto &column : columns) {
				std::cout << "," << data_matrix[row.first][column];
			}
			std::cout << "\r\n" << std::flush;
		}
	}
};
| 1,381 | 21.290323 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/logging.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// Must not be included from any .h files to avoid polluting the namespace
// with macros.
#ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
#define STORAGE_LEVELDB_UTIL_LOGGING_H_
#include "port/port_posix.h"
#include <stdint.h>
#include <stdio.h>
#include <string>
namespace leveldb
{
class Slice;
class WritableFile;
// Append a human-readable printout of "num" to *str
extern void AppendNumberTo(std::string *str, uint64_t num);
// Append a human-readable printout of "value" to *str.
// Escapes any non-printable characters found in "value".
extern void AppendEscapedStringTo(std::string *str, const Slice &value);
// Return a human-readable printout of "num"
extern std::string NumberToString(uint64_t num);
// Return a human-readable version of "value".
// Escapes any non-printable characters found in "value".
extern std::string EscapeString(const Slice &value);
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
// unspecified state.
extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val);
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
| 1,519 | 30.666667 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/testutil.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "util/random.h"
namespace leveldb
{
namespace test
{
// Store in *dst a random string of length "len" and return a Slice that
// references the generated data.
Slice RandomString(Random *rnd, int len, std::string *dst);
// Return a random key with the specified length that may contain interesting
// characters (e.g. \x00, \xff, etc.).
std::string RandomKey(Random *rnd, int len);
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst);
/* Env wrapper for fault injection in tests: while writable_file_error_ is
 * set, file-creation calls fail with a fake IOError (counted in
 * num_writable_file_errors_); otherwise they pass through to the wrapped
 * default Env. */
class ErrorEnv : public EnvWrapper {
public:
	bool writable_file_error_;       // when true, inject failures below
	int num_writable_file_errors_;   // number of injected failures so far

	ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0)
	{
	}

	virtual Status NewWritableFile(const std::string &fname, WritableFile **result)
	{
		if (writable_file_error_) {
			++num_writable_file_errors_;
			*result = nullptr;
			return Status::IOError(fname, "fake error");
		}
		return target()->NewWritableFile(fname, result);
	}

	virtual Status NewAppendableFile(const std::string &fname, WritableFile **result)
	{
		if (writable_file_error_) {
			++num_writable_file_errors_;
			*result = nullptr;
			return Status::IOError(fname, "fake error");
		}
		return target()->NewAppendableFile(fname, result);
	}
};
} // namespace test
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
| 1,984 | 28.191176 | 99 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/mutexlock.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
#define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
#include "port/port_posix.h"
#include "port/thread_annotations.h"
namespace leveldb
{
// Helper class that locks a mutex on construction and unlocks the mutex when
// the destructor of the MutexLock object is invoked.
//
// Typical usage:
//
// void MyClass::MyMethod() {
// MutexLock l(&mu_); // mu_ is an instance variable
// ... some complex code, possibly with multiple return paths ...
// }
/* RAII scope guard: acquires *mu for the lifetime of this object. */
class SCOPED_LOCKABLE MutexLock {
public:
	explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu)
	{
		this->mu_->Lock();
	}
	~MutexLock() UNLOCK_FUNCTION()
	{
		this->mu_->Unlock();
	}

private:
	port::Mutex *const mu_; // not owned; must outlive this guard
	// No copying allowed (pre-C++11 idiom: declared private, never defined)
	MutexLock(const MutexLock &);
	void operator=(const MutexLock &);
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
| 1,202 | 24.0625 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/random.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_
#define STORAGE_LEVELDB_UTIL_RANDOM_H_
#include <stdint.h>
namespace leveldb
{
// A very simple random number generator. Not especially good at
// generating truly random bits, but good enough for our needs in this
// package.
/* Minimal pseudo-random generator (Park-Miller "minimal standard" LCG:
 * state' = state * 16807 mod (2^31 - 1)).  Deterministic for a given seed;
 * adequate for test data generation, not for cryptography. */
class Random {
private:
	uint32_t state_;

public:
	explicit Random(uint32_t s) : state_(s & 0x7fffffffu)
	{
		/* Seeds 0 and 2^31-1 would make every subsequent value 0 or M
		 * respectively; remap them so the generator actually cycles. */
		if (state_ == 0 || state_ == 2147483647L) {
			state_ = 1;
		}
	}
	uint32_t Next()
	{
		static const uint32_t M = 2147483647L; // 2^31-1
		static const uint64_t A = 16807;       // bits 14, 8, 7, 5, 2, 1, 0
		/* Compute state_ * A mod M without a division: since
		 * (x << 31) mod M == x, fold the high 33 bits onto the low 31. */
		const uint64_t product = state_ * A;
		state_ = static_cast<uint32_t>((product >> 31) + (product & M));
		/* The fold can overshoot by at most one M; mod == M is impossible,
		 * so a single sign-bit-friendly '>' test suffices. */
		if (state_ > M) {
			state_ -= M;
		}
		return state_;
	}
	// Uniformly distributed value in [0, n-1].  REQUIRES: n > 0.
	uint32_t Uniform(int n)
	{
		return Next() % n;
	}
	// True ~"1/n" of the time, false otherwise.  REQUIRES: n > 0.
	bool OneIn(int n)
	{
		return Next() % n == 0;
	}
	/* Pick 'base' uniformly from [0, max_log], then return 'base' random
	 * bits: a value in [0, 2^max_log - 1] exponentially biased towards
	 * smaller numbers. */
	uint32_t Skewed(int max_log)
	{
		return Uniform(1 << Uniform(max_log + 1));
	}
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
| 2,202 | 26.886076 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/posix_logger.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
//
// Logger implementation that can be shared by all environments
// where enough posix functionality is available.
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#include "leveldb/env.h"
#include <algorithm>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
namespace leveldb
{
/* Logger writing timestamped, thread-tagged lines to a stdio FILE.
 * Takes ownership of the FILE (closed in the destructor).  Formatting is
 * two-pass: a 500-byte stack buffer first, then a 30000-byte heap buffer
 * if the message did not fit; beyond that the line is truncated. */
class PosixLogger : public Logger {
private:
	FILE *file_;            // owned; closed on destruction
	uint64_t (*gettid_)(); // Return the thread id for the current thread
public:
	PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid)
	{
	}
	virtual ~PosixLogger()
	{
		fclose(file_);
	}
	virtual void Logv(const char *format, va_list ap)
	{
		const uint64_t thread_id = (*gettid_)();

		// We try twice: the first time with a fixed-size stack allocated buffer,
		// and the second time with a much larger dynamically allocated buffer.
		char buffer[500];
		for (int iter = 0; iter < 2; iter++) {
			char *base;
			int bufsize;
			if (iter == 0) {
				bufsize = sizeof(buffer);
				base = buffer;
			} else {
				bufsize = 30000;
				base = new char[bufsize];
			}
			char *p = base;
			char *limit = base + bufsize;

			// Line prefix: wall-clock timestamp (microseconds) + hex thread id.
			struct timeval now_tv;
			gettimeofday(&now_tv, NULL);
			const time_t seconds = now_tv.tv_sec;
			struct tm t;
			localtime_r(&seconds, &t);
			p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
				      t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min,
				      t.tm_sec, static_cast<int>(now_tv.tv_usec),
				      static_cast<long long unsigned int>(thread_id));

			// Print the message.  va_copy so the second pass can reuse 'ap'.
			if (p < limit) {
				va_list backup_ap;
				va_copy(backup_ap, ap);
				p += vsnprintf(p, limit - p, format, backup_ap);
				va_end(backup_ap);
			}

			// Truncate to available space if necessary
			if (p >= limit) {
				if (iter == 0) {
					continue; // Try again with larger buffer
				} else {
					p = limit - 1;
				}
			}

			// Add newline if necessary
			if (p == base || p[-1] != '\n') {
				*p++ = '\n';
			}

			assert(p <= limit);
			fwrite(base, 1, p - base, file_);
			fflush(file_);
			if (base != buffer) {
				delete[] base;
			}
			break;
		}
	}
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
| 2,503 | 23.54902 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/util/env_posix_test_helper.h | // Copyright 2017 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
#define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
namespace leveldb
{
class EnvPosixTest;
// A helper for the POSIX Env to facilitate testing.
class EnvPosixTestHelper {
private:
friend class EnvPosixTest;
// Set the maximum number of read-only files that will be opened.
// Must be called before creating an Env.
static void SetReadOnlyFDLimit(int limit);
// Set the maximum number of read-only files that will be mapped via mmap.
// Must be called before creating an Env.
static void SetReadOnlyMMapLimit(int limit);
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
| 967 | 28.333333 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/port/port_posix.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// See port_example.h for documentation for the following types/functions.
#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#undef PLATFORM_IS_LITTLE_ENDIAN
#if defined(__APPLE__)
#include <machine/endian.h>
#if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
#define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
#endif
#elif defined(OS_SOLARIS)
#include <sys/isa_defs.h>
#ifdef _LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN true
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
#include <sys/endian.h>
#include <sys/types.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#elif defined(OS_HPUX)
#define PLATFORM_IS_LITTLE_ENDIAN false
#elif defined(OS_ANDROID)
// Due to a bug in the NDK x86 <sys/endian.h> definition,
// _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
// See http://code.google.com/p/android/issues/detail?id=39824
#include <endian.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
#else
#include <endian.h>
#endif
#include <pthread.h>
#if defined(HAVE_CRC32C)
#include <crc32c/crc32c.h>
#endif // defined(HAVE_CRC32C)
#ifdef HAVE_SNAPPY
#include <snappy.h>
#endif // defined(HAVE_SNAPPY)
#include "port/atomic_pointer.h"
#include <stdint.h>
#include <string>
#ifndef PLATFORM_IS_LITTLE_ENDIAN
#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
#endif
#if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
// Use fsync() on platforms without fdatasync()
#define fdatasync fsync
#endif
#if defined(OS_ANDROID) && __ANDROID_API__ < 9
// fdatasync() was only introduced in API level 9 on Android. Use fsync()
// when targetting older platforms.
#define fdatasync fsync
#endif
namespace leveldb
{
namespace port
{
static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
#undef PLATFORM_IS_LITTLE_ENDIAN
class CondVar;
class Mutex {
public:
Mutex();
~Mutex();
void Lock();
void Unlock();
void AssertHeld()
{
}
private:
friend class CondVar;
pthread_mutex_t mu_;
// No copying
Mutex(const Mutex &);
void operator=(const Mutex &);
};
class CondVar {
public:
explicit CondVar(Mutex *mu);
~CondVar();
void Wait();
void Signal();
void SignalAll();
private:
pthread_cond_t cv_;
Mutex *mu_;
};
typedef pthread_once_t OnceType;
#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
extern void InitOnce(OnceType *once, void (*initializer)());
inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output)
{
#ifdef HAVE_SNAPPY
output->resize(snappy::MaxCompressedLength(length));
size_t outlen;
snappy::RawCompress(input, length, &(*output)[0], &outlen);
output->resize(outlen);
return true;
#endif // defined(HAVE_SNAPPY)
return false;
}
inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result)
{
#ifdef HAVE_SNAPPY
return snappy::GetUncompressedLength(input, length, result);
#else
return false;
#endif // defined(HAVE_SNAPPY)
}
inline bool Snappy_Uncompress(const char *input, size_t length, char *output)
{
#ifdef HAVE_SNAPPY
return snappy::RawUncompress(input, length, output);
#else
return false;
#endif // defined(HAVE_SNAPPY)
}
inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg)
{
return false;
}
inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size)
{
#if defined(HAVE_CRC32C)
return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size);
#else
return 0;
#endif // defined(HAVE_CRC32C)
}
} // namespace port
} // namespace leveldb
#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
| 4,061 | 23.768293 | 98 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/port/thread_annotations.h | // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
// Some environments provide custom macros to aid in static thread-safety
// analysis. Provide empty definitions of such macros unless they are already
// defined.
#ifndef EXCLUSIVE_LOCKS_REQUIRED
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#endif
#ifndef SHARED_LOCKS_REQUIRED
#define SHARED_LOCKS_REQUIRED(...)
#endif
#ifndef LOCKS_EXCLUDED
#define LOCKS_EXCLUDED(...)
#endif
#ifndef LOCK_RETURNED
#define LOCK_RETURNED(x)
#endif
#ifndef LOCKABLE
#define LOCKABLE
#endif
#ifndef SCOPED_LOCKABLE
#define SCOPED_LOCKABLE
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
#define EXCLUSIVE_LOCK_FUNCTION(...)
#endif
#ifndef SHARED_LOCK_FUNCTION
#define SHARED_LOCK_FUNCTION(...)
#endif
#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
#endif
#ifndef SHARED_TRYLOCK_FUNCTION
#define SHARED_TRYLOCK_FUNCTION(...)
#endif
#ifndef UNLOCK_FUNCTION
#define UNLOCK_FUNCTION(...)
#endif
#ifndef NO_THREAD_SAFETY_ANALYSIS
#define NO_THREAD_SAFETY_ANALYSIS
#endif
#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
| 1,429 | 21.34375 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/port/atomic_pointer.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If the platform provides a cheap barrier, we use it with raw pointers
// - If <atomic> is present (on newer versions of gcc, it is), we use
// a <atomic>-based AtomicPointer. However we prefer the memory
// barrier based version, because at least on a gcc 4.4 32-bit build
// on linux, we have encountered a buggy <atomic> implementation.
// Also, some <atomic> implementations are much slower than a memory-barrier
// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
// a barrier based acquire-load).
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
#ifndef PORT_ATOMIC_POINTER_H_
#define PORT_ATOMIC_POINTER_H_
#include <stdint.h>
#ifdef LEVELDB_ATOMIC_PRESENT
#include <atomic>
#endif
#ifdef OS_WIN
#include <windows.h>
#endif
#ifdef __APPLE__
#include <libkern/OSAtomic.h>
#endif
#if defined(_M_X64) || defined(__x86_64__)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
#elif defined(__aarch64__)
#define ARCH_CPU_ARM64_FAMILY 1
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
#elif defined(__mips__)
#define ARCH_CPU_MIPS_FAMILY 1
#endif
namespace leveldb
{
namespace port
{
// Define MemoryBarrier() if available.
// Each branch provides the cheapest full barrier the platform offers; the
// barrier-based AtomicPointer further down is only compiled when one of
// these branches defines LEVELDB_HAVE_MEMORY_BARRIER.
// Windows on x86
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
// windows.h already provides a MemoryBarrier(void) macro
// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
#define LEVELDB_HAVE_MEMORY_BARRIER
// Mac OS
#elif defined(__APPLE__)
inline void MemoryBarrier()
{
	OSMemoryBarrier();
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// Gcc on x86
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
	// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
	// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
	// NOTE: this is a compiler-only barrier (no fence instruction); it is
	// sufficient here because the x86 hardware memory model already orders
	// the loads/stores the AtomicPointer below pairs it with.
	__asm__ __volatile__("" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// Sun Studio
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
inline void MemoryBarrier()
{
	// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
	// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
	asm volatile("" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// ARM Linux
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
// The Linux ARM kernel provides a highly optimized device-specific memory
// barrier function at a fixed memory address that is mapped in every
// user-level process.
//
// This beats using CPU-specific instructions which are, on single-core
// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
// shows that the extra function call cost is completely negligible on
// multi-core devices.
//
// NOTE(review): 0xffff0fa0 is the ARM "kuser" helper entry point; this
// assumes the kernel exposes the helper page (CONFIG_KUSER_HELPERS) --
// confirm for the deployment target.
inline void MemoryBarrier()
{
	(*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// ARM64
#elif defined(ARCH_CPU_ARM64_FAMILY)
inline void MemoryBarrier()
{
	// "dmb sy" -- data memory barrier over the full ("sy") domain.
	asm volatile("dmb sy" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// PPC
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
	// TODO for some powerpc expert: is there a cheaper suitable variant?
	// Perhaps by having separate barriers for acquire and release ops.
	asm volatile("sync" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
// MIPS
#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier()
{
	__asm__ __volatile__("sync" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER
#endif
// AtomicPointer built using platform-specific MemoryBarrier()
#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
// AtomicPointer built on the platform MemoryBarrier() defined above.
// The acquire/release pairs order surrounding accesses; the NoBarrier_*
// accessors are plain reads/writes with no ordering guarantees.
class AtomicPointer {
private:
	void *rep_;

public:
	AtomicPointer() {}
	explicit AtomicPointer(void *p) : rep_(p) {}

	// Unordered accessors.
	inline void *NoBarrier_Load() const { return rep_; }
	inline void NoBarrier_Store(void *v) { rep_ = v; }

	// Acquire: read the pointer first, then fence so that no later
	// access is observed before this load.
	inline void *Acquire_Load() const
	{
		void *loaded = rep_;
		MemoryBarrier();
		return loaded;
	}
	// Release: fence first so earlier writes are visible, then publish.
	inline void Release_Store(void *v)
	{
		MemoryBarrier();
		rep_ = v;
	}
};
// AtomicPointer based on <cstdatomic>
#elif defined(LEVELDB_ATOMIC_PRESENT)
// AtomicPointer backed by std::atomic<void *>.
// Acquire_Load/Release_Store form an acquire/release pair; the NoBarrier_*
// accessors are relaxed (atomicity only, no ordering).
class AtomicPointer {
private:
	std::atomic<void *> rep_;

public:
	AtomicPointer() {}
	explicit AtomicPointer(void *v) : rep_(v) {}

	inline void *Acquire_Load() const
	{
		return rep_.load(std::memory_order_acquire);
	}
	inline void Release_Store(void *v)
	{
		rep_.store(v, std::memory_order_release);
	}
	inline void *NoBarrier_Load() const
	{
		return rep_.load(std::memory_order_relaxed);
	}
	inline void NoBarrier_Store(void *v)
	{
		rep_.store(v, std::memory_order_relaxed);
	}
};
// Atomic pointer based on sparc memory barriers
#elif defined(__sparcv9) && defined(__GNUC__)
// SPARC v9 AtomicPointer: plain 64-bit loads/stores combined with explicit
// "membar" instructions to get acquire/release ordering.
class AtomicPointer {
private:
	void *rep_;
public:
	AtomicPointer()
	{
	}
	explicit AtomicPointer(void *v) : rep_(v)
	{
	}
	// Load rep_, then forbid later loads/stores from moving above it
	// (acquire semantics via #LoadLoad|#LoadStore).
	inline void *Acquire_Load() const
	{
		void *val;
		__asm__ __volatile__("ldx [%[rep_]], %[val] \n\t"
				     "membar #LoadLoad|#LoadStore \n\t"
				     : [val] "=r"(val)
				     : [rep_] "r"(&rep_)
				     : "memory");
		return val;
	}
	// Forbid earlier loads/stores from moving below the fence, then
	// store rep_ (release semantics via #LoadStore|#StoreStore).
	inline void Release_Store(void *v)
	{
		__asm__ __volatile__("membar #LoadStore|#StoreStore \n\t"
				     "stx %[v], [%[rep_]] \n\t"
				     :
				     : [rep_] "r"(&rep_), [v] "r"(v)
				     : "memory");
	}
	// Unordered accessors -- plain reads/writes, no fences.
	inline void *NoBarrier_Load() const
	{
		return rep_;
	}
	inline void NoBarrier_Store(void *v)
	{
		rep_ = v;
	}
};
// Atomic pointer based on ia64 acq/rel
#elif defined(__ia64) && defined(__GNUC__)
// Itanium (ia64) AtomicPointer: the ISA has dedicated acquire/release
// forms of load and store, so no separate fence instruction is needed.
class AtomicPointer {
private:
	void *rep_;
public:
	AtomicPointer()
	{
	}
	explicit AtomicPointer(void *v) : rep_(v)
	{
	}
	// ld8.acq -- 8-byte load with acquire semantics.
	inline void *Acquire_Load() const
	{
		void *val;
		__asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t"
				     : [val] "=r"(val)
				     : [rep_] "r"(&rep_)
				     : "memory");
		return val;
	}
	// st8.rel -- 8-byte store with release semantics.
	inline void Release_Store(void *v)
	{
		__asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t"
				     :
				     : [rep_] "r"(&rep_), [v] "r"(v)
				     : "memory");
	}
	// Unordered accessors -- plain reads/writes, no ordering.
	inline void *NoBarrier_Load() const
	{
		return rep_;
	}
	inline void NoBarrier_Store(void *v)
	{
		rep_ = v;
	}
};
// We have neither MemoryBarrier(), nor <atomic>
#else
#error Please implement AtomicPointer for this platform.
#endif
#undef LEVELDB_HAVE_MEMORY_BARRIER
#undef ARCH_CPU_X86_FAMILY
#undef ARCH_CPU_ARM_FAMILY
#undef ARCH_CPU_ARM64_FAMILY
#undef ARCH_CPU_PPC_FAMILY
} // namespace port
} // namespace leveldb
#endif // PORT_ATOMIC_POINTER_H_
| 7,207 | 23.26936 | 84 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/status.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// A Status encapsulates the result of an operation. It may indicate success,
// or it may indicate an error with an associated error message.
//
// Multiple threads can invoke const methods on a Status without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Status must use
// external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
#include "leveldb/slice.h"
#include <string>
namespace leveldb
{
class Status {
public:
	// A default-constructed Status reports success and owns no heap state.
	Status() : state_(NULL) {}
	~Status() { delete[] state_; }

	// Copyable; the definitions follow the class body.
	Status(const Status &s);
	void operator=(const Status &s);

	// Factory for the success status.
	static Status OK() { return Status(); }

	// Factories for each error category; msg2 is an optional detail
	// appended to msg.
	static Status NotFound(const Slice &msg, const Slice &msg2 = Slice())
	{
		return Status(kNotFound, msg, msg2);
	}
	static Status Corruption(const Slice &msg, const Slice &msg2 = Slice())
	{
		return Status(kCorruption, msg, msg2);
	}
	static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice())
	{
		return Status(kNotSupported, msg, msg2);
	}
	static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice())
	{
		return Status(kInvalidArgument, msg, msg2);
	}
	static Status IOError(const Slice &msg, const Slice &msg2 = Slice())
	{
		return Status(kIOError, msg, msg2);
	}

	// Success predicate: an OK status is represented by a NULL state_.
	bool ok() const { return state_ == NULL; }

	// Per-category predicates.
	bool IsNotFound() const { return code() == kNotFound; }
	bool IsCorruption() const { return code() == kCorruption; }
	bool IsIOError() const { return code() == kIOError; }
	bool IsNotSupportedError() const { return code() == kNotSupported; }
	bool IsInvalidArgument() const { return code() == kInvalidArgument; }

	// Human-readable representation; returns "OK" for success.
	std::string ToString() const;

private:
	// NULL means OK. Otherwise state_ is a new[]'d buffer laid out as:
	//   state_[0..3] -- length of the message
	//   state_[4]    -- code
	//   state_[5..]  -- message bytes
	const char *state_;
	enum Code {
		kOk = 0,
		kNotFound = 1,
		kCorruption = 2,
		kNotSupported = 3,
		kInvalidArgument = 4,
		kIOError = 5
	};
	// Decode the category byte; OK has no buffer, hence the NULL check.
	Code code() const
	{
		return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
	}
	Status(Code code, const Slice &msg, const Slice &msg2);
	static const char *CopyState(const char *s);
};
// Copy constructor: duplicate the heap-allocated state, if any.
inline Status::Status(const Status &s)
{
	if (s.state_ == NULL)
		state_ = NULL;
	else
		state_ = CopyState(s.state_);
}
// Copy assignment. When both sides share the same state_ pointer --
// self-assignment, or the common case where both are OK (NULL) --
// there is nothing to do.
inline void Status::operator=(const Status &s)
{
	if (state_ == s.state_)
		return;
	delete[] state_;
	state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
}
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
| 3,658 | 23.231788 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/slice.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// Slice is a simple structure containing a pointer into some external
// storage and a size. The user of a Slice must ensure that the slice
// is not used after the corresponding external storage has been
// deallocated.
//
// Multiple threads can invoke const methods on a Slice without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Slice must use
// external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_
#define STORAGE_LEVELDB_INCLUDE_SLICE_H_
#include <assert.h>
#include <stddef.h>
#include <string.h>
#include <string>
namespace leveldb
{
class Slice {
public:
	// Empty slice: points at a static "" with length zero.
	Slice() : data_(""), size_(0) {}
	// Wrap an explicit (pointer, length) pair; no copy is made.
	Slice(const char *d, size_t n) : data_(d), size_(n) {}
	// Wrap the contents of a std::string; "s" must outlive the slice.
	Slice(const std::string &s) : data_(s.data()), size_(s.size()) {}
	// Wrap a NUL-terminated C string (terminator excluded from size).
	Slice(const char *s) : data_(s), size_(strlen(s)) {}

	// Pointer to the first byte of the referenced data.
	const char *data() const { return data_; }
	// Number of referenced bytes.
	size_t size() const { return size_; }
	// True iff the slice references zero bytes.
	bool empty() const { return size_ == 0; }

	// Byte at position n. REQUIRES: n < size()
	char operator[](size_t n) const
	{
		assert(n < size());
		return data_[n];
	}

	// Reset to the empty slice.
	void clear()
	{
		data_ = "";
		size_ = 0;
	}

	// Advance past the first n bytes. REQUIRES: n <= size()
	void remove_prefix(size_t n)
	{
		assert(n <= size());
		data_ += n;
		size_ -= n;
	}

	// Copy the referenced bytes into a fresh std::string.
	std::string ToString() const { return std::string(data_, size_); }

	// Three-way lexicographic comparison:
	//   < 0 iff "*this" < "b", == 0 iff equal, > 0 iff "*this" > "b".
	int compare(const Slice &b) const;

	// True iff "x" is a prefix of "*this".
	bool starts_with(const Slice &x) const
	{
		return size_ >= x.size_ &&
			memcmp(data_, x.data_, x.size_) == 0;
	}

private:
	const char *data_;
	size_t size_;
	// Intentionally copyable
};
// Equality: same length and identical bytes.
inline bool operator==(const Slice &x, const Slice &y)
{
	if (x.size() != y.size())
		return false;
	return memcmp(x.data(), y.data(), x.size()) == 0;
}
inline bool operator!=(const Slice &x, const Slice &y)
{
	return !(x == y);
}
// Lexicographic comparison of the common prefix; ties are broken by
// length (the shorter slice compares less).
inline int Slice::compare(const Slice &b) const
{
	const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
	const int r = memcmp(data_, b.data_, min_len);
	if (r != 0)
		return r;
	if (size_ < b.size_)
		return -1;
	if (size_ > b.size_)
		return +1;
	return 0;
}
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
| 3,163 | 21.125874 | 81 | h |
null | NearPMSW-main/nearpm/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/env.h | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD file. See the AUTHORS file for names of contributors.
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020, Intel Corporation
// An Env is an interface used by the leveldb implementation to access
// operating system functionality like the filesystem etc. Callers
// may wish to provide a custom Env object when opening a database to
// get fine gain control; e.g., to rate limit file system operations.
//
// All Env implementations are safe for concurrent access from
// multiple threads without any external synchronization.
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
#include "leveldb/status.h"
#include <stdarg.h>
#include <stdint.h>
#include <string>
#include <vector>
namespace leveldb
{
class FileLock;
class Logger;
class RandomAccessFile;
class SequentialFile;
class Slice;
class WritableFile;
class Env {
public:
	Env()
	{
	}
	virtual ~Env();
	// Return a default environment suitable for the current operating
	// system. Sophisticated users may wish to provide their own Env
	// implementation instead of relying on this default environment.
	//
	// The result of Default() belongs to leveldb and must never be deleted.
	static Env *Default();
	// Create a brand new sequentially-readable file with the specified name.
	// On success, stores a pointer to the new file in *result and returns OK.
	// On failure stores NULL in *result and returns non-OK. If the file does
	// not exist, returns a non-OK status. Implementations should return a
	// NotFound status when the file does not exist.
	//
	// The returned file will only be accessed by one thread at a time.
	// (Caller owns *result and must delete it when done.)
	virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0;
	// Create a brand new random access read-only file with the
	// specified name. On success, stores a pointer to the new file in
	// *result and returns OK. On failure stores NULL in *result and
	// returns non-OK. If the file does not exist, returns a non-OK
	// status. Implementations should return a NotFound status when the file does
	// not exist.
	//
	// The returned file may be concurrently accessed by multiple threads.
	virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0;
	// Create an object that writes to a new file with the specified
	// name. Deletes any existing file with the same name and creates a
	// new file. On success, stores a pointer to the new file in
	// *result and returns OK. On failure stores NULL in *result and
	// returns non-OK.
	//
	// The returned file will only be accessed by one thread at a time.
	virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0;
	// Create an object that either appends to an existing file, or
	// writes to a new file (if the file does not exist to begin with).
	// On success, stores a pointer to the new file in *result and
	// returns OK. On failure stores NULL in *result and returns
	// non-OK.
	//
	// The returned file will only be accessed by one thread at a time.
	//
	// May return an IsNotSupportedError error if this Env does
	// not allow appending to an existing file. Users of Env (including
	// the leveldb implementation) must be prepared to deal with
	// an Env that does not support appending.
	virtual Status NewAppendableFile(const std::string &fname, WritableFile **result);
	// Returns true iff the named file exists.
	virtual bool FileExists(const std::string &fname) = 0;
	// Store in *result the names of the children of the specified directory.
	// The names are relative to "dir".
	// Original contents of *results are dropped.
	virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0;
	// Delete the named file.
	virtual Status DeleteFile(const std::string &fname) = 0;
	// Create the specified directory.
	virtual Status CreateDir(const std::string &dirname) = 0;
	// Delete the specified directory.
	virtual Status DeleteDir(const std::string &dirname) = 0;
	// Store the size of fname in *file_size.
	virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0;
	// Rename file src to target.
	virtual Status RenameFile(const std::string &src, const std::string &target) = 0;
	// Lock the specified file. Used to prevent concurrent access to
	// the same db by multiple processes. On failure, stores NULL in
	// *lock and returns non-OK.
	//
	// On success, stores a pointer to the object that represents the
	// acquired lock in *lock and returns OK. The caller should call
	// UnlockFile(*lock) to release the lock. If the process exits,
	// the lock will be automatically released.
	//
	// If somebody else already holds the lock, finishes immediately
	// with a failure. I.e., this call does not wait for existing locks
	// to go away.
	//
	// May create the named file if it does not already exist.
	virtual Status LockFile(const std::string &fname, FileLock **lock) = 0;
	// Release the lock acquired by a previous successful call to LockFile.
	// REQUIRES: lock was returned by a successful LockFile() call
	// REQUIRES: lock has not already been unlocked.
	virtual Status UnlockFile(FileLock *lock) = 0;
	// Arrange to run "(*function)(arg)" once in a background thread.
	//
	// "function" may run in an unspecified thread. Multiple functions
	// added to the same Env may run concurrently in different threads.
	// I.e., the caller may not assume that background work items are
	// serialized.
	virtual void Schedule(void (*function)(void *arg), void *arg) = 0;
	// Start a new thread, invoking "function(arg)" within the new thread.
	// When "function(arg)" returns, the thread will be destroyed.
	virtual void StartThread(void (*function)(void *arg), void *arg) = 0;
	// *path is set to a temporary directory that can be used for testing. It may
	// or many not have just been created. The directory may or may not differ
	// between runs of the same process, but subsequent calls will return the
	// same directory.
	virtual Status GetTestDirectory(std::string *path) = 0;
	// Create and return a log file for storing informational messages.
	virtual Status NewLogger(const std::string &fname, Logger **result) = 0;
	// Returns the number of micro-seconds since some fixed point in time. Only
	// useful for computing deltas of time.
	virtual uint64_t NowMicros() = 0;
	// Sleep/delay the thread for the prescribed number of micro-seconds.
	virtual void SleepForMicroseconds(int micros) = 0;
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	Env(const Env &);
	void operator=(const Env &);
};
// A file abstraction for reading sequentially through a file
class SequentialFile {
public:
	SequentialFile()
	{
	}
	virtual ~SequentialFile();
	// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
	// written by this routine. Sets "*result" to the data that was
	// read (including if fewer than "n" bytes were successfully read).
	// May set "*result" to point at data in "scratch[0..n-1]", so
	// "scratch[0..n-1]" must be live when "*result" is used.
	// If an error was encountered, returns a non-OK status.
	//
	// REQUIRES: External synchronization
	virtual Status Read(size_t n, Slice *result, char *scratch) = 0;
	// Skip "n" bytes from the file. This is guaranteed to be no
	// slower that reading the same data, but may be faster.
	//
	// If end of file is reached, skipping will stop at the end of the
	// file, and Skip will return OK.
	//
	// REQUIRES: External synchronization
	virtual Status Skip(uint64_t n) = 0;
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	SequentialFile(const SequentialFile &);
	void operator=(const SequentialFile &);
};
// A file abstraction for randomly reading the contents of a file.
class RandomAccessFile {
public:
	RandomAccessFile()
	{
	}
	virtual ~RandomAccessFile();
	// Read up to "n" bytes from the file starting at "offset".
	// "scratch[0..n-1]" may be written by this routine. Sets "*result"
	// to the data that was read (including if fewer than "n" bytes were
	// successfully read). May set "*result" to point at data in
	// "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
	// "*result" is used. If an error was encountered, returns a non-OK
	// status.
	//
	// Safe for concurrent use by multiple threads.
	virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0;
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	RandomAccessFile(const RandomAccessFile &);
	void operator=(const RandomAccessFile &);
};
// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
class WritableFile {
public:
	WritableFile()
	{
	}
	virtual ~WritableFile();
	virtual Status Append(const Slice &data) = 0;
	virtual Status Close() = 0;
	virtual Status Flush() = 0;
	virtual Status Sync() = 0;
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	WritableFile(const WritableFile &);
	void operator=(const WritableFile &);
};
// An interface for writing log messages.
class Logger {
public:
	Logger()
	{
	}
	virtual ~Logger();
	// Write an entry to the log file with the specified format.
	virtual void Logv(const char *format, va_list ap) = 0;
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	Logger(const Logger &);
	void operator=(const Logger &);
};
// Identifies a locked file. Returned by Env::LockFile() and released
// via Env::UnlockFile().
class FileLock {
public:
	FileLock()
	{
	}
	virtual ~FileLock();
private:
	// No copying allowed (C++03 idiom: declared private, never defined).
	FileLock(const FileLock &);
	void operator=(const FileLock &);
};
// Log the specified data to *info_log if info_log is non-NULL.
extern void Log(Logger *info_log, const char *format, ...)
#if defined(__GNUC__) || defined(__clang__)
	__attribute__((__format__(__printf__, 2, 3)))
#endif
	;
// A utility routine: write "data" to the named file.
Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname);
// A utility routine: read contents of named file into *data
Status ReadFileToString(Env *env, const std::string &fname, std::string *data);
// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
class EnvWrapper : public Env {
public:
	// Initialize an EnvWrapper that delegates all calls to *t.
	// The wrapper stores the pointer only; it never deletes the target,
	// so *t must outlive this object.
	explicit EnvWrapper(Env *t) : target_(t) {}
	virtual ~EnvWrapper();

	// Return the target to which this Env forwards all calls.
	Env *target() const { return target_; }

	// Boilerplate: every method forwards verbatim to target().
	Status NewSequentialFile(const std::string &f, SequentialFile **r)
	{ return target_->NewSequentialFile(f, r); }
	Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r)
	{ return target_->NewRandomAccessFile(f, r); }
	Status NewWritableFile(const std::string &f, WritableFile **r)
	{ return target_->NewWritableFile(f, r); }
	Status NewAppendableFile(const std::string &f, WritableFile **r)
	{ return target_->NewAppendableFile(f, r); }
	bool FileExists(const std::string &f)
	{ return target_->FileExists(f); }
	Status GetChildren(const std::string &dir, std::vector<std::string> *r)
	{ return target_->GetChildren(dir, r); }
	Status DeleteFile(const std::string &f)
	{ return target_->DeleteFile(f); }
	Status CreateDir(const std::string &d)
	{ return target_->CreateDir(d); }
	Status DeleteDir(const std::string &d)
	{ return target_->DeleteDir(d); }
	Status GetFileSize(const std::string &f, uint64_t *s)
	{ return target_->GetFileSize(f, s); }
	Status RenameFile(const std::string &s, const std::string &t)
	{ return target_->RenameFile(s, t); }
	Status LockFile(const std::string &f, FileLock **l)
	{ return target_->LockFile(f, l); }
	Status UnlockFile(FileLock *l)
	{ return target_->UnlockFile(l); }
	void Schedule(void (*f)(void *), void *a)
	{ target_->Schedule(f, a); }
	void StartThread(void (*f)(void *), void *a)
	{ target_->StartThread(f, a); }
	virtual Status GetTestDirectory(std::string *path)
	{ return target_->GetTestDirectory(path); }
	virtual Status NewLogger(const std::string &fname, Logger **result)
	{ return target_->NewLogger(fname, result); }
	uint64_t NowMicros()
	{ return target_->NowMicros(); }
	void SleepForMicroseconds(int micros)
	{ target_->SleepForMicroseconds(micros); }

private:
	Env *target_;
};
} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
| 12,539 | 30.827411 | 93 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_config.h -- internal definitions for rpmemd config
*/
/*
 * Multiple-inclusion guard: the header defines struct rpmemd_config and
 * would otherwise trigger a redefinition error when included twice.
 */
#ifndef RPMEMD_CONFIG_H
#define RPMEMD_CONFIG_H

#include <stdint.h>
#include <stdbool.h>

#ifndef RPMEMD_DEFAULT_LOG_FILE
#define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log")
#endif

#ifndef RPMEMD_GLOBAL_CONFIG_FILE
#define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\
	".conf")
#endif

#define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf")
#define RPMEM_DEFAULT_MAX_LANES 1024
#define RPMEM_DEFAULT_NTHREADS 0

#define HOME_ENV "HOME"
#define HOME_STR_PLACEHOLDER ("$" HOME_ENV)

/*
 * rpmemd_config -- rpmemd runtime configuration
 *
 * NOTE(review): enum rpmemd_log_level is declared in rpmemd_log.h, which
 * must be included before this header -- confirm against all includers.
 */
struct rpmemd_config {
	char *log_file;
	char *poolset_dir;
	const char *rm_poolset;
	bool force;
	bool pool_set;
	bool persist_apm;	/* selects APM over GPSPM, see rpmemd_get_pm() */
	bool persist_general;
	bool use_syslog;
	uint64_t max_lanes;
	enum rpmemd_log_level log_level;
	size_t nthreads;
};

/* parse config files and command line into *config; returns nonzero on error */
int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]);
/* release resources owned by *config */
void rpmemd_config_free(struct rpmemd_config *config);

#endif /* RPMEMD_CONFIG_H */
| 1,012 | 21.021739 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd.c -- rpmemd main source file
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "librpmem.h"
#include "rpmemd.h"
#include "rpmemd_log.h"
#include "rpmemd_config.h"
#include "rpmem_common.h"
#include "rpmemd_fip.h"
#include "rpmemd_obc.h"
#include "rpmemd_db.h"
#include "rpmemd_util.h"
#include "pool_hdr.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "uuid.h"
#include "set.h"
/*
* rpmemd -- rpmem handle
*/
struct rpmemd {
	struct rpmemd_obc *obc;		/* out-of-band connection handle */
	struct rpmemd_db *db;		/* pool set database handle */
	struct rpmemd_db_pool *pool;	/* pool handle */
	char *pool_desc;		/* pool descriptor */
	struct rpmemd_fip *fip;		/* fabric provider handle */
	struct rpmemd_config config;	/* configuration */
	enum rpmem_persist_method persist_method; /* APM/GPSPM, see rpmemd_get_pm() */
	int closing;	/* set when closing connection */
	int created;	/* pool created */
	os_thread_t fip_thread;	/* in-band thread, see rpmemd_fip_thread() */
	int fip_running;	/* nonzero while fip_thread is running */
};
#ifdef DEBUG
/*
 * bool2str -- map a boolean flag to a human-readable "yes"/"no" string
 */
static inline const char *
bool2str(int v)
{
	if (v)
		return "yes";
	return "no";
}
#endif
/*
 * _str -- substitute the literal "(null)" for a NULL pointer so the
 * result is always safe to pass to printf-style formatting
 */
static inline const char *
_str(const char *str)
{
	return str ? str : "(null)";
}
/*
 * uuid2str -- convert uuid to its string representation
 *
 * NOTE: the result points at a static buffer, so the function is not
 * reentrant and each call overwrites the previous result.
 */
static const char *
uuid2str(const uuid_t uuid)
{
	static char uuid_str[64] = {0, };
	if (util_uuid_to_string(uuid, uuid_str) != 0)
		return "(error)";
	return uuid_str;
}
/*
 * rpmemd_get_pm -- pick the persist method mandated by the configuration:
 * APM when persist_apm is set, GPSPM otherwise
 */
static enum rpmem_persist_method
rpmemd_get_pm(struct rpmemd_config *config)
{
	return config->persist_apm ? RPMEM_PM_APM : RPMEM_PM_GPSPM;
}
/*
 * rpmemd_db_get_status -- translate an errno value from a db operation
 * into the corresponding RPMEM_ERR_* status code
 */
static int
rpmemd_db_get_status(int err)
{
	if (err == EEXIST)
		return RPMEM_ERR_EXISTS;
	if (err == EACCES)
		return RPMEM_ERR_NOACCESS;
	if (err == ENOENT)
		return RPMEM_ERR_NOEXIST;
	if (err == EWOULDBLOCK)
		return RPMEM_ERR_BUSY;
	if (err == EBADF)
		return RPMEM_ERR_BADNAME;
	if (err == EINVAL)
		return RPMEM_ERR_POOL_CFG;
	/* any other errno is reported as a fatal error */
	return RPMEM_ERR_FATAL;
}
/*
 * rpmemd_check_pool -- verify pool parameters
 *
 * Checks that the opened pool meets the rpmemd minimum size and is large
 * enough for the client's request. On failure returns -1 and stores the
 * RPMEM_ERR_* code in *status.
 */
static int
rpmemd_check_pool(struct rpmemd *rpmemd, const struct rpmem_req_attr *req,
	int *status)
{
	/* the pool must be at least the library-wide minimum */
	if (rpmemd->pool->pool_size < RPMEM_MIN_POOL) {
		RPMEMD_LOG(ERR, "invalid pool size -- must be >= %zu",
				RPMEM_MIN_POOL);
		*status = RPMEM_ERR_POOL_CFG;
		return -1;
	}
	/* the client must not request more than the pool provides */
	if (rpmemd->pool->pool_size < req->pool_size) {
		RPMEMD_LOG(ERR, "requested size is too big");
		*status = RPMEM_ERR_BADSIZE;
		return -1;
	}
	return 0;
}
/*
 * rpmemd_deep_persist -- deep persist callback; ctx carries the rpmemd
 * handle whose pool set describes the target replica
 */
static int
rpmemd_deep_persist(const void *addr, size_t size, void *ctx)
{
	struct rpmemd *daemon = (struct rpmemd *)ctx;
	return util_replica_deep_persist(addr, size, daemon->pool->set, 0);
}
/*
 * rpmemd_common_fip_init -- initialize fabric provider
 *
 * Builds the fip attributes from the client's request and the daemon
 * configuration, applies the persistency-method policy and creates the
 * fabric provider instance in rpmemd->fip. On failure returns -1 and
 * stores an RPMEM_ERR_* code in *status.
 */
static int
rpmemd_common_fip_init(struct rpmemd *rpmemd, const struct rpmem_req_attr *req,
	struct rpmem_resp_attr *resp, int *status)
{
	/* register the whole pool with header in RDMA */
	void *addr = (void *)((uintptr_t)rpmemd->pool->pool_addr);
	struct rpmemd_fip_attr fip_attr = {
		.addr = addr,
		.size = req->pool_size,
		.nlanes = req->nlanes,
		.nthreads = rpmemd->config.nthreads,
		.provider = req->provider,
		.persist_method = rpmemd->persist_method,
		.deep_persist = rpmemd_deep_persist,
		.ctx = rpmemd,
		.buff_size = req->buff_size,
	};
	const int is_pmem = rpmemd_db_pool_is_pmem(rpmemd->pool);
	/*
	 * NOTE(review): presumably adjusts the persist method and picks the
	 * persist callbacks depending on is_pmem -- confirm in rpmemd_util.
	 */
	if (rpmemd_apply_pm_policy(&fip_attr.persist_method,
			&fip_attr.persist,
			&fip_attr.memcpy_persist,
			is_pmem)) {
		*status = RPMEM_ERR_FATAL;
		goto err_fip_init;
	}
	/* bind the fabric endpoint to the address of the ssh connection */
	const char *node = rpmem_get_ssh_conn_addr();
	enum rpmem_err err;
	rpmemd->fip = rpmemd_fip_init(node, NULL, &fip_attr, resp, &err);
	if (!rpmemd->fip) {
		*status = (int)err;
		goto err_fip_init;
	}
	return 0;
err_fip_init:
	return -1;
}
/*
 * rpmemd_print_req_attr -- print request attributes
 *
 * Logs every field of the client's request at NOTICE level.
 */
static void
rpmemd_print_req_attr(const struct rpmem_req_attr *req)
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool descriptor: '%s'",
			_str(req->pool_desc));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool size: %lu", req->pool_size);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", req->nlanes);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "provider: %s",
			rpmem_provider_to_str(req->provider));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "buff_size: %lu", req->buff_size);
}
/*
 * rpmemd_print_pool_attr -- print pool attributes
 *
 * Logs every field at INFO level; a NULL attr is reported explicitly.
 */
static void
rpmemd_print_pool_attr(const struct rpmem_pool_attr *attr)
{
	if (attr == NULL) {
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "NULL");
	} else {
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "signature: '%s'",
				_str(attr->signature));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "major: %u", attr->major);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "compat_features: 0x%x",
				attr->compat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "incompat_features: 0x%x",
				attr->incompat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "ro_compat_features: 0x%x",
				attr->ro_compat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "poolset_uuid: %s",
				uuid2str(attr->poolset_uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "uuid: %s",
				uuid2str(attr->uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "next_uuid: %s",
				uuid2str(attr->next_uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "prev_uuid: %s",
				uuid2str(attr->prev_uuid));
	}
}
/*
 * rpmemd_print_resp_attr -- print response attributes
 *
 * Logs every field of the response sent back to the client at NOTICE level.
 */
static void
rpmemd_print_resp_attr(const struct rpmem_resp_attr *attr)
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "port: %u", attr->port);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "rkey: 0x%lx", attr->rkey);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "raddr: 0x%lx", attr->raddr);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", attr->nlanes);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(attr->persist_method));
}
/*
 * rpmemd_fip_thread -- background thread for establishing in-band connection
 *
 * Accepts the in-band (fabric) connection and starts request processing.
 * The integer status is smuggled back through the thread's void * return
 * value and decoded in rpmemd_fip_stop_thread().
 */
static void *
rpmemd_fip_thread(void *arg)
{
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	RPMEMD_LOG(INFO, "waiting for in-band connection");
	ret = rpmemd_fip_accept(rpmemd->fip, RPMEM_ACCEPT_TIMEOUT);
	if (ret)
		goto err_accept;
	RPMEMD_LOG(NOTICE, "in-band connection established");
	ret = rpmemd_fip_process_start(rpmemd->fip);
	if (ret)
		goto err_process_start;
	return NULL;
err_process_start:
	/* processing failed to start -- tear down the accepted connection */
	rpmemd_fip_close(rpmemd->fip);
err_accept:
	return (void *)(uintptr_t)ret;
}
/*
 * rpmemd_fip_start_thread -- start background thread for establishing
 * in-band connection
 *
 * Returns 0 on success; on failure logs the error (errno carries the
 * os_thread_create() status) and returns -1.
 */
static int
rpmemd_fip_start_thread(struct rpmemd *rpmemd)
{
	errno = os_thread_create(&rpmemd->fip_thread, NULL,
			rpmemd_fip_thread, rpmemd);
	if (errno) {
		RPMEMD_LOG(ERR, "!creating in-band thread");
		return -1;
	}
	rpmemd->fip_running = 1;
	return 0;
}
/*
* rpmemd_fip_stop_thread -- stop background thread for in-band connection
*/
static int
rpmemd_fip_stop_thread(struct rpmemd *rpmemd)
{
RPMEMD_ASSERT(rpmemd->fip_running);
void *tret;
errno = os_thread_join(&rpmemd->fip_thread, &tret);
if (errno)
RPMEMD_LOG(ERR, "!waiting for in-band thread");
int ret = (int)(uintptr_t)tret;
if (ret)
RPMEMD_LOG(ERR, "in-band thread failed -- '%d'", ret);
return ret;
}
/*
 * rpmemd_fip_stop -- stop in-band thread and stop processing thread
 *
 * Returns the status of the in-band thread; the processing thread is
 * stopped only if the in-band thread terminated cleanly.
 */
static int
rpmemd_fip_stop(struct rpmemd *rpmemd)
{
	int ret;
	int fip_ret = rpmemd_fip_stop_thread(rpmemd);
	if (fip_ret) {
		RPMEMD_LOG(ERR, "!in-band thread failed");
	}
	if (!fip_ret) {
		ret = rpmemd_fip_process_stop(rpmemd->fip);
		if (ret) {
			RPMEMD_LOG(ERR, "!stopping fip process failed");
		}
	}
	/* the in-band thread is gone either way */
	rpmemd->fip_running = 0;
	return fip_ret;
}
/*
 * rpmemd_close_pool -- close pool and remove it if required
 *
 * Always closes the pool and frees the pool descriptor; additionally
 * removes the pool set from the db when 'remove' is non-zero. Returns
 * the status of the removal (0 when no removal was requested).
 */
static int
rpmemd_close_pool(struct rpmemd *rpmemd, int remove)
{
	int ret = 0;
	RPMEMD_LOG(NOTICE, "closing pool");
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
	RPMEMD_LOG(INFO, "pool closed");
	if (remove) {
		RPMEMD_LOG(NOTICE, "removing '%s'", rpmemd->pool_desc);
		ret = rpmemd_db_pool_remove(rpmemd->db,
				rpmemd->pool_desc, 0, 0);
		if (ret) {
			RPMEMD_LOG(ERR, "!removing pool '%s' failed",
					rpmemd->pool_desc);
		} else {
			RPMEMD_LOG(INFO, "removed '%s'", rpmemd->pool_desc);
		}
	}
	/* pool_desc was strdup'd by the create/open request handlers */
	free(rpmemd->pool_desc);
	return ret;
}
/*
 * rpmemd_req_cleanup -- cleanup in-band connection and all resources allocated
 * during open/create requests
 *
 * No-op unless the in-band thread was started. A pool created (not opened)
 * by this session is removed if the in-band teardown failed.
 */
static void
rpmemd_req_cleanup(struct rpmemd *rpmemd)
{
	if (!rpmemd->fip_running)
		return;
	int ret;
	ret = rpmemd_fip_stop(rpmemd);
	if (!ret) {
		rpmemd_fip_close(rpmemd->fip);
		rpmemd_fip_fini(rpmemd->fip);
	}
	int remove = rpmemd->created && ret;
	rpmemd_close_pool(rpmemd, remove);
}
/*
 * rpmemd_req_create -- handle create request
 *
 * Creates the pool, validates it, sets up the fabric (fip) resources,
 * sends the create response and finally starts the in-band thread.
 * On any failure the error path unwinds exactly the resources acquired
 * so far (note the fall-through goto labels) and, unless the response
 * was already sent, reports the error status back to the client.
 */
static int
rpmemd_req_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "create request:");
	rpmemd_print_req_attr(req);
	RPMEMD_LOG(NOTICE, "pool attributes:");
	rpmemd_print_pool_attr(pool_attr);
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	int status = 0;
	/* err_send tracks whether an error response still has to be sent */
	int err_send = 1;
	struct rpmem_resp_attr resp;
	memset(&resp, 0, sizeof(resp));
	if (rpmemd->pool) {
		/* only one pool per connection */
		RPMEMD_LOG(ERR, "pool already opened");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_pool_opened;
	}
	rpmemd->pool_desc = strdup(req->pool_desc);
	if (!rpmemd->pool_desc) {
		RPMEMD_LOG(ERR, "!allocating pool descriptor");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_strdup;
	}
	rpmemd->pool = rpmemd_db_pool_create(rpmemd->db,
			req->pool_desc, 0, pool_attr);
	if (!rpmemd->pool) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_pool_create;
	}
	/* remember the pool was created so cleanup knows it may remove it */
	rpmemd->created = 1;
	ret = rpmemd_check_pool(rpmemd, req, &status);
	if (ret)
		goto err_pool_check;
	ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status);
	if (ret)
		goto err_fip_init;
	RPMEMD_LOG(NOTICE, "create request response: (status = %u)", status);
	if (!status)
		rpmemd_print_resp_attr(&resp);
	ret = rpmemd_obc_create_resp(obc, status, &resp);
	if (ret)
		goto err_create_resp;
	ret = rpmemd_fip_start_thread(rpmemd);
	if (ret)
		goto err_fip_start;
	return 0;
err_fip_start:
err_create_resp:
	/* the response was (attempted to be) sent -- don't send another */
	err_send = 0;
	rpmemd_fip_fini(rpmemd->fip);
err_fip_init:
err_pool_check:
	/* a freshly created pool that failed validation is removed */
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
	rpmemd_db_pool_remove(rpmemd->db, req->pool_desc, 0, 0);
err_pool_create:
	free(rpmemd->pool_desc);
err_strdup:
err_pool_opened:
	if (err_send)
		ret = rpmemd_obc_create_resp(obc, status, &resp);
	/* any failure terminates the request loop in main() */
	rpmemd->closing = 1;
	return ret;
}
/*
 * rpmemd_req_open -- handle open request
 *
 * Mirrors rpmemd_req_create() but opens an existing pool; consequently the
 * error path closes the pool without removing it.
 */
static int
rpmemd_req_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "open request:");
	rpmemd_print_req_attr(req);
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	int status = 0;
	/* err_send tracks whether an error response still has to be sent */
	int err_send = 1;
	struct rpmem_resp_attr resp;
	memset(&resp, 0, sizeof(resp));
	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));
	if (rpmemd->pool) {
		RPMEMD_LOG(ERR, "pool already opened");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_pool_opened;
	}
	rpmemd->pool_desc = strdup(req->pool_desc);
	if (!rpmemd->pool_desc) {
		RPMEMD_LOG(ERR, "!allocating pool descriptor");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_strdup;
	}
	/* the open fills pool_attr which is echoed back in the response */
	rpmemd->pool = rpmemd_db_pool_open(rpmemd->db,
			req->pool_desc, 0, &pool_attr);
	if (!rpmemd->pool) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_pool_open;
	}
	RPMEMD_LOG(NOTICE, "pool attributes:");
	rpmemd_print_pool_attr(&pool_attr);
	ret = rpmemd_check_pool(rpmemd, req, &status);
	if (ret)
		goto err_pool_check;
	ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status);
	if (ret)
		goto err_fip_init;
	RPMEMD_LOG(NOTICE, "open request response: (status = %u)", status);
	if (!status)
		rpmemd_print_resp_attr(&resp);
	ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr);
	if (ret)
		goto err_open_resp;
	ret = rpmemd_fip_start_thread(rpmemd);
	if (ret)
		goto err_fip_start;
	return 0;
err_fip_start:
err_open_resp:
	err_send = 0;
	rpmemd_fip_fini(rpmemd->fip);
err_fip_init:
err_pool_check:
	/* unlike create, an opened pool is only closed -- never removed */
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
err_pool_open:
	free(rpmemd->pool_desc);
err_strdup:
err_pool_opened:
	if (err_send)
		ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr);
	rpmemd->closing = 1;
	return ret;
}
/*
 * rpmemd_req_close -- handle close request
 *
 * Stops the in-band machinery, closes the pool and sends the close
 * response. A pool created by this session is removed when the shutdown
 * failed or the client explicitly requested removal via flags.
 */
static int
rpmemd_req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "close request");
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	/* mark closing first so the main loop exits even on error */
	rpmemd->closing = 1;
	int ret;
	int status = 0;
	if (!rpmemd->pool) {
		RPMEMD_LOG(ERR, "pool not opened");
		status = RPMEM_ERR_FATAL;
		return rpmemd_obc_close_resp(obc, status);
	}
	ret = rpmemd_fip_stop(rpmemd);
	if (ret) {
		status = RPMEM_ERR_FATAL;
	} else {
		rpmemd_fip_close(rpmemd->fip);
		rpmemd_fip_fini(rpmemd->fip);
	}
	int remove = rpmemd->created &&
		(status || (flags & RPMEM_CLOSE_FLAGS_REMOVE));
	if (rpmemd_close_pool(rpmemd, remove))
		RPMEMD_LOG(ERR, "closing pool failed");
	RPMEMD_LOG(NOTICE, "close request response (status = %u)", status);
	ret = rpmemd_obc_close_resp(obc, status);
	return ret;
}
/*
 * rpmemd_req_set_attr -- handle set attributes request
 *
 * Updates the attributes of the already-opened pool and sends the
 * set-attributes response (also on failure, carrying the error status).
 */
static int
rpmemd_req_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "set attributes request");
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	/* protocol guarantees set_attr arrives only on an open pool */
	RPMEMD_ASSERT(rpmemd->pool != NULL);
	int ret;
	int status = 0;
	/* err_send tracks whether an error response still has to be sent */
	int err_send = 1;
	ret = rpmemd_db_pool_set_attr(rpmemd->pool, pool_attr);
	if (ret) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_set_attr;
	}
	RPMEMD_LOG(NOTICE, "new pool attributes:");
	rpmemd_print_pool_attr(pool_attr);
	ret = rpmemd_obc_set_attr_resp(obc, status);
	if (ret)
		goto err_set_attr_resp;
	return ret;
err_set_attr_resp:
	err_send = 0;
err_set_attr:
	if (err_send)
		ret = rpmemd_obc_set_attr_resp(obc, status);
	return ret;
}
/* out-of-band request dispatch table passed to rpmemd_obc_process() */
static struct rpmemd_obc_requests rpmemd_req = {
	.create = rpmemd_req_create,
	.open = rpmemd_req_open,
	.close = rpmemd_req_close,
	.set_attr = rpmemd_req_set_attr,
};
/*
 * rpmemd_print_info -- print basic info and configuration
 *
 * NOTICE-level lines are always logged; the DBG lines show up only in
 * debug builds / at debug log level.
 */
static void
rpmemd_print_info(struct rpmemd *rpmemd)
{
	/* SSH_CONNECTION/USER identify the client session that spawned us */
	RPMEMD_LOG(NOTICE, "ssh connection: %s",
			_str(os_getenv("SSH_CONNECTION")));
	RPMEMD_LOG(NOTICE, "user: %s", _str(os_getenv("USER")));
	RPMEMD_LOG(NOTICE, "configuration");
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool set directory: '%s'",
			_str(rpmemd->config.poolset_dir));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(rpmemd->persist_method));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "number of threads: %lu",
			rpmemd->config.nthreads);
	RPMEMD_DBG(RPMEMD_LOG_INDENT "persist APM: %s",
			bool2str(rpmemd->config.persist_apm));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "persist GPSPM: %s",
			bool2str(rpmemd->config.persist_general));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "use syslog: %s",
			bool2str(rpmemd->config.use_syslog));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "log file: %s",
			_str(rpmemd->config.log_file));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "log level: %s",
			rpmemd_log_level_to_str(rpmemd->config.log_level));
}
int
main(int argc, char *argv[])
{
	util_init();
	/* suppress the status message on the -r/--remove path */
	int send_status = 1;
	int ret = 1;
	struct rpmemd *rpmemd = calloc(1, sizeof(*rpmemd));
	if (!rpmemd) {
		RPMEMD_LOG(ERR, "!calloc");
		goto err_rpmemd;
	}
	/* out-of-band channel is simply our stdin/stdout (spawned via ssh) */
	rpmemd->obc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	if (!rpmemd->obc) {
		RPMEMD_LOG(ERR, "out-of-band connection initialization");
		goto err_obc;
	}
	/* bootstrap logging with defaults so config errors can be reported */
	if (rpmemd_log_init(DAEMON_NAME, NULL, 0)) {
		RPMEMD_LOG(ERR, "logging subsystem initialization failed");
		goto err_log_init;
	}
	if (rpmemd_config_read(&rpmemd->config, argc, argv) != 0) {
		RPMEMD_LOG(ERR, "reading configuration failed");
		goto err_config;
	}
	/* re-init logging with the settings read from the config */
	rpmemd_log_close();
	rpmemd_log_level = rpmemd->config.log_level;
	if (rpmemd_log_init(DAEMON_NAME, rpmemd->config.log_file,
			rpmemd->config.use_syslog)) {
		RPMEMD_LOG(ERR, "logging subsystem initialization"
			" failed (%s, %d)", rpmemd->config.log_file,
			rpmemd->config.use_syslog);
		goto err_log_init_config;
	}
	RPMEMD_LOG(INFO, "%s version %s", DAEMON_NAME, SRCVERSION);
	rpmemd->persist_method = rpmemd_get_pm(&rpmemd->config);
	rpmemd->db = rpmemd_db_init(rpmemd->config.poolset_dir, 0666);
	if (!rpmemd->db) {
		RPMEMD_LOG(ERR, "!pool set db initialization");
		goto err_db_init;
	}
	if (rpmemd->config.rm_poolset) {
		/* -r/--remove mode: remove the pool set and exit */
		RPMEMD_LOG(INFO, "removing '%s'",
				rpmemd->config.rm_poolset);
		if (rpmemd_db_pool_remove(rpmemd->db,
				rpmemd->config.rm_poolset,
				rpmemd->config.force,
				rpmemd->config.pool_set)) {
			RPMEMD_LOG(ERR, "removing '%s' failed",
					rpmemd->config.rm_poolset);
			ret = errno;
		} else {
			RPMEMD_LOG(NOTICE, "removed '%s'",
					rpmemd->config.rm_poolset);
			ret = 0;
		}
		send_status = 0;
		goto out_rm;
	}
	/* signal the client we are up and ready to serve requests */
	ret = rpmemd_obc_status(rpmemd->obc, 0);
	if (ret) {
		RPMEMD_LOG(ERR, "writing status failed");
		goto err_status;
	}
	rpmemd_print_info(rpmemd);
	/* serve requests until the close handler sets rpmemd->closing */
	while (!ret) {
		ret = rpmemd_obc_process(rpmemd->obc, &rpmemd_req, rpmemd);
		if (ret) {
			RPMEMD_LOG(ERR, "out-of-band connection"
					" process failed");
			goto err;
		}
		if (rpmemd->closing)
			break;
	}
	rpmemd_db_fini(rpmemd->db);
	rpmemd_config_free(&rpmemd->config);
	rpmemd_log_close();
	rpmemd_obc_fini(rpmemd->obc);
	free(rpmemd);
	return 0;
err:
	rpmemd_req_cleanup(rpmemd);
err_status:
out_rm:
	rpmemd_db_fini(rpmemd->db);
err_db_init:
err_log_init_config:
	rpmemd_config_free(&rpmemd->config);
err_config:
	rpmemd_log_close();
err_log_init:
	if (send_status) {
		/* best-effort: report the failure status to the client */
		if (rpmemd_obc_status(rpmemd->obc, (uint32_t)errno))
			RPMEMD_LOG(ERR, "writing status failed");
	}
	rpmemd_obc_fini(rpmemd->obc);
err_obc:
	free(rpmemd);
err_rpmemd:
	return ret;
}
| 18,497 | 22.007463 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_log.h -- rpmemd logging functions declarations
*/
#include <string.h>
#include "util.h"
/* enable printf-style format checking on logging functions */
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
 * The tab character is not allowed in rpmemd log,
 * because it is not well handled by syslog.
 * Please use RPMEMD_LOG_INDENT instead.
 */
#define RPMEMD_LOG_INDENT " "
/*
 * RPMEMD_LOG -- log a message at the given level; in debug builds the
 * source location is recorded as well. The COMPILE_ERROR_ON guard
 * rejects format strings containing tabs (see note above).
 */
#ifdef DEBUG
#define RPMEMD_LOG(level, fmt, arg...) do {\
	COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
	rpmemd_log(RPD_LOG_##level, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_LOG(level, fmt, arg...) do {\
	COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
	rpmemd_log(RPD_LOG_##level, NULL, 0, fmt, ## arg);\
} while (0)
#endif
/* RPMEMD_DBG -- debug-level message; compiled out in release builds */
#ifdef DEBUG
#define RPMEMD_DBG(fmt, arg...) do {\
	COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
	rpmemd_log(_RPD_LOG_DBG, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_DBG(fmt, arg...) do {} while (0)
#endif
/* RPMEMD_ERR -- shorthand for an error-level message */
#define RPMEMD_ERR(fmt, arg...) do {\
	RPMEMD_LOG(ERR, fmt, ## arg);\
} while (0)
/* RPMEMD_FATAL -- log an error and abort the process */
#define RPMEMD_FATAL(fmt, arg...) do {\
	RPMEMD_LOG(ERR, fmt, ## arg);\
	abort();\
} while (0)
/* RPMEMD_ASSERT -- runtime assertion logged through the rpmemd logger */
#define RPMEMD_ASSERT(cond) do {\
	if (!(cond)) {\
		rpmemd_log(RPD_LOG_ERR, __FILE__, __LINE__,\
				"assertion fault: %s", #cond);\
		abort();\
	}\
} while (0)
enum rpmemd_log_level {
	RPD_LOG_ERR,
	RPD_LOG_WARN,
	RPD_LOG_NOTICE,
	RPD_LOG_INFO,
	_RPD_LOG_DBG, /* disallow to use this with LOG macro */
	MAX_RPD_LOG,
};
enum rpmemd_log_level rpmemd_log_level_from_str(const char *str);
const char *rpmemd_log_level_to_str(enum rpmemd_log_level level);
/* current global log threshold, set from the configuration */
extern enum rpmemd_log_level rpmemd_log_level;
int rpmemd_log_init(const char *ident, const char *fname, int use_syslog);
void rpmemd_log_close(void);
int rpmemd_prefix(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void rpmemd_log(enum rpmemd_log_level level, const char *fname,
	int lineno, const char *fmt, ...) FORMAT_PRINTF(4, 5);
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_util.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util.c -- rpmemd utility functions definitions
*/
#include <stdlib.h>
#include <unistd.h>
#include "libpmem.h"
#include "rpmem_common.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
/*
 * rpmemd_pmem_persist -- pmem_persist wrapper required to unify function
 * pointer type with pmem_msync
 *
 * Always returns 0 (pmem_persist itself cannot fail).
 */
int
rpmemd_pmem_persist(const void *addr, size_t len)
{
	pmem_persist(addr, len);
	return 0;
}
/*
 * rpmemd_flush_fatal -- APM specific flush function which should never be
 * called because APM does not require flushes
 *
 * Aborts the process if reached (RPMEMD_FATAL does not return).
 */
int
rpmemd_flush_fatal(const void *addr, size_t len)
{
	RPMEMD_FATAL("rpmemd_flush_fatal should never be called");
}
/*
 * rpmemd_persist_to_str -- convert persist function pointer to string
 *
 * Never returns NULL: the result is passed directly to a "%s" format in
 * rpmem_print_pm_policy() and printing a null pointer with "%s" is
 * undefined behavior, so an unrecognized pointer maps to "unknown".
 */
static const char *
rpmemd_persist_to_str(int (*persist)(const void *addr, size_t len))
{
	if (persist == rpmemd_pmem_persist) {
		return "pmem_persist";
	} else if (persist == pmem_msync) {
		return "pmem_msync";
	} else if (persist == rpmemd_flush_fatal) {
		return "none";
	} else {
		return "unknown";
	}
}
/*
 * rpmem_print_pm_policy -- print persistency method policy
 */
static void
rpmem_print_pm_policy(enum rpmem_persist_method persist_method,
	int (*persist)(const void *addr, size_t len))
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(persist_method));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist flush: %s",
			rpmemd_persist_to_str(persist));
}
/*
 * rpmem_memcpy_msync -- memcpy and msync
 *
 * Fallback memcpy_persist for non-pmem targets: copy without flushing,
 * then msync the destination range.
 */
static void *
rpmem_memcpy_msync(void *pmemdest, const void *src, size_t len)
{
	void *ret = pmem_memcpy(pmemdest, src, len, PMEM_F_MEM_NOFLUSH);
	pmem_msync(pmemdest, len);
	return ret;
}
/*
 * rpmemd_apply_pm_policy -- choose the persistency method and the flush
 * function according to the pool type and the persistency method read from the
 * config
 *
 * All three out-parameters are (re)assigned. APM is only honored when the
 * pool actually resides on pmem; otherwise the policy silently degrades
 * to GPSPM with msync-based flushing. Returns 0 on success, -1 (after a
 * fatal log) on an unknown persist method.
 */
int
rpmemd_apply_pm_policy(enum rpmem_persist_method *persist_method,
	int (**persist)(const void *addr, size_t len),
	void *(**memcpy_persist)(void *pmemdest, const void *src, size_t len),
	const int is_pmem)
{
	switch (*persist_method) {
	case RPMEM_PM_APM:
		if (is_pmem) {
			*persist_method = RPMEM_PM_APM;
			/* APM needs no flush -- calling it is a bug */
			*persist = rpmemd_flush_fatal;
		} else {
			*persist_method = RPMEM_PM_GPSPM;
			*persist = pmem_msync;
		}
		break;
	case RPMEM_PM_GPSPM:
		*persist_method = RPMEM_PM_GPSPM;
		*persist = is_pmem ? rpmemd_pmem_persist : pmem_msync;
		break;
	default:
		RPMEMD_FATAL("invalid persist method: %d", *persist_method);
		return -1;
	}
	/* this is for RPMEM_PERSIST_INLINE */
	if (is_pmem)
		*memcpy_persist = pmem_memcpy_persist;
	else
		*memcpy_persist = rpmem_memcpy_msync;
	RPMEMD_LOG(NOTICE, "persistency policy:");
	rpmem_print_pm_policy(*persist_method, *persist);
	return 0;
}
| 2,839 | 22.666667 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_db.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_db.h -- internal definitions for rpmemd database of pool set files
*/
/* opaque pool set database handle (defined in rpmemd_db.c) */
struct rpmemd_db;
struct rpmem_pool_attr;
/*
 * struct rpmemd_db_pool -- remote pool context
 */
struct rpmemd_db_pool {
	void *pool_addr;	/* mapped address of the pool */
	size_t pool_size;	/* size of the mapped pool */
	struct pool_set *set;	/* underlying pool set */
};
/* database lifecycle */
struct rpmemd_db *rpmemd_db_init(const char *root_dir, mode_t mode);
/* pool lifecycle within a database */
struct rpmemd_db_pool *rpmemd_db_pool_create(struct rpmemd_db *db,
	const char *pool_desc, size_t pool_size,
	const struct rpmem_pool_attr *rattr);
struct rpmemd_db_pool *rpmemd_db_pool_open(struct rpmemd_db *db,
	const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr);
int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
		int force, int pool_set);
int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
		const struct rpmem_pool_attr *rattr);
void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp);
void rpmemd_db_fini(struct rpmemd_db *db);
int rpmemd_db_check_dir(struct rpmemd_db *db);
int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool);
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_obc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmemd_obc.c -- rpmemd out-of-band connection definitions
*/
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <netdb.h>
#include "librpmem.h"
#include "rpmemd_log.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmemd_obc.h"
/* out-of-band connection: a pair of file descriptors (e.g. stdin/stdout) */
struct rpmemd_obc {
	int fd_in;	/* requests are read from here */
	int fd_out;	/* responses are written here */
};
/*
 * rpmemd_obc_check_proto_ver -- check protocol version
 *
 * Returns 0 when the client speaks exactly the supported protocol
 * version, -1 (after logging) otherwise.
 */
static int
rpmemd_obc_check_proto_ver(unsigned major, unsigned minor)
{
	int supported = major == RPMEM_PROTO_MAJOR &&
			minor == RPMEM_PROTO_MINOR;

	if (supported)
		return 0;

	RPMEMD_LOG(ERR, "unsupported protocol version -- %u.%u",
			major, minor);
	return -1;
}
/*
 * rpmemd_obc_check_msg_hdr -- check message header
 *
 * Accepts only client->server request types and rejects any message
 * shorter than the header itself. Returns 0 if valid, -1 otherwise.
 */
static int
rpmemd_obc_check_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
	switch (hdrp->type) {
	case RPMEM_MSG_TYPE_OPEN:
	case RPMEM_MSG_TYPE_CREATE:
	case RPMEM_MSG_TYPE_CLOSE:
	case RPMEM_MSG_TYPE_SET_ATTR:
		/* all messages from obc to server are fine */
		break;
	default:
		RPMEMD_LOG(ERR, "invalid message type -- %u", hdrp->type);
		return -1;
	}
	if (hdrp->size < sizeof(struct rpmem_msg_hdr)) {
		RPMEMD_LOG(ERR, "invalid message size -- %lu", hdrp->size);
		return -1;
	}
	return 0;
}
/*
 * rpmemd_obc_check_pool_desc -- check pool descriptor
 *
 * Validates that the message size accounts exactly for the descriptor,
 * and that the descriptor is a non-empty, null-terminated string with a
 * consistent declared length. Returns 0 if valid, -1 otherwise.
 */
static int
rpmemd_obc_check_pool_desc(struct rpmem_msg_hdr *hdrp, size_t msg_size,
	struct rpmem_msg_pool_desc *pool_desc)
{
	size_t body_size = msg_size + pool_desc->size;
	if (hdrp->size != body_size) {
		RPMEMD_LOG(ERR, "message and pool descriptor size mismatch "
			"-- is %lu should be %lu", hdrp->size, body_size);
		return -1;
	}
	/* minimum is one character plus the terminating null byte */
	if (pool_desc->size < 2) {
		RPMEMD_LOG(ERR, "invalid pool descriptor size -- %u "
				"(must be >= 2)", pool_desc->size);
		return -1;
	}
	if (pool_desc->desc[pool_desc->size - 1] != '\0') {
		RPMEMD_LOG(ERR, "invalid pool descriptor "
			"(must be null-terminated string)");
		return -1;
	}
	/* declared size must match the actual string length */
	size_t len = strlen((char *)pool_desc->desc) + 1;
	if (pool_desc->size != len) {
		RPMEMD_LOG(ERR, "invalid pool descriptor size -- is %lu "
				"should be %u", len, pool_desc->size);
		return -1;
	}
	return 0;
}
/*
 * rpmemd_obc_check_provider -- check provider value
 *
 * Returns 0 for a provider in the valid open range (0, MAX_RPMEM_PROV),
 * -1 (after logging) otherwise.
 */
static int
rpmemd_obc_check_provider(uint32_t provider)
{
	int valid = provider > 0 && provider < MAX_RPMEM_PROV;

	if (!valid) {
		RPMEMD_LOG(ERR, "invalid provider -- %u", provider);
		return -1;
	}

	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_create -- convert and check create request message
 *
 * Byte-swaps the message in place and validates protocol version, pool
 * descriptor and provider. Returns 0 if valid, non-zero otherwise.
 */
static int
rpmemd_obc_ntoh_check_msg_create(struct rpmem_msg_hdr *hdrp)
{
	int ret;
	struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp;
	rpmem_ntoh_msg_create(msg);
	ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_provider(msg->c.provider);
	if (ret)
		return ret;
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_open -- convert and check open request message
 *
 * Same validation steps as for the create request.
 */
static int
rpmemd_obc_ntoh_check_msg_open(struct rpmem_msg_hdr *hdrp)
{
	int ret;
	struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp;
	rpmem_ntoh_msg_open(msg);
	ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_provider(msg->c.provider);
	if (ret)
		return ret;
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_close -- convert and check close request message
 */
static int
rpmemd_obc_ntoh_check_msg_close(struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp;
	rpmem_ntoh_msg_close(msg);
	/* nothing to do */
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_set_attr -- convert and check set attributes
 * request message
 */
static int
rpmemd_obc_ntoh_check_msg_set_attr(struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp;
	rpmem_ntoh_msg_set_attr(msg);
	/* nothing to do */
	return 0;
}
typedef int (*rpmemd_obc_ntoh_check_msg_fn)(struct rpmem_msg_hdr *hdrp);
/* per-message-type convert-and-validate dispatch, indexed by hdr.type */
static rpmemd_obc_ntoh_check_msg_fn rpmemd_obc_ntoh_check_msg[] = {
	[RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_ntoh_check_msg_create,
	[RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_ntoh_check_msg_open,
	[RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_ntoh_check_msg_close,
	[RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_ntoh_check_msg_set_attr,
};
/*
 * rpmemd_obc_process_create -- process create request
 *
 * Unpacks request and pool attributes from the (already validated and
 * byte-swapped) message and forwards them to the create callback. An
 * all-zero attribute block is passed on as NULL.
 */
static int
rpmemd_obc_process_create(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp;
	struct rpmem_req_attr req = {
		.pool_size = msg->c.pool_size,
		.nlanes = (unsigned)msg->c.nlanes,
		.pool_desc = (char *)msg->pool_desc.desc,
		.provider = (enum rpmem_provider)msg->c.provider,
		.buff_size = msg->c.buff_size,
	};
	struct rpmem_pool_attr *rattr = NULL;
	struct rpmem_pool_attr rpmem_attr;
	unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr);
	if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr)))
		rattr = &rpmem_attr;
	return req_cb->create(obc, arg, &req, rattr);
}
/*
 * rpmemd_obc_process_open -- process open request
 */
static int
rpmemd_obc_process_open(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp;
	struct rpmem_req_attr req = {
		.pool_size = msg->c.pool_size,
		.nlanes = (unsigned)msg->c.nlanes,
		.pool_desc = (const char *)msg->pool_desc.desc,
		.provider = (enum rpmem_provider)msg->c.provider,
		.buff_size = msg->c.buff_size,
	};
	return req_cb->open(obc, arg, &req);
}
/*
 * rpmemd_obc_process_close -- process close request
 */
static int
rpmemd_obc_process_close(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp;
	return req_cb->close(obc, arg, (int)msg->flags);
}
/*
 * rpmemd_obc_process_set_attr -- process set attributes request
 *
 * As for create: an all-zero attribute block is forwarded as NULL.
 */
static int
rpmemd_obc_process_set_attr(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp;
	struct rpmem_pool_attr *rattr = NULL;
	struct rpmem_pool_attr rpmem_attr;
	unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr);
	if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr)))
		rattr = &rpmem_attr;
	return req_cb->set_attr(obc, arg, rattr);
}
typedef int (*rpmemd_obc_process_fn)(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp);
/* per-message-type request handler dispatch, indexed by hdr.type */
static rpmemd_obc_process_fn rpmemd_obc_process_cb[] = {
	[RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_process_create,
	[RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_process_open,
	[RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_process_close,
	[RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_process_set_attr,
};
/*
 * rpmemd_obc_recv -- wrapper for read and decode data function
 *
 * Return values follow rpmem_xread: 0 on success, <0 on error,
 * 1 when the peer disconnected.
 */
static inline int
rpmemd_obc_recv(struct rpmemd_obc *obc, void *buff, size_t len)
{
	return rpmem_xread(obc->fd_in, buff, len, 0);
}
/*
 * rpmemd_obc_send -- wrapper for encode and write data function
 */
static inline int
rpmemd_obc_send(struct rpmemd_obc *obc, const void *buff, size_t len)
{
	return rpmem_xwrite(obc->fd_out, buff, len, 0);
}
/*
 * rpmemd_obc_msg_recv -- receive and check request message
 *
 * Reads the fixed-size header first, validates it, then allocates a
 * buffer of the declared total size and reads the remaining body into
 * it. On success *hdrpp points to the malloc'd message (in host byte
 * order after the per-type check) and the caller owns the buffer.
 *
 * Return values:
 * 0 - success
 * < 0 - error
 * 1 - obc disconnected
 */
static int
rpmemd_obc_msg_recv(struct rpmemd_obc *obc,
	struct rpmem_msg_hdr **hdrpp)
{
	struct rpmem_msg_hdr hdr;
	struct rpmem_msg_hdr nhdr;	/* header in network byte order */
	struct rpmem_msg_hdr *hdrp;
	int ret;
	ret = rpmemd_obc_recv(obc, &nhdr, sizeof(nhdr));
	if (ret == 1) {
		RPMEMD_LOG(NOTICE, "out-of-band connection disconnected");
		return 1;
	}
	if (ret < 0) {
		RPMEMD_LOG(ERR, "!receiving message header failed");
		return ret;
	}
	memcpy(&hdr, &nhdr, sizeof(hdr));
	rpmem_ntoh_msg_hdr(&hdr);
	ret = rpmemd_obc_check_msg_hdr(&hdr);
	if (ret) {
		RPMEMD_LOG(ERR, "parsing message header failed");
		return ret;
	}
	hdrp = malloc(hdr.size);
	if (!hdrp) {
		RPMEMD_LOG(ERR, "!allocating message buffer failed");
		return -1;
	}
	/* keep the header in network order -- the per-type check swaps all */
	memcpy(hdrp, &nhdr, sizeof(*hdrp));
	size_t body_size = hdr.size - sizeof(hdr);
	ret = rpmemd_obc_recv(obc, hdrp->body, body_size);
	if (ret) {
		RPMEMD_LOG(ERR, "!receiving message body failed");
		goto err_recv_body;
	}
	ret = rpmemd_obc_ntoh_check_msg[hdr.type](hdrp);
	if (ret) {
		RPMEMD_LOG(ERR, "parsing message body failed");
		goto err_body;
	}
	*hdrpp = hdrp;
	return 0;
err_body:
err_recv_body:
	free(hdrp);
	return -1;
}
/*
 * rpmemd_obc_init -- initialize rpmemd
 *
 * Allocates an out-of-band connection context over the given descriptor
 * pair. Returns NULL on allocation failure. The descriptors are not
 * duplicated -- the caller retains ownership of them.
 */
struct rpmemd_obc *
rpmemd_obc_init(int fd_in, int fd_out)
{
	struct rpmemd_obc *obc = calloc(1, sizeof(*obc));
	if (!obc) {
		RPMEMD_LOG(ERR, "!allocating obc failed");
		goto err_calloc;
	}
	obc->fd_in = fd_in;
	obc->fd_out = fd_out;
	return obc;
err_calloc:
	return NULL;
}
/*
 * rpmemd_obc_fini -- destroy obc
 *
 * Frees the context only; the file descriptors are left open.
 */
void
rpmemd_obc_fini(struct rpmemd_obc *obc)
{
	free(obc);
}
/*
 * rpmemd_obc_status -- sends initial status to the client
 *
 * Returns the rpmemd_obc_send status (0 on success).
 */
int
rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status)
{
	return rpmemd_obc_send(obc, &status, sizeof(status));
}
/*
 * rpmemd_obc_process -- wait for and process a message from client
 *
 * Receives one request, dispatches it to the matching callback from
 * req_cb, and frees the message buffer afterwards.
 *
 * Return values:
 * 0 - success
 * < 0 - error
 * 1 - client disconnected
 */
int
rpmemd_obc_process(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg)
{
	RPMEMD_ASSERT(req_cb != NULL);
	RPMEMD_ASSERT(req_cb->create != NULL);
	RPMEMD_ASSERT(req_cb->open != NULL);
	RPMEMD_ASSERT(req_cb->close != NULL);
	RPMEMD_ASSERT(req_cb->set_attr != NULL);
	struct rpmem_msg_hdr *hdrp = NULL;
	int ret;
	ret = rpmemd_obc_msg_recv(obc, &hdrp);
	if (ret)
		return ret;
	RPMEMD_ASSERT(hdrp != NULL);
	/* hdrp->type was validated by rpmemd_obc_msg_recv */
	ret = rpmemd_obc_process_cb[hdrp->type](obc, req_cb, arg, hdrp);
	free(hdrp);
	return ret;
}
/*
 * rpmemd_obc_create_resp -- send create request response message
 *
 * The response is converted to network byte order before sending.
 */
int
rpmemd_obc_create_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res)
{
	struct rpmem_msg_create_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_CREATE_RESP,
			.size = sizeof(struct rpmem_msg_create_resp),
			.status = (uint32_t)status,
		},
		.ibc = {
			.port = res->port,
			.rkey = res->rkey,
			.raddr = res->raddr,
			.persist_method = res->persist_method,
			.nlanes = res->nlanes,
		},
	};
	rpmem_hton_msg_create_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_open_resp -- send open request response message
 *
 * Like the create response but additionally carries the pool attributes
 * read from the opened pool.
 */
int
rpmemd_obc_open_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_msg_open_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_OPEN_RESP,
			.size = sizeof(struct rpmem_msg_open_resp),
			.status = (uint32_t)status,
		},
		.ibc = {
			.port = res->port,
			.rkey = res->rkey,
			.raddr = res->raddr,
			.persist_method = res->persist_method,
			.nlanes = res->nlanes,
		},
	};
	pack_rpmem_pool_attr(pool_attr, &resp.pool_attr);
	rpmem_hton_msg_open_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_close_resp -- send close request response message
 */
int
rpmemd_obc_close_resp(struct rpmemd_obc *obc,
	int status)
{
	struct rpmem_msg_close_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_CLOSE_RESP,
			.size = sizeof(struct rpmem_msg_close_resp),
			.status = (uint32_t)status,
		},
	};
	rpmem_hton_msg_close_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_set_attr_resp -- send set attributes request response message
 */
int
rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status)
{
	struct rpmem_msg_set_attr_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_SET_ATTR_RESP,
			.size = sizeof(struct rpmem_msg_set_attr_resp),
			.status = (uint32_t)status,
		},
	};
	rpmem_hton_msg_set_attr_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
| 12,309 | 21.422587 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_config.c -- rpmemd config source file
*/
#include <pwd.h>
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <inttypes.h>
#include "rpmemd.h"
#include "rpmemd_log.h"
#include "rpmemd_config.h"
#include "os.h"
#define CONFIG_LINE_SIZE_INIT 50
#define INVALID_CHAR_POS UINT64_MAX
/* positions of the characters that delimit a config-file line */
struct rpmemd_special_chars_pos {
	uint64_t equal_char;	/* '=' separating key and value */
	uint64_t comment_char;	/* '#' starting a comment */
	uint64_t EOL_char;	/* end-of-line position */
};
/* long-only option identifiers, chosen above the short-option range */
enum rpmemd_option {
	RPD_OPT_LOG_FILE,
	RPD_OPT_POOLSET_DIR,
	RPD_OPT_PERSIST_APM,
	RPD_OPT_PERSIST_GENERAL,
	RPD_OPT_USE_SYSLOG,
	RPD_OPT_LOG_LEVEL,
	RPD_OPT_RM_POOLSET,
	RPD_OPT_MAX_VALUE,
	RPD_OPT_INVALID = UINT64_MAX,
};
/* short options for getopt_long */
static const char *optstr = "c:hVr:fst:";
/*
 * options -- cl and config file options
 */
static const struct option options[] = {
	{"config", required_argument, NULL, 'c'},
	{"help", no_argument, NULL, 'h'},
	{"version", no_argument, NULL, 'V'},
	{"log-file", required_argument, NULL, RPD_OPT_LOG_FILE},
	{"poolset-dir", required_argument, NULL, RPD_OPT_POOLSET_DIR},
	{"persist-apm", no_argument, NULL, RPD_OPT_PERSIST_APM},
	{"persist-general", no_argument, NULL, RPD_OPT_PERSIST_GENERAL},
	{"use-syslog", no_argument, NULL, RPD_OPT_USE_SYSLOG},
	{"log-level", required_argument, NULL, RPD_OPT_LOG_LEVEL},
	{"remove", required_argument, NULL, 'r'},
	{"force", no_argument, NULL, 'f'},
	{"pool-set", no_argument, NULL, 's'},
	{"nthreads", required_argument, NULL, 't'},
	{NULL, 0, NULL, 0},
};
#define VALUE_INDENT " "
/* help text; %s is substituted with DAEMON_NAME by print_help() */
static const char * const help_str =
"\n"
"Options:\n"
" -c, --config <path> configuration file location\n"
" -r, --remove <poolset> remove pool described by given poolset file\n"
" -f, --force ignore errors when removing a pool\n"
" -t, --nthreads <num> number of processing threads\n"
" -h, --help display help message and exit\n"
" -V, --version display target daemon version and exit\n"
" --log-file <path> log file location\n"
" --poolset-dir <path> pool set files directory\n"
" --persist-apm enable Appliance Persistency Method\n"
" --persist-general enable General Server Persistency Mechanism\n"
" --use-syslog use syslog(3) for logging messages\n"
" --log-level <level> set log level value\n"
VALUE_INDENT "err error conditions\n"
VALUE_INDENT "warn warning conditions\n"
VALUE_INDENT "notice normal, but significant, condition\n"
VALUE_INDENT "info informational message\n"
VALUE_INDENT "debug debug-level message\n"
"\n"
"For complete documentation see %s(1) manual page.";
/*
* print_version -- (internal) prints version message
*/
static void
print_version(void)
{
RPMEMD_LOG(ERR, "%s version %s", DAEMON_NAME, SRCVERSION);
}
/*
 * print_usage -- (internal) prints usage message
 *
 * 'name' is the program name (argv[0]).
 */
static void
print_usage(const char *name)
{
	RPMEMD_LOG(ERR, "usage: %s [--version] [--help] [<args>]",
		name);
}
/*
 * print_help -- (internal) prints help message
 *
 * Prints usage, version and the full option help text; help_str contains
 * a %s placeholder substituted with the daemon name.
 */
static void
print_help(const char *name)
{
	print_usage(name);
	print_version();
	RPMEMD_LOG(ERR, help_str, DAEMON_NAME);
}
/*
 * parse_config_string -- (internal) duplicate a non-empty string value
 *
 * Returns a heap-allocated copy owned by the caller, or NULL with errno
 * set to EINVAL when the value is empty.  Aborts on allocation failure.
 */
static inline char *
parse_config_string(const char *value)
{
	/* an empty value is rejected up front */
	if (*value == '\0') {
		errno = EINVAL;
		return NULL;
	}

	char *copy = strdup(value);
	if (copy == NULL)
		RPMEMD_FATAL("!strdup");

	return copy;
}
/*
 * parse_config_bool -- (internal) parse yes / no flag
 *
 * A NULL value (flag given with no argument) counts as "yes".  Returns 0
 * on success, -1 with errno set to EINVAL for any other string.
 */
static inline int
parse_config_bool(bool *config_value, const char *value)
{
	if (value == NULL || strcmp("yes", value) == 0) {
		*config_value = true;
		return 0;
	}

	if (strcmp("no", value) == 0) {
		*config_value = false;
		return 0;
	}

	errno = EINVAL;
	return -1;
}
/*
 * set_option -- (internal) set single config option
 *
 * Applies one RPD_OPT_* option with the given (possibly NULL) value to
 * the config structure.  Returns 0 on success, -1 with errno set on an
 * unknown option or an invalid value.
 */
static int
set_option(enum rpmemd_option option, const char *value,
	struct rpmemd_config *config)
{
	switch (option) {
	case RPD_OPT_LOG_FILE:
		free(config->log_file);
		config->log_file = parse_config_string(value);
		if (config->log_file == NULL)
			return -1;
		/* an explicit log file turns syslog logging off */
		config->use_syslog = false;
		return 0;
	case RPD_OPT_POOLSET_DIR:
		free(config->poolset_dir);
		config->poolset_dir = parse_config_string(value);
		return config->poolset_dir == NULL ? -1 : 0;
	case RPD_OPT_PERSIST_APM:
		return parse_config_bool(&config->persist_apm, value);
	case RPD_OPT_PERSIST_GENERAL:
		return parse_config_bool(&config->persist_general, value);
	case RPD_OPT_USE_SYSLOG:
		return parse_config_bool(&config->use_syslog, value);
	case RPD_OPT_LOG_LEVEL:
		config->log_level = rpmemd_log_level_from_str(value);
		if (config->log_level == MAX_RPD_LOG) {
			errno = EINVAL;
			return -1;
		}
		return 0;
	default:
		errno = EINVAL;
		return -1;
	}
}
/*
 * get_config_line -- (internal) read single line from file
 *
 * Reads one logical line into *line, doubling *line_max (and reallocating
 * the buffer) until the whole line fits.  While scanning it records in
 * *pos the offset of the first '#' and the first '=' and, finally, the
 * end-of-line offset.  *line_max_increased is set when the buffer grew so
 * the caller can resize its own scratch copy.
 *
 * Returns 0 both on success and on EOF/error from fgets(); the caller
 * distinguishes the latter by pos->EOL_char remaining at its sentinel.
 */
static int
get_config_line(FILE *file, char **line, uint64_t *line_max,
	uint8_t *line_max_increased, struct rpmemd_special_chars_pos *pos)
{
	uint8_t line_complete = 0;
	uint64_t line_length = 0;
	char *line_part = *line;
	do {
		char *ret = fgets(line_part,
			(int)(*line_max - line_length), file);
		if (ret == NULL)
			return 0;	/* EOF/error -- EOL_char stays unset */
		for (uint64_t i = 0; i < *line_max; ++i) {
			if (line_part[i] == '\n')
				line_complete = 1;
			else if (line_part[i] == '\0') {
				line_length += i;
				/* '\0' before buffer end => fgets stopped at '\n'/EOF */
				if (line_length + 1 < *line_max)
					line_complete = 1;
				break;
			} else if (line_part[i] == '#' &&
					pos->comment_char == UINT64_MAX)
				pos->comment_char = line_length + i;
			else if (line_part[i] == '=' &&
					pos->equal_char == UINT64_MAX)
				pos->equal_char = line_length + i;
		}
		if (line_complete == 0) {
			/* line longer than the buffer -- double it, keep reading */
			*line = realloc(*line, sizeof(char) * (*line_max) * 2);
			if (*line == NULL) {
				RPMEMD_FATAL("!realloc");
			}
			/* resume on top of the previous terminating '\0' */
			line_part = *line + *line_max - 1;
			line_length = *line_max - 1;
			*line_max *= 2;
			*line_max_increased = 1;
		}
	} while (line_complete != 1);
	pos->EOL_char = line_length;
	return 0;
}
/*
 * trim_line_element -- (internal) remove white characters
 *
 * Trims the half-open region [start, end) of 'line' in place: skips
 * leading and trailing white space, writes a terminating '\0' and returns
 * a pointer to the first non-space character, or NULL when the region is
 * all white space.
 */
static char *
trim_line_element(char *line, uint64_t start, uint64_t end)
{
	/*
	 * Cast to unsigned char: passing a plain (possibly negative) char
	 * to isspace() is undefined behavior (CERT STR37-C).
	 */
	for (; start <= end; ++start) {
		if (!isspace((unsigned char)line[start]))
			break;
	}

	for (; end > start; --end) {
		if (!isspace((unsigned char)line[end - 1]))
			break;
	}

	if (start == end)
		return NULL;	/* nothing but white space */

	line[end] = '\0';
	return &line[start];
}
/*
* parse_config_key -- (internal) lookup config key
*/
static enum rpmemd_option
parse_config_key(const char *key)
{
for (int i = 0; options[i].name != 0; ++i) {
if (strcmp(key, options[i].name) == 0)
return (enum rpmemd_option)options[i].val;
}
return RPD_OPT_INVALID;
}
/*
 * parse_config_line -- (internal) parse single config line
 *
 * Parses one "key = value" line (with optional '#' comment) and applies
 * it to config via set_option(), unless the option's bit is set in
 * 'disabled' (already given on the command line).  Blank lines and pure
 * comments are accepted.  Returns 0 on success, -1 with errno set.
 */
static int
parse_config_line(char *line, struct rpmemd_special_chars_pos *pos,
	struct rpmemd_config *config, uint64_t disabled)
{
	/* a '=' inside a comment does not count as a separator */
	if (pos->comment_char < pos->equal_char)
		pos->equal_char = INVALID_CHAR_POS;
	uint64_t end_of_content = pos->comment_char != INVALID_CHAR_POS ?
		pos->comment_char : pos->EOL_char;
	if (pos->equal_char == INVALID_CHAR_POS) {
		/* no '=' -- only blank lines / pure comments are valid */
		char *leftover = trim_line_element(line, 0, end_of_content);
		if (leftover != NULL) {
			errno = EINVAL;
			return -1;
		} else {
			return 0;
		}
	}
	char *key_name = trim_line_element(line, 0, pos->equal_char);
	char *value = trim_line_element(line, pos->equal_char + 1,
			end_of_content);
	if (key_name == NULL || value == NULL) {
		errno = EINVAL;
		return -1;
	}
	enum rpmemd_option key = parse_config_key(key_name);
	if (key != RPD_OPT_INVALID) {
		/*
		 * Shift a 64-bit one: the previous (uint64_t)(1 << key) form
		 * shifted the int constant 1, which is undefined for
		 * key >= 31; this also matches the UINT64_C(1) << opt mask
		 * built in parse_cl_args().
		 */
		if ((disabled & (UINT64_C(1) << key)) == 0)
			if (set_option(key, value, config) != 0)
				return -1;
	} else {
		errno = EINVAL;
		return -1;
	}
	return 0;
}
/*
 * parse_config_file -- (internal) parse config file
 *
 * Reads 'filename' line by line and applies each key=value pair to
 * config, skipping options whose bits are set in 'disabled'.  When
 * 'required' is zero a missing file is silently ignored.  Returns 0 on
 * success, -1 on failure.
 */
static int
parse_config_file(const char *filename, struct rpmemd_config *config,
	uint64_t disabled, int required)
{
	RPMEMD_ASSERT(filename != NULL);
	FILE *file = os_fopen(filename, "r");
	if (file == NULL) {
		if (required) {
			RPMEMD_LOG(ERR, "!%s", filename);
			goto error_fopen;
		} else {
			goto optional_config_missing;
		}
	}
	uint8_t line_max_increased = 0;
	uint64_t line_max = CONFIG_LINE_SIZE_INIT;
	uint64_t line_num = 1;
	char *line = (char *)malloc(sizeof(char) * line_max);
	if (line == NULL) {
		RPMEMD_LOG(ERR, "!malloc");
		goto error_malloc_line;
	}
	/* parse_config_line() modifies its input, so parse a scratch copy */
	char *line_copy = (char *)malloc(sizeof(char) * line_max);
	if (line_copy == NULL) {
		RPMEMD_LOG(ERR, "!malloc");
		goto error_malloc_line_copy;
	}
	struct rpmemd_special_chars_pos pos;
	do {
		/* all-ones == the INVALID_CHAR_POS sentinel in every field */
		memset(&pos, 0xff, sizeof(pos));
		if (get_config_line(file, &line, &line_max,
				&line_max_increased, &pos) != 0)
			goto error;
		if (line_max_increased) {
			/* keep the scratch copy as large as the line buffer */
			char *line_new = (char *)realloc(line_copy,
				sizeof(char) * line_max);
			if (line_new == NULL) {
				/* was "!malloc" -- the failing call is realloc */
				RPMEMD_LOG(ERR, "!realloc");
				goto error;
			}
			line_copy = line_new;
			line_max_increased = 0;
		}
		if (pos.EOL_char != INVALID_CHAR_POS) {
			strcpy(line_copy, line);
			int ret = parse_config_line(line_copy, &pos, config,
					disabled);
			if (ret != 0) {
				/* strip the newline for the error message */
				size_t len = strlen(line);
				if (len > 0 && line[len - 1] == '\n')
					line[len - 1] = '\0';
				RPMEMD_LOG(ERR, "Invalid config file line at "
					"%s:%lu\n%s",
					filename, line_num, line);
				goto error;
			}
		}
		++line_num;
	} while (pos.EOL_char != INVALID_CHAR_POS);
	free(line_copy);
	free(line);
	fclose(file);
optional_config_missing:
	return 0;
error:
	free(line_copy);
error_malloc_line_copy:
	free(line);
error_malloc_line:
	fclose(file);
error_fopen:
	return -1;
}
/*
 * parse_cl_args -- (internal) parse command line arguments
 *
 * Fills config with values given on the command line, stores a -c config
 * file path in *config_file and records in *cl_options a bitmask of the
 * long-only options seen, so later config-file parsing does not override
 * them.  Exits the process on -h/-V and on invalid arguments.
 */
static void
parse_cl_args(int argc, char *argv[], struct rpmemd_config *config,
	const char **config_file, uint64_t *cl_options)
{
	RPMEMD_ASSERT(argv != NULL);
	RPMEMD_ASSERT(config != NULL);
	int opt;
	int option_index = 0;
	while ((opt = getopt_long(argc, argv, optstr, options,
			&option_index)) != -1) {
		switch (opt) {
		case 'c':
			(*config_file) = optarg;
			break;
		case 'r':
			config->rm_poolset = optarg;
			break;
		case 'f':
			config->force = true;
			break;
		case 's':
			config->pool_set = true;
			break;
		case 't':
			errno = 0;
			char *endptr;
			config->nthreads = strtoul(optarg, &endptr, 10);
			if (errno || *endptr != '\0') {
				RPMEMD_LOG(ERR,
					"invalid number of threads -- '%s'",
					optarg);
				/* NOTE(review): exit(-1) yields status 255 */
				exit(-1);
			}
			break;
		case 'h':
			print_help(argv[0]);
			exit(0);
		case 'V':
			print_version();
			exit(0);
			break;
		default:
			/* a long-only option -- apply it and remember it */
			if (set_option((enum rpmemd_option)opt, optarg, config)
					== 0) {
				*cl_options |= (UINT64_C(1) << opt);
			} else {
				print_usage(argv[0]);
				exit(-1);
			}
		}
	}
}
/*
 * get_home_dir -- (internal) return user home directory
 *
 * Function will lookup user home directory in order:
 * 1. HOME environment variable
 * 2. Password file entry using real user ID
 *
 * Writes the result into 'str' (at most 'size' bytes); aborts on lookup
 * or formatting failure.
 */
static void
get_home_dir(char *str, size_t size)
{
	char *home = os_getenv(HOME_ENV);

	if (home == NULL) {
		/* no $HOME -- fall back to the password database entry */
		struct passwd *pw = getpwuid(getuid());
		if (pw == NULL)
			RPMEMD_FATAL("!getpwuid");
		home = pw->pw_dir;
	}

	if (util_snprintf(str, size, "%s", home) < 0)
		RPMEMD_FATAL("!snprintf");
}
/*
 * concat_dir_and_file_name -- (internal) concatenate directory and file name
 * into single string path
 *
 * Writes "dir/file" into the caller-provided buffer of 'size' bytes;
 * aborts via RPMEMD_FATAL when util_snprintf() reports an error.
 */
static void
concat_dir_and_file_name(char *path, size_t size, const char *dir,
	const char *file)
{
	int r = util_snprintf(path, size, "%s/%s", dir, file);
	if (r < 0)
		RPMEMD_FATAL("!snprintf");
}
/*
 * str_replace_home -- (internal) replace $HOME string with user home directory
 *
 * If function does not find $HOME string it will return haystack untouched.
 * Otherwise it will allocate new string with $HOME replaced with provided
 * home_dir path. haystack will be released and newly created string returned.
 *
 * The placeholder is only replaced when it is not immediately followed by
 * an alphanumeric character (so e.g. "$HOMEDIR" is left alone).
 */
static char *
str_replace_home(char *haystack, const char *home_dir)
{
	const size_t placeholder_len = strlen(HOME_STR_PLACEHOLDER);
	const size_t home_len = strlen(home_dir);
	size_t haystack_len = strlen(haystack);
	char *pos = strstr(haystack, HOME_STR_PLACEHOLDER);
	if (!pos)
		return haystack;
	const char *after = pos + placeholder_len;
	/*
	 * Cast to unsigned char: passing a plain (possibly negative) char
	 * to isalnum() is undefined behavior (CERT STR37-C).
	 */
	if (isalnum((unsigned char)*after))
		return haystack;
	/* new length: strip placeholder, insert home dir, keep the '\0' */
	haystack_len += home_len - placeholder_len + 1;
	char *buf = malloc(sizeof(char) * haystack_len);
	if (!buf)
		RPMEMD_FATAL("!malloc");
	/* split haystack at the placeholder and rebuild around home_dir */
	*pos = '\0';
	int r = util_snprintf(buf, haystack_len, "%s%s%s", haystack, home_dir,
			after);
	if (r < 0)
		RPMEMD_FATAL("!snprintf");
	free(haystack);
	return buf;
}
/*
 * config_set_default -- (internal) load default config
 *
 * Fills config with built-in defaults; 'poolset_dir' starts as the given
 * directory (the caller passes the user's home directory).  The log file
 * path and poolset dir are heap-allocated and must be released with
 * rpmemd_config_free().
 */
static void
config_set_default(struct rpmemd_config *config, const char *poolset_dir)
{
	config->log_file = strdup(RPMEMD_DEFAULT_LOG_FILE);
	if (!config->log_file)
		RPMEMD_FATAL("!strdup");
	config->poolset_dir = strdup(poolset_dir);
	if (!config->poolset_dir)
		RPMEMD_FATAL("!strdup");
	config->persist_apm = false;
	config->persist_general = true;
	config->use_syslog = true;
	config->max_lanes = RPMEM_DEFAULT_MAX_LANES;
	config->log_level = RPD_LOG_ERR;
	config->rm_poolset = NULL;
	config->force = false;
	config->nthreads = RPMEM_DEFAULT_NTHREADS;
}
/*
 * rpmemd_config_read -- read config from cl and config files
 *
 * cl params overwrite configuration from any config file. Config files are
 * read in order:
 * 1. Global config file
 * 2. User config file
 * or
 * cl provided config file
 *
 * Returns 0 on success; on failure the config is released and 1 returned.
 */
int
rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[])
{
	const char *cl_config_file = NULL;
	char user_config_file[PATH_MAX];
	char home_dir[PATH_MAX];
	uint64_t cl_options = 0;
	get_home_dir(home_dir, PATH_MAX);
	config_set_default(config, home_dir);
	/* cl args go first only to collect the "already set" option mask */
	parse_cl_args(argc, argv, config, &cl_config_file, &cl_options);
	if (cl_config_file) {
		/* explicit -c file is required to exist */
		if (parse_config_file(cl_config_file, config, cl_options, 1)) {
			rpmemd_config_free(config);
			return 1;
		}
	} else {
		/* both default config files are optional */
		if (parse_config_file(RPMEMD_GLOBAL_CONFIG_FILE, config,
				cl_options, 0)) {
			rpmemd_config_free(config);
			return 1;
		}
		concat_dir_and_file_name(user_config_file, PATH_MAX, home_dir,
				RPMEMD_USER_CONFIG_FILE);
		if (parse_config_file(user_config_file, config, cl_options,
				0)) {
			rpmemd_config_free(config);
			return 1;
		}
	}
	/* expand a possible $HOME placeholder in the poolset directory */
	config->poolset_dir = str_replace_home(config->poolset_dir, home_dir);
	return 0;
}
/*
 * rpmemd_config_free -- rpmemd config release
 *
 * Frees the heap-allocated configuration fields and resets them, so a
 * repeated call on the same config is a harmless no-op.
 */
void
rpmemd_config_free(struct rpmemd_config *config)
{
	free(config->log_file);
	free(config->poolset_dir);
	/* guard against double free on a repeated call */
	config->log_file = NULL;
	config->poolset_dir = NULL;
}
| 15,007 | 22.413417 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_db.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_db.c -- rpmemd database of pool set files
*/
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/file.h>
#include <sys/mman.h>
#include "queue.h"
#include "set.h"
#include "os.h"
#include "out.h"
#include "file.h"
#include "sys_util.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
/*
* struct rpmemd_db -- pool set database structure
*/
struct rpmemd_db {
os_mutex_t lock;
char *root_dir;
mode_t mode;
};
/*
* declaration of the 'struct list_head' type
*/
PMDK_LIST_HEAD(list_head, rpmemd_db_entry);
/*
* struct rpmemd_db_entry -- entry in the pool set list
*/
struct rpmemd_db_entry {
PMDK_LIST_ENTRY(rpmemd_db_entry) next;
char *pool_desc;
struct pool_set *set;
};
/*
 * rpmemd_db_init -- initialize the rpmem database of pool set files
 *
 * 'root_dir' must be an absolute path; it is duplicated, so the caller
 * retains ownership of the argument.  'mode' is later applied to newly
 * created pool files.  Returns NULL with errno set on failure.
 */
struct rpmemd_db *
rpmemd_db_init(const char *root_dir, mode_t mode)
{
	if (root_dir[0] != '/') {
		RPMEMD_LOG(ERR, "root directory is not an absolute path"
				" -- '%s'", root_dir);
		errno = EINVAL;
		return NULL;
	}
	struct rpmemd_db *db = calloc(1, sizeof(*db));
	if (!db) {
		RPMEMD_LOG(ERR, "!allocating the rpmem database structure");
		return NULL;
	}
	db->root_dir = strdup(root_dir);
	if (!db->root_dir) {
		RPMEMD_LOG(ERR, "!allocating the root dir path");
		free(db);
		return NULL;
	}
	db->mode = mode;
	util_mutex_init(&db->lock);
	return db;
}
/*
 * rpmemd_db_concat -- (internal) concatenate two paths
 *
 * Returns a newly allocated "path1/path2" string (caller frees).  path1
 * must be absolute and path2 relative; returns NULL with errno set on
 * error -- EBADF specifically marks an absolute path2.
 */
static char *
rpmemd_db_concat(const char *path1, const char *path2)
{
	size_t len1 = strlen(path1);
	size_t len2 = strlen(path2);
	size_t new_len = len1 + len2 + 2; /* +1 for '/', +1 for '\0' */
	if (path1[0] != '/') {
		RPMEMD_LOG(ERR, "the first path is not an absolute one -- '%s'",
				path1);
		errno = EINVAL;
		return NULL;
	}
	if (path2[0] == '/') {
		RPMEMD_LOG(ERR, "the second path is not a relative one -- '%s'",
				path2);
		/* set to EBADF to distinguish this case from other errors */
		errno = EBADF;
		return NULL;
	}
	char *new_str = malloc(new_len);
	if (new_str == NULL) {
		RPMEMD_LOG(ERR, "!allocating path buffer");
		return NULL;
	}
	int ret = util_snprintf(new_str, new_len, "%s/%s", path1, path2);
	if (ret < 0) {
		RPMEMD_LOG(ERR, "!snprintf");
		free(new_str);
		errno = EINVAL;
		return NULL;
	}
	return new_str;
}
/*
 * rpmemd_db_get_path -- (internal) get the full path of the pool set file
 *
 * Caller frees the returned string; NULL on error (see rpmemd_db_concat).
 */
static char *
rpmemd_db_get_path(struct rpmemd_db *db, const char *pool_desc)
{
	return rpmemd_db_concat(db->root_dir, pool_desc);
}
/*
 * rpmemd_db_pool_madvise -- (internal) workaround device dax alignment issue
 *
 * Returns 0 on success, -1 when madvise() fails.
 */
static int
rpmemd_db_pool_madvise(struct pool_set *set)
{
	/*
	 * This is a workaround for an issue with using device dax with
	 * libibverbs. The problem is that we use ibv_fork_init(3) which
	 * makes all registered memory being madvised with MADV_DONTFORK
	 * flag. In libpmemobj the remote replication is performed without
	 * pool header (first 4k). In such case the address passed to
	 * madvise(2) is aligned to 4k, but device dax can require different
	 * alignment (default is 2MB). This workaround madvises the entire
	 * memory region before registering it by ibv_reg_mr(3).
	 */
	const struct pool_set_part *part = &set->replica[0]->part[0];
	if (part->is_dev_dax) {
		int ret = os_madvise(part->addr, part->filesize,
			MADV_DONTFORK);
		if (ret) {
			ERR("!madvise");
			return -1;
		}
	}
	return 0;
}
/*
 * rpmemd_get_attr -- (internal) get pool attributes from remote pool attributes
 *
 * Field-by-field translation from the wire-format rpmem_pool_attr to the
 * local pool_attr layout; no validation is performed here.
 */
static void
rpmemd_get_attr(struct pool_attr *attr, const struct rpmem_pool_attr *rattr)
{
	LOG(3, "attr %p, rattr %p", attr, rattr);
	memcpy(attr->signature, rattr->signature, POOL_HDR_SIG_LEN);
	attr->major = rattr->major;
	attr->features.compat = rattr->compat_features;
	attr->features.incompat = rattr->incompat_features;
	attr->features.ro_compat = rattr->ro_compat_features;
	memcpy(attr->poolset_uuid, rattr->poolset_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->first_part_uuid, rattr->uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->prev_repl_uuid, rattr->prev_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->next_repl_uuid, rattr->next_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->arch_flags, rattr->user_flags, POOL_HDR_ARCH_LEN);
}
/*
 * rpmemd_db_pool_create -- create a new pool set
 *
 * Creates the pool set described by 'pool_desc' (relative to the db root)
 * with attributes taken from 'rattr' (may be NULL) and returns a handle
 * holding the mapped address and size; NULL on error.
 *
 * NOTE(review): the pool_size parameter is never used -- the pool is
 * created with RPMEM_MIN_POOL; confirm this is intended.
 */
struct rpmemd_db_pool *
rpmemd_db_pool_create(struct rpmemd_db *db, const char *pool_desc,
	size_t pool_size, const struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_lock(&db->lock);
	struct rpmemd_db_pool *prp = NULL;
	struct pool_set *set;
	char *path;
	int ret;
	prp = malloc(sizeof(struct rpmemd_db_pool));
	if (!prp) {
		RPMEMD_LOG(ERR, "!allocating pool set db entry");
		goto err_unlock;
	}
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		goto err_free_prp;
	}
	struct pool_attr attr;
	struct pool_attr *pattr = NULL;
	if (rattr != NULL) {
		rpmemd_get_attr(&attr, rattr);
		pattr = &attr;
	}
	ret = util_pool_create_uuids(&set, path, 0, RPMEM_MIN_POOL,
			RPMEM_MIN_PART, pattr, NULL, REPLICAS_DISABLED,
			POOL_REMOTE);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot create pool set -- '%s'", path);
		goto err_free_path;
	}
	/* a chmod failure is only logged, not treated as fatal */
	ret = util_poolset_chmod(set, db->mode);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot change pool set mode bits to 0%o",
				db->mode);
	}
	if (rpmemd_db_pool_madvise(set))
		goto err_poolset_close;
	/* mark as opened */
	prp->pool_addr = set->replica[0]->part[0].addr;
	prp->pool_size = set->poolsize;
	prp->set = set;
	free(path);
	util_mutex_unlock(&db->lock);
	return prp;
err_poolset_close:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_path:
	free(path);
err_free_prp:
	free(prp);
err_unlock:
	util_mutex_unlock(&db->lock);
	return NULL;
}
/*
 * rpmemd_db_pool_open -- open a pool set
 *
 * Opens the pool set described by 'pool_desc' and fills 'rattr' with the
 * pool's attributes.  Returns a handle with the mapped address and size,
 * or NULL on error.
 *
 * NOTE(review): the pool_size parameter is never used here; confirm this
 * is intended.
 */
struct rpmemd_db_pool *
rpmemd_db_pool_open(struct rpmemd_db *db, const char *pool_desc,
	size_t pool_size, struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(db != NULL);
	RPMEMD_ASSERT(rattr != NULL);
	util_mutex_lock(&db->lock);
	struct rpmemd_db_pool *prp = NULL;
	struct pool_set *set;
	char *path;
	int ret;
	prp = malloc(sizeof(struct rpmemd_db_pool));
	if (!prp) {
		RPMEMD_LOG(ERR, "!allocating pool set db entry");
		goto err_unlock;
	}
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		goto err_free_prp;
	}
	ret = util_pool_open_remote(&set, path, 0, RPMEM_MIN_PART, rattr);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot open pool set -- '%s'", path);
		goto err_free_path;
	}
	if (rpmemd_db_pool_madvise(set))
		goto err_poolset_close;
	/* mark as opened */
	prp->pool_addr = set->replica[0]->part[0].addr;
	prp->pool_size = set->poolsize;
	prp->set = set;
	free(path);
	util_mutex_unlock(&db->lock);
	return prp;
err_poolset_close:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_path:
	free(path);
err_free_prp:
	free(prp);
err_unlock:
	util_mutex_unlock(&db->lock);
	return NULL;
}
/*
 * rpmemd_db_pool_close -- close a pool set
 *
 * Closes the pool set (keeping the part files on disk) and releases the
 * handle returned by rpmemd_db_pool_create()/_open().
 */
void
rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_lock(&db->lock);
	util_poolset_close(prp->set, DO_NOT_DELETE_PARTS);
	free(prp);
	util_mutex_unlock(&db->lock);
}
/*
 * rpmemd_db_pool_set_attr -- overwrite pool attributes
 *
 * Only single-replica pool sets are supported here (asserted); returns
 * the result of util_replica_set_attr().
 */
int
rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
	const struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(prp != NULL);
	RPMEMD_ASSERT(prp->set != NULL);
	RPMEMD_ASSERT(prp->set->nreplicas == 1);
	return util_replica_set_attr(prp->set->replica[0], rattr);
}
/* arguments/result carried through util_poolset_foreach_part() */
struct rm_cb_args {
	int force;	/* nonzero: ignore unlink errors */
	int ret;	/* first recorded unlink error (0 if none) */
};
/*
 * rm_poolset_cb -- (internal) callback for removing part files
 *
 * Returns -1 (aborting the iteration) for remote replicas, which are not
 * supported; otherwise unlinks the local part file.  Unless 'force' is
 * set, an unlink failure is logged and recorded in args->ret, but 0 is
 * returned so the remaining parts are still removed.
 */
static int
rm_poolset_cb(struct part_file *pf, void *arg)
{
	struct rm_cb_args *args = (struct rm_cb_args *)arg;
	if (pf->is_remote) {
		RPMEMD_LOG(ERR, "removing remote replica not supported");
		return -1;
	}
	int ret = util_unlink_flock(pf->part->path);
	if (!args->force && ret) {
		RPMEMD_LOG(ERR, "!unlink -- '%s'", pf->part->path);
		args->ret = ret;
	}
	return 0;
}
/*
 * rpmemd_db_pool_remove -- remove a pool set
 *
 * Unlinks all local part files of the pool set; with 'force' set, unlink
 * errors are ignored.  When 'pool_set' is nonzero the pool set file
 * itself is removed as well.  Returns 0 on success.
 */
int
rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
	int force, int pool_set)
{
	RPMEMD_ASSERT(db != NULL);
	RPMEMD_ASSERT(pool_desc != NULL);
	util_mutex_lock(&db->lock);
	struct rm_cb_args args;
	args.force = force;
	args.ret = 0;
	char *path;
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		args.ret = -1;
		goto err_unlock;
	}
	int ret = util_poolset_foreach_part(path, rm_poolset_cb, &args);
	if (!force && ret) {
		RPMEMD_LOG(ERR, "!removing '%s' failed", path);
		args.ret = ret;
		goto err_free_path;
	}
	if (pool_set)
		os_unlink(path);
err_free_path:
	free(path);
err_unlock:
	util_mutex_unlock(&db->lock);
	return args.ret;
}
/*
 * rpmemd_db_fini -- deinitialize the rpmem database of pool set files
 *
 * Destroys the lock and releases all memory owned by the database; 'db'
 * must not be used afterwards.
 */
void
rpmemd_db_fini(struct rpmemd_db *db)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_destroy(&db->lock);
	free(db->root_dir);
	free(db);
}
/*
 * rpmemd_db_check_dups_set -- (internal) check for duplicates in the database
 *
 * Returns -1 if 'path' names any part file of the given pool set,
 * 0 otherwise.
 */
static inline int
rpmemd_db_check_dups_set(struct pool_set *set, const char *path)
{
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			if (strcmp(path, rep->part[p].path) == 0)
				return -1;
		}
	}
	return 0;
}
/*
 * rpmemd_db_check_dups -- (internal) check for duplicates in the database
 *
 * Compares every part file of 'set' against all pool sets already on the
 * list; returns -1 with errno = EEXIST on the first duplicate found.
 */
static int
rpmemd_db_check_dups(struct list_head *head, struct rpmemd_db *db,
	const char *pool_desc, struct pool_set *set)
{
	struct rpmemd_db_entry *edb;
	PMDK_LIST_FOREACH(edb, head, next) {
		for (unsigned r = 0; r < edb->set->nreplicas; r++) {
			struct pool_replica *rep = edb->set->replica[r];
			for (unsigned p = 0; p < rep->nparts; p++) {
				if (rpmemd_db_check_dups_set(set,
						rep->part[p].path)) {
					RPMEMD_LOG(ERR, "part file '%s' from "
						"pool set '%s' duplicated in "
						"pool set '%s'",
						rep->part[p].path,
						pool_desc,
						edb->pool_desc);
					errno = EEXIST;
					return -1;
				}
			}
		}
	}
	return 0;
}
/*
 * rpmemd_db_add -- (internal) add an entry for a given set to the database
 *
 * The new entry stores 'set' and a private copy of 'pool_desc' and is
 * linked at the head of the list.  Returns NULL on allocation failure
 * (in which case 'set' remains owned by the caller).
 */
static struct rpmemd_db_entry *
rpmemd_db_add(struct list_head *head, struct rpmemd_db *db,
	const char *pool_desc, struct pool_set *set)
{
	struct rpmemd_db_entry *edb;
	edb = calloc(1, sizeof(*edb));
	if (!edb) {
		RPMEMD_LOG(ERR, "!allocating database entry");
		goto err_calloc;
	}
	edb->set = set;
	edb->pool_desc = strdup(pool_desc);
	if (!edb->pool_desc) {
		RPMEMD_LOG(ERR, "!allocating path for database entry");
		goto err_strdup;
	}
	PMDK_LIST_INSERT_HEAD(head, edb, next);
	return edb;
err_strdup:
	free(edb);
err_calloc:
	return NULL;
}
/*
 * new_paths -- (internal) create two new paths
 *
 * Builds the filesystem path "dir/name" in *path and the pool descriptor
 * "old_desc/name" (or just "name" when old_desc is empty) in *new_desc.
 * On success the caller owns both strings; on failure -1 is returned and
 * nothing is left allocated.
 */
static int
new_paths(const char *dir, const char *name, const char *old_desc,
	char **path, char **new_desc)
{
	*path = rpmemd_db_concat(dir, name);
	if (!(*path))
		return -1;
	if (old_desc[0] != 0)
		*new_desc = rpmemd_db_concat(old_desc, name);
	else {
		*new_desc = strdup(name);
		if (!(*new_desc)) {
			RPMEMD_LOG(ERR, "!allocating new descriptor");
		}
	}
	if (!(*new_desc)) {
		free(*path);
		return -1;
	}
	return 0;
}
/*
 * rpmemd_db_check_dir_r -- (internal) recursively check given directory
 * for duplicates
 *
 * Walks 'dir' recursively; every non-directory entry is treated as a pool
 * set file, parsed, checked for part files duplicated against pool sets
 * seen so far and then added to the 'head' list (which takes ownership of
 * the parsed set).  'pool_desc' is the descriptor prefix accumulated so
 * far ("" at the top level).  Returns 0 on success, -1 on error.
 *
 * NOTE(review): d_type may be DT_UNKNOWN on some file systems; such
 * entries would be treated as pool set files here.
 */
static int
rpmemd_db_check_dir_r(struct list_head *head, struct rpmemd_db *db,
	const char *dir, char *pool_desc)
{
	char *new_dir, *new_desc, *full_path;
	struct dirent *dentry;
	struct pool_set *set = NULL;
	DIR *dirp;
	int ret = 0;
	dirp = opendir(dir);
	if (dirp == NULL) {
		RPMEMD_LOG(ERR, "cannot open the directory -- %s", dir);
		return -1;
	}
	while ((dentry = readdir(dirp)) != NULL) {
		if (strcmp(dentry->d_name, ".") == 0 ||
		    strcmp(dentry->d_name, "..") == 0)
			continue;
		if (dentry->d_type == DT_DIR) {	/* directory */
			if (new_paths(dir, dentry->d_name, pool_desc,
					&new_dir, &new_desc))
				goto err_closedir;
			/* call recursively for a new directory */
			ret = rpmemd_db_check_dir_r(head, db, new_dir,
					new_desc);
			free(new_dir);
			free(new_desc);
			if (ret)
				goto err_closedir;
			continue;
		}
		if (new_paths(dir, dentry->d_name, pool_desc,
				&full_path, &new_desc)) {
			goto err_closedir;
		}
		if (util_poolset_read(&set, full_path)) {
			RPMEMD_LOG(ERR, "!error reading pool set file -- %s",
					full_path);
			goto err_free_paths;
		}
		if (rpmemd_db_check_dups(head, db, new_desc, set)) {
			RPMEMD_LOG(ERR, "!duplicate found in pool set file"
					" -- %s", full_path);
			goto err_free_set;
		}
		/* on success the entry stores 'set' and a copy of new_desc */
		if (rpmemd_db_add(head, db, new_desc, set) == NULL) {
			goto err_free_set;
		}
		free(new_desc);
		free(full_path);
	}
	closedir(dirp);
	return 0;
err_free_set:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_paths:
	free(new_desc);
	free(full_path);
err_closedir:
	closedir(dirp);
	return -1;
}
/*
 * rpmemd_db_check_dir -- check given directory for duplicates
 *
 * Scans the whole database root directory for part files referenced by
 * more than one pool set; all pool sets opened during the scan are closed
 * and the temporary list released before returning.
 */
int
rpmemd_db_check_dir(struct rpmemd_db *db)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_lock(&db->lock);
	struct list_head head;
	PMDK_LIST_INIT(&head);
	int ret = rpmemd_db_check_dir_r(&head, db, db->root_dir, "");
	while (!PMDK_LIST_EMPTY(&head)) {
		struct rpmemd_db_entry *edb = PMDK_LIST_FIRST(&head);
		PMDK_LIST_REMOVE(edb, next);
		util_poolset_close(edb->set, DO_NOT_DELETE_PARTS);
		free(edb->pool_desc);
		free(edb);
	}
	util_mutex_unlock(&db->lock);
	return ret;
}
/*
 * rpmemd_db_pool_is_pmem -- true if pool is in PMEM
 *
 * Reports the is_pmem flag of replica 0 of the pool set.
 */
int
rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool)
{
	return REP(pool->set, 0)->is_pmem;
}
| 13,747 | 20.616352 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_fip.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_fip.h -- rpmemd libfabric provider module header file
*/
#include <stddef.h>
struct rpmemd_fip;	/* opaque libfabric-provider handle */

/*
 * struct rpmemd_fip_attr -- input attributes for rpmemd_fip_init()
 */
struct rpmemd_fip_attr {
	void *addr;		/* base address of the served memory region */
	size_t size;		/* size of the region */
	unsigned nlanes;	/* number of lanes */
	size_t nthreads;	/* number of processing threads */
	size_t buff_size;	/* internal buffer size */
	enum rpmem_provider provider;
	enum rpmem_persist_method persist_method;
	int (*persist)(const void *addr, size_t len);
	void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
	int (*deep_persist)(const void *addr, size_t len, void *ctx);
	void *ctx;		/* opaque context passed to deep_persist */
};
struct rpmemd_fip *rpmemd_fip_init(const char *node,
const char *service,
struct rpmemd_fip_attr *attr,
struct rpmem_resp_attr *resp,
enum rpmem_err *err);
void rpmemd_fip_fini(struct rpmemd_fip *fip);
int rpmemd_fip_accept(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_process_start(struct rpmemd_fip *fip);
int rpmemd_fip_process_stop(struct rpmemd_fip *fip);
int rpmemd_fip_wait_close(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_close(struct rpmemd_fip *fip);
| 1,066 | 27.078947 | 70 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_obc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc.h -- rpmemd out-of-band connection declarations
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
struct rpmemd_obc;	/* opaque out-of-band connection handle */

/*
 * struct rpmemd_obc_requests -- callbacks invoked by rpmemd_obc_process()
 * for each request received on the out-of-band connection; each callback
 * receives the connection handle and the caller-supplied 'arg'
 */
struct rpmemd_obc_requests {
	int (*create)(struct rpmemd_obc *obc, void *arg,
			const struct rpmem_req_attr *req,
			const struct rpmem_pool_attr *pool_attr);
	int (*open)(struct rpmemd_obc *obc, void *arg,
			const struct rpmem_req_attr *req);
	int (*close)(struct rpmemd_obc *obc, void *arg, int flags);
	int (*set_attr)(struct rpmemd_obc *obc, void *arg,
			const struct rpmem_pool_attr *pool_attr);
};
struct rpmemd_obc *rpmemd_obc_init(int fd_in, int fd_out);
void rpmemd_obc_fini(struct rpmemd_obc *obc);
int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status);
int rpmemd_obc_process(struct rpmemd_obc *obc,
struct rpmemd_obc_requests *req_cb, void *arg);
int rpmemd_obc_create_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res);
int rpmemd_obc_open_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status);
int rpmemd_obc_close_resp(struct rpmemd_obc *obc,
int status);
| 1,296 | 31.425 | 65 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/create.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* create.c -- pmempool create command source file
*/
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <errno.h>
#include <libgen.h>
#include <err.h>
#include "common.h"
#include "file.h"
#include "create.h"
#include "os.h"
#include "set.h"
#include "output.h"
#include "libpmemblk.h"
#include "libpmemlog.h"
#include "libpmempool.h"
#define DEFAULT_MODE 0664
/*
 * pmempool_create -- context and args for create command
 */
struct pmempool_create {
	int verbose;		/* verbosity level (-v) */
	char *fname;		/* target pool file name */
	int fexists;
	char *inherit_fname;	/* pool to inherit parameters from (-i) */
	int max_size;		/* use all available space (-M) */
	char *str_type;		/* pool type string from the command line */
	struct pmem_pool_params params;		/* effective parameters */
	struct pmem_pool_params inherit_params;	/* parameters read via -i */
	char *str_size;		/* size argument as string (-s) */
	char *str_mode;		/* mode argument as string (-m) */
	char *str_bsize;	/* block size argument as string */
	uint64_t csize;
	int write_btt_layout;	/* force writing the BTT layout (-w) */
	int force;		/* remove the pool first (-f) */
	char *layout;		/* pmemobj layout name (-l) */
	struct options *opts;
	int clearbadblocks;	/* clear bad blocks in existing files (-b) */
};
/*
 * pmempool_create_default -- default args for create command
 *
 * Used as the initial value of the command context; fields not listed
 * here start as zero.  The default file mode is DEFAULT_MODE (0664).
 */
static const struct pmempool_create pmempool_create_default = {
	.verbose = 0,
	.fname = NULL,
	.fexists = 0,
	.inherit_fname = NULL,
	.max_size = 0,
	.str_type = NULL,
	.str_bsize = NULL,
	.csize = 0,
	.write_btt_layout = 0,
	.force = 0,
	.layout = NULL,
	.clearbadblocks = 0,
	.params = {
		.type = PMEM_POOL_TYPE_UNKNOWN,
		.size = 0,
		.mode = DEFAULT_MODE,
	}
};
/*
* help_str -- string for help message
*/
static const char * const help_str =
"Create pmem pool of specified size, type and name\n"
"\n"
"Common options:\n"
" -s, --size <size> size of pool\n"
" -M, --max-size use maximum available space on file system\n"
" -m, --mode <octal> set permissions to <octal> (the default is 0664)\n"
" -i, --inherit <file> take required parameters from specified pool file\n"
" -b, --clear-bad-blocks clear bad blocks in existing files\n"
" -f, --force remove the pool first\n"
" -v, --verbose increase verbosity level\n"
" -h, --help display this help and exit\n"
"\n"
"Options for PMEMBLK:\n"
" -w, --write-layout force writing the BTT layout\n"
"\n"
"Options for PMEMOBJ:\n"
" -l, --layout <name> layout name stored in pool's header\n"
"\n"
"For complete documentation see %s-create(1) manual page.\n"
;
/*
 * long_options -- command line options
 *
 * The OPT_* masks OR-ed into 'val' restrict an option to specific pool
 * types (see util_options_getopt() usage below).
 */
static const struct option long_options[] = {
	{"size",		required_argument,	NULL, 's' | OPT_ALL},
	{"verbose",		no_argument,		NULL, 'v' | OPT_ALL},
	{"help",		no_argument,		NULL, 'h' | OPT_ALL},
	{"max-size",		no_argument,		NULL, 'M' | OPT_ALL},
	{"inherit",		required_argument,	NULL, 'i' | OPT_ALL},
	{"mode",		required_argument,	NULL, 'm' | OPT_ALL},
	{"write-layout",	no_argument,		NULL, 'w' | OPT_BLK},
	{"layout",		required_argument,	NULL, 'l' | OPT_OBJ},
	{"force",		no_argument,		NULL, 'f' | OPT_ALL},
	{"clear-bad-blocks",	no_argument,		NULL, 'b' | OPT_ALL},
	{NULL,			0,			NULL,  0 },
};
/*
 * print_usage -- print application usage short description
 */
static void
print_usage(const char *appname)
{
	printf("Usage: %s create [<args>] <blk|log|obj> [<bsize>] <file>\n",
			appname);
}
/*
 * print_version -- print version string
 */
static void
print_version(const char *appname)
{
	printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmempool_create_help -- print help message for create command
 *
 * help_str contains a %s placeholder substituted with the program name.
 */
void
pmempool_create_help(const char *appname)
{
	print_usage(appname);
	print_version(appname);
	printf(help_str, appname);
}
/*
 * pmempool_create_obj -- create pmem obj pool
 *
 * Creates the pool with the requested layout, size and mode, then closes
 * it immediately -- this command only creates the file.  Returns 0/-1.
 */
static int
pmempool_create_obj(struct pmempool_create *pcp)
{
	PMEMobjpool *pop = pmemobj_create(pcp->fname, pcp->layout,
			pcp->params.size, pcp->params.mode);
	if (!pop) {
		outv_err("'%s' -- %s\n", pcp->fname, pmemobj_errormsg());
		return -1;
	}
	pmemobj_close(pop);
	return 0;
}
/*
 * pmempool_create_blk -- create pmem blk pool
 *
 * Creates the pool and, when -w was given, forces writing the BTT layout
 * by touching block 0.  Returns 0 on success, -1 on error.
 */
static int
pmempool_create_blk(struct pmempool_create *pcp)
{
	ASSERTne(pcp->params.blk.bsize, 0);
	int ret = 0;
	PMEMblkpool *pbp = pmemblk_create(pcp->fname, pcp->params.blk.bsize,
			pcp->params.size, pcp->params.mode);
	if (!pbp) {
		outv_err("'%s' -- %s\n", pcp->fname, pmemblk_errormsg());
		return -1;
	}
	if (pcp->write_btt_layout) {
		/*
		 * NOTE(review): this prints the write_btt_layout flag value,
		 * not a block number -- the layout is written via block 0.
		 */
		outv(1, "Writing BTT layout using block %d.\n",
				pcp->write_btt_layout);
		if (pmemblk_set_error(pbp, 0) || pmemblk_set_zero(pbp, 0)) {
			outv_err("writing BTT layout to block 0 failed\n");
			ret = -1;
		}
	}
	pmemblk_close(pbp);
	return ret;
}
/*
 * pmempool_create_log -- create pmem log pool
 *
 * Creates the pool file and closes it immediately.  Returns 0/-1.
 */
static int
pmempool_create_log(struct pmempool_create *pcp)
{
	PMEMlogpool *plp = pmemlog_create(pcp->fname,
			pcp->params.size, pcp->params.mode);
	if (!plp) {
		outv_err("'%s' -- %s\n", pcp->fname, pmemlog_errormsg());
		return -1;
	}
	pmemlog_close(plp);
	return 0;
}
/*
* pmempool_get_max_size -- return maximum allowed size of file
*/
#ifndef _WIN32
static int
pmempool_get_max_size(const char *fname, uint64_t *sizep)
{
struct statvfs buf;
int ret = 0;
char *name = strdup(fname);
if (name == NULL) {
return -1;
}
char *dir = dirname(name);
if (statvfs(dir, &buf))
ret = -1;
else
*sizep = buf.f_bsize * buf.f_bavail;
free(name);
return ret;
}
#else
static int
pmempool_get_max_size(const char *fname, uint64_t *sizep)
{
	/* dirname() may modify its argument, hence the private copy */
	char *name = strdup(fname);
	if (name == NULL)
		return -1;

	char *dir = dirname(name);

	wchar_t *str = util_toUTF16(dir);
	if (str == NULL) {
		free(name);
		return -1;
	}

	int ret = 0;
	ULARGE_INTEGER freespace;
	if (GetDiskFreeSpaceExW(str, &freespace, NULL, NULL) == 0)
		ret = -1;
	else
		*sizep = freespace.QuadPart;

	free(str);
	free(name);
	return ret;
}
#endif
/*
 * print_pool_params -- print some parameters of a pool
 *
 * Prints type/size/mode at verbosity level 1, plus the type-specific
 * field: block size for blk pools, layout name for obj pools.
 */
static void
print_pool_params(struct pmem_pool_params *params)
{
	outv(1, "\ttype : %s\n", out_get_pool_type_str(params->type));
	outv(1, "\tsize : %s\n", out_get_size_str(params->size, 2));
	outv(1, "\tmode : 0%o\n", params->mode);
	switch (params->type) {
	case PMEM_POOL_TYPE_BLK:
		outv(1, "\tbsize : %s\n",
				out_get_size_str(params->blk.bsize, 0));
		break;
	case PMEM_POOL_TYPE_OBJ:
		outv(1, "\tlayout: '%s'\n", params->obj.layout);
		break;
	default:
		break;
	}
}
/*
 * inherit_pool_params -- inherit pool parameters from specified file
 *
 * Parses the pool pointed to by --inherit and stores its parameters in
 * pcp->inherit_params. Returns 0 on success, -1 on parse failure or
 * unknown pool type.
 */
static int
inherit_pool_params(struct pmempool_create *pcp)
{
	outv(1, "Parsing pool: '%s'\n", pcp->inherit_fname);

	/*
	 * No pool type given on the command line, so read the required
	 * parameters from the file passed with --inherit.
	 */
	if (pmem_pool_parse_params(pcp->inherit_fname,
			&pcp->inherit_params, 1)) {
		if (errno)
			perror(pcp->inherit_fname);
		else
			outv_err("%s: cannot determine type of pool\n",
				pcp->inherit_fname);
		return -1;
	}

	if (pcp->inherit_params.type == PMEM_POOL_TYPE_UNKNOWN) {
		outv_err("'%s' -- unknown pool type\n", pcp->inherit_fname);
		return -1;
	}

	print_pool_params(&pcp->inherit_params);
	return 0;
}
/*
 * pmempool_create_parse_args -- parse command line args
 *
 * Fills *pcp from the getopt-style options and the remaining positional
 * arguments ([<type> [<bsize>]] <file>). Returns 0 on success, -1
 * (after printing usage or an error) on bad input; -h exits directly.
 */
static int
pmempool_create_parse_args(struct pmempool_create *pcp, const char *appname,
		int argc, char *argv[], struct options *opts)
{
	int opt, ret;
	while ((opt = util_options_getopt(argc, argv, "vhi:s:Mm:l:wfb",
			opts)) != -1) {
		switch (opt) {
		case 'v':
			pcp->verbose = 1;
			break;
		case 'h':
			pmempool_create_help(appname);
			exit(EXIT_SUCCESS);
		case 's':
			/* keep the raw string -- the inherit logic checks it */
			pcp->str_size = optarg;
			ret = util_parse_size(optarg,
					(size_t *)&pcp->params.size);
			if (ret || pcp->params.size == 0) {
				outv_err("invalid size value specified '%s'\n",
						optarg);
				return -1;
			}
			break;
		case 'M':
			pcp->max_size = 1;
			break;
		case 'm':
			/* keep the raw string -- the inherit logic checks it */
			pcp->str_mode = optarg;
			if (util_parse_mode(optarg, &pcp->params.mode)) {
				outv_err("invalid mode value specified '%s'\n",
						optarg);
				return -1;
			}
			break;
		case 'i':
			pcp->inherit_fname = optarg;
			break;
		case 'w':
			pcp->write_btt_layout = 1;
			break;
		case 'l':
			pcp->layout = optarg;
			break;
		case 'f':
			pcp->force = 1;
			break;
		case 'b':
			pcp->clearbadblocks = 1;
			break;
		default:
			print_usage(appname);
			return -1;
		}
	}
	/* check for <type>, <bsize> and <file> strings */
	if (optind + 2 < argc) {
		pcp->str_type = argv[optind];
		pcp->str_bsize = argv[optind + 1];
		pcp->fname = argv[optind + 2];
	} else if (optind + 1 < argc) {
		pcp->str_type = argv[optind];
		pcp->fname = argv[optind + 1];
	} else if (optind < argc) {
		pcp->fname = argv[optind];
		pcp->str_type = NULL;
	} else {
		print_usage(appname);
		return -1;
	}
	return 0;
}
/*
 * allocate_max_size_available_file -- create a new file and preallocate
 * as much space as the file system can provide, halving the requested
 * (page-aligned) length on each ENOSPC until it drops below one page
 */
static int
allocate_max_size_available_file(const char *name_of_file, mode_t mode,
		os_off_t max_size)
{
	int fd = os_open(name_of_file, O_CREAT | O_EXCL | O_RDWR, mode);
	if (fd == -1) {
		outv_err("!open '%s' failed", name_of_file);
		return -1;
	}
	os_off_t offset = 0;
	/* start from the largest page-aligned request */
	os_off_t length = max_size - (max_size % (os_off_t)Pagesize);
	int ret;
	do {
		ret = os_posix_fallocate(fd, offset, length);
		if (ret == 0)
			offset += length;
		else if (ret != ENOSPC) {
			/* hard error -- remove the partially created file */
			os_close(fd);
			if (os_unlink(name_of_file) == -1)
				outv_err("!unlink '%s' failed", name_of_file);
			errno = ret;
			outv_err("!space allocation for '%s' failed",
					name_of_file);
			return -1;
		}
		/* on ENOSPC (or after success) retry with half the length */
		length /= 2;
		length -= (length % (os_off_t)Pagesize);
	} while (length > (os_off_t)Pagesize);
	os_close(fd);
	return 0;
}
/*
 * pmempool_create_func -- main function for create command
 *
 * Validates the combination of command line options (type string vs
 * --inherit, size vs max-size vs poolset), determines the final pool
 * parameters and dispatches to the type-specific create function.
 *
 * All failure paths now go through the 'out' label so that pc.opts
 * (allocated by util_options_alloc()) is always released; previously
 * every early 'return -1' leaked it.
 */
int
pmempool_create_func(const char *appname, int argc, char *argv[])
{
	int ret = 0;
	struct pmempool_create pc = pmempool_create_default;
	pc.opts = util_options_alloc(long_options, sizeof(long_options) /
			sizeof(long_options[0]), NULL);
	/* parse command line arguments */
	ret = pmempool_create_parse_args(&pc, appname, argc, argv, pc.opts);
	if (ret)
		exit(EXIT_FAILURE);
	/* set verbosity level */
	out_set_vlevel(pc.verbose);
	umask(0);
	int exists = util_file_exists(pc.fname);
	if (exists < 0) {
		ret = -1;
		goto out;
	}
	pc.fexists = exists;
	int is_poolset = util_is_poolset_file(pc.fname) == 1;
	if (pc.inherit_fname) {
		if (inherit_pool_params(&pc)) {
			outv_err("parsing pool '%s' failed\n",
					pc.inherit_fname);
			ret = -1;
			goto out;
		}
	}
	/*
	 * Parse pool type and other parameters if --inherit option
	 * passed. It is possible to either pass --inherit option
	 * or pool type string in command line arguments. This is
	 * validated here.
	 */
	if (pc.str_type) {
		/* parse pool type string if passed in command line arguments */
		pc.params.type = pmem_pool_type_parse_str(pc.str_type);
		if (PMEM_POOL_TYPE_UNKNOWN == pc.params.type) {
			outv_err("'%s' -- unknown pool type\n", pc.str_type);
			ret = -1;
			goto out;
		}
		if (PMEM_POOL_TYPE_BLK == pc.params.type) {
			if (pc.str_bsize == NULL) {
				outv_err("blk pool requires <bsize> "
					"argument\n");
				ret = -1;
				goto out;
			}
			if (util_parse_size(pc.str_bsize,
					(size_t *)&pc.params.blk.bsize)) {
				outv_err("cannot parse '%s' as block size\n",
						pc.str_bsize);
				ret = -1;
				goto out;
			}
		}
		if (PMEM_POOL_TYPE_OBJ == pc.params.type && pc.layout != NULL) {
			size_t max_layout = PMEMOBJ_MAX_LAYOUT;
			if (strlen(pc.layout) >= max_layout) {
				outv_err(
				"Layout name is too long, maximum number of characters (including the terminating null byte) is %zu\n",
				max_layout);
				ret = -1;
				goto out;
			}
			size_t len = sizeof(pc.params.obj.layout);
			strncpy(pc.params.obj.layout, pc.layout, len);
			pc.params.obj.layout[len - 1] = '\0';
		}
	} else if (pc.inherit_fname) {
		pc.params.type = pc.inherit_params.type;
	} else {
		/* neither pool type string nor --inherit options passed */
		print_usage(appname);
		ret = -1;
		goto out;
	}
	if (util_options_verify(pc.opts, pc.params.type)) {
		ret = -1;
		goto out;
	}
	if (pc.params.type != PMEM_POOL_TYPE_BLK && pc.str_bsize != NULL) {
		outv_err("invalid option specified for %s pool type"
				" -- block size\n",
			out_get_pool_type_str(pc.params.type));
		ret = -1;
		goto out;
	}
	if (is_poolset) {
		/* a poolset carries its own sizes */
		if (pc.params.size) {
			outv_err("-s|--size cannot be used with "
					"poolset file\n");
			ret = -1;
			goto out;
		}
		if (pc.max_size) {
			outv_err("-M|--max-size cannot be used with "
					"poolset file\n");
			ret = -1;
			goto out;
		}
	}
	if (pc.params.size && pc.max_size) {
		outv_err("-M|--max-size option cannot be used with -s|--size"
				" option\n");
		ret = -1;
		goto out;
	}
	if (pc.inherit_fname) {
		/* take over every parameter not overridden on the cmdline */
		if (!pc.str_size && !pc.max_size)
			pc.params.size = pc.inherit_params.size;
		if (!pc.str_mode)
			pc.params.mode = pc.inherit_params.mode;
		switch (pc.params.type) {
		case PMEM_POOL_TYPE_BLK:
			if (!pc.str_bsize)
				pc.params.blk.bsize =
					pc.inherit_params.blk.bsize;
			break;
		case PMEM_POOL_TYPE_OBJ:
			if (!pc.layout) {
				memcpy(pc.params.obj.layout,
					pc.inherit_params.obj.layout,
					sizeof(pc.params.obj.layout));
			} else {
				size_t len = sizeof(pc.params.obj.layout);
				strncpy(pc.params.obj.layout, pc.layout,
						len - 1);
				pc.params.obj.layout[len - 1] = '\0';
			}
			break;
		default:
			break;
		}
	}
	/*
	 * If neither --size nor --inherit options passed, check
	 * for --max-size option - if not passed use minimum pool size.
	 */
	uint64_t min_size = pmem_pool_get_min_size(pc.params.type);
	if (pc.params.size == 0) {
		if (pc.max_size) {
			outv(1, "Maximum size option passed "
				"- getting available space of file system.\n");
			ret = pmempool_get_max_size(pc.fname,
					&pc.params.size);
			if (ret) {
				outv_err("cannot get available space of fs\n");
				ret = -1;
				goto out;
			}
			if (pc.params.size == 0) {
				outv_err("No space left on device\n");
				ret = -1;
				goto out;
			}
			outv(1, "Available space is %s\n",
				out_get_size_str(pc.params.size, 2));
			if (allocate_max_size_available_file(pc.fname,
					pc.params.mode,
					(os_off_t)pc.params.size)) {
				ret = -1;
				goto out;
			}
			/*
			 * We are going to create pool based
			 * on file size instead of the pc.params.size.
			 */
			pc.params.size = 0;
		} else {
			if (!pc.fexists) {
				outv(1, "No size option passed "
					"- picking minimum pool size.\n");
				pc.params.size = min_size;
			}
		}
	} else {
		if (pc.params.size < min_size) {
			outv_err("size must be >= %lu bytes\n", min_size);
			ret = -1;
			goto out;
		}
	}
	if (pc.force)
		pmempool_rm(pc.fname, PMEMPOOL_RM_FORCE);
	outv(1, "Creating pool: %s\n", pc.fname);
	print_pool_params(&pc.params);
	if (pc.clearbadblocks) {
		/* renamed from 'ret' to avoid shadowing the outer 'ret' */
		int cbb_ret = util_pool_clear_badblocks(pc.fname,
					1 /* ignore non-existing */);
		if (cbb_ret) {
			outv_err("'%s' -- clearing bad blocks failed\n",
					pc.fname);
			ret = -1;
			goto out;
		}
	}
	switch (pc.params.type) {
	case PMEM_POOL_TYPE_BLK:
		ret = pmempool_create_blk(&pc);
		break;
	case PMEM_POOL_TYPE_LOG:
		ret = pmempool_create_log(&pc);
		break;
	case PMEM_POOL_TYPE_OBJ:
		ret = pmempool_create_obj(&pc);
		break;
	default:
		ret = -1;
		break;
	}
	if (ret) {
		outv_err("creating pool file failed\n");
		if (!pc.fexists)
			util_unlink(pc.fname);
	}
out:
	util_options_free(pc.opts);
	return ret;
}
| 14,987 | 21.403587 | 109 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/transform.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* transform.c -- pmempool transform command source file
*/
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <endian.h>
#include "common.h"
#include "output.h"
#include "transform.h"
#include "libpmempool.h"
/*
 * pmempool_transform_context -- context and arguments for transform command
 */
struct pmempool_transform_context {
	unsigned flags; /* flags which modify the command execution */
	char *poolset_file_src; /* a path to a source poolset file */
	char *poolset_file_dst; /* a path to a target poolset file */
};
/*
 * pmempool_transform_default -- default arguments for transform command
 *
 * Copied by value at the start of pmempool_transform_func().
 */
static const struct pmempool_transform_context pmempool_transform_default = {
	.flags = 0,
	.poolset_file_src = NULL,
	.poolset_file_dst = NULL,
};
/*
 * help_str -- string for help message
 *
 * Note: a printf format string -- expects the application name.
 */
static const char * const help_str =
"Modify internal structure of a poolset\n"
"\n"
"Common options:\n"
" -d, --dry-run do not apply changes, only check for viability of"
" transformation\n"
" -v, --verbose increase verbosity level\n"
" -h, --help display this help and exit\n"
"\n"
"For complete documentation see %s-transform(1) manual page.\n"
;
/*
 * long_options -- command line options
 */
static const struct option long_options[] = {
	{"dry-run", no_argument, NULL, 'd'},
	{"help", no_argument, NULL, 'h'},
	{"verbose", no_argument, NULL, 'v'},
	{NULL, 0, NULL, 0 },
};
/*
 * print_usage -- print application usage short description
 */
static void
print_usage(const char *appname)
{
	/* one-line synopsis: source poolset followed by target poolset */
	printf("usage: %s transform [<options>] <poolset_file_src>"
			" <poolset_file_dst>\n", appname);
}
/*
 * print_version -- print version string
 */
static void
print_version(const char *appname)
{
	/* tool name followed by the source-tree version string */
	printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmempool_transform_help -- print help message for the transform command
 *
 * Output order: usage synopsis, version banner, then the detailed help
 * text (help_str is a printf format expecting the application name).
 */
void
pmempool_transform_help(const char *appname)
{
	print_usage(appname);
	print_version(appname);
	printf(help_str, appname);
}
/*
 * pmempool_transform_parse_args -- parse command line arguments
 *
 * (Header previously misnamed this function "pmempool_check_parse_args".)
 * Expects exactly two positional arguments -- the source and the target
 * poolset file; exits on bad usage or -h, returns 0 otherwise.
 */
static int
pmempool_transform_parse_args(struct pmempool_transform_context *ctx,
		const char *appname, int argc, char *argv[])
{
	int opt;
	while ((opt = getopt_long(argc, argv, "dhv",
			long_options, NULL)) != -1) {
		switch (opt) {
		case 'd':
			ctx->flags = PMEMPOOL_TRANSFORM_DRY_RUN;
			break;
		case 'h':
			pmempool_transform_help(appname);
			exit(EXIT_SUCCESS);
		case 'v':
			out_set_vlevel(1);
			break;
		default:
			print_usage(appname);
			exit(EXIT_FAILURE);
		}
	}
	if (optind + 1 < argc) {
		ctx->poolset_file_src = argv[optind];
		ctx->poolset_file_dst = argv[optind + 1];
	} else {
		print_usage(appname);
		exit(EXIT_FAILURE);
	}
	return 0;
}
/*
 * pmempool_transform_func -- main function for the transform command
 *
 * Parses the arguments, runs libpmempool's transform and reports the
 * outcome. Returns 0 on success, -1 on failure (or the parser's
 * non-zero status).
 */
int
pmempool_transform_func(const char *appname, int argc, char *argv[])
{
	struct pmempool_transform_context ctx = pmempool_transform_default;

	/* parse command line arguments */
	int ret = pmempool_transform_parse_args(&ctx, appname, argc, argv);
	if (ret)
		return ret;

	ret = pmempool_transform(ctx.poolset_file_src, ctx.poolset_file_dst,
			ctx.flags);
	if (ret == 0) {
		outv(1, "%s -> %s: transformed\n", ctx.poolset_file_src,
			ctx.poolset_file_dst);
		return 0;
	}

	if (errno)
		outv_err("%s\n", strerror(errno));
	outv_err("failed to transform %s -> %s: %s\n",
			ctx.poolset_file_src, ctx.poolset_file_dst,
			pmempool_errormsg());
	return -1;
}
| 3,689 | 21.919255 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/info_blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* info_blk.c -- pmempool info command source file for blk pool
*/
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <sys/param.h>
#include <endian.h>
#include "os.h"
#include "common.h"
#include "output.h"
#include "info.h"
#include "btt.h"
/*
 * pmempool_info_get_range -- get blocks/data chunk range
 *
 * Clamps the user-requested range to [0, max] relative to 'offset'.
 * Return value:
 * 0 - range is empty
 * 1 - range is not empty
 */
static int
pmempool_info_get_range(struct pmem_info *pip, struct range *rangep,
		struct range *curp, uint32_t max, uint64_t offset)
{
	if (!pip->args.use_range) {
		/* no range requested -- use the full [0, max] span */
		rangep->first = 0;
		rangep->last = max;
		return 1;
	}

	/* requested range lies entirely above or below this chunk */
	if (curp->first > offset + max || curp->last < offset)
		return 0;

	rangep->first = (curp->first >= offset) ? curp->first - offset : 0;
	rangep->last = (curp->last <= offset + max) ?
			curp->last - offset : max;

	return 1;
}
/*
 * info_blk_skip_block -- get action type for block/data chunk
 *
 * Returns 1 when the current block should be skipped according to the
 * requested skip filters, 0 when it should be processed.
 */
static int
info_blk_skip_block(struct pmem_info *pip, int is_zero,
		int is_error)
{
	int no_flag = !is_zero && !is_error;

	if (no_flag && pip->args.blk.skip_no_flag)
		return 1;
	if (is_zero && pip->args.blk.skip_zeros)
		return 1;
	if (is_error && pip->args.blk.skip_error)
		return 1;

	return 0;
}
/*
 * info_btt_data -- print block data and corresponding flags from map
 *
 * Walks the requested LBA ranges of one arena, resolves each external
 * LBA through the BTT map and hex-dumps the block data together with
 * the map entry flags. *countp counts blocks printed across arenas.
 */
static int
info_btt_data(struct pmem_info *pip, int v, struct btt_info *infop,
	uint64_t arena_off, uint64_t offset, uint64_t *countp)
{
	if (!outv_check(v))
		return 0;
	int ret = 0;
	size_t mapsize = infop->external_nlba * BTT_MAP_ENTRY_SIZE;
	uint32_t *map = malloc(mapsize);
	if (!map)
		err(1, "Cannot allocate memory for BTT map");
	uint8_t *block_buff = malloc(infop->external_lbasize);
	if (!block_buff)
		err(1, "Cannot allocate memory for pmemblk block buffer");
	/* read btt map area */
	if (pmempool_info_read(pip, (uint8_t *)map, mapsize,
		arena_off + infop->mapoff)) {
		outv_err("wrong BTT Map size or offset\n");
		ret = -1;
		goto error;
	}
	uint64_t i;
	struct range *curp = NULL;
	struct range range;
	FOREACH_RANGE(curp, &pip->args.ranges) {
		if (pmempool_info_get_range(pip, &range, curp,
					infop->external_nlba - 1, offset) == 0)
			continue;
		for (i = range.first; i <= range.last; i++) {
			uint32_t map_entry = le32toh(map[i]);
			/* an all-clear entry means the block was never written */
			int is_init = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK)
				== 0;
			int is_zero = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK)
				== BTT_MAP_ENTRY_ZERO || is_init;
			int is_error = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK)
				== BTT_MAP_ENTRY_ERROR;
			/* initial (identity) mapping: internal == external LBA */
			uint64_t blockno = is_init ? i :
					map_entry & BTT_MAP_ENTRY_LBA_MASK;
			if (info_blk_skip_block(pip,
						is_zero, is_error))
				continue;
			/* compute block's data address */
			uint64_t block_off = arena_off + infop->dataoff +
				blockno * infop->internal_lbasize;
			if (pmempool_info_read(pip, block_buff,
				infop->external_lbasize, block_off)) {
				outv_err("cannot read %lu block\n", i);
				ret = -1;
				goto error;
			}
			/* print the section title once, before the first block */
			if (*countp == 0)
				outv_title(v, "PMEM BLK blocks data");
			/*
			 * Print block number, offset and flags
			 * from map entry.
			 */
			outv(v, "Block %10lu: offset: %s\n",
				offset + i,
				out_get_btt_map_entry(map_entry));
			/* dump block's data */
			outv_hexdump(v, block_buff, infop->external_lbasize,
					block_off, 1);
			*countp = *countp + 1;
		}
	}
error:
	free(map);
	free(block_buff);
	return ret;
}
/*
 * info_btt_map -- print all map entries
 *
 * Also accumulates the zero/error/no-flag block counters in
 * pip->blk.stats, which is why it runs when only -S/--stats is set.
 */
static int
info_btt_map(struct pmem_info *pip, int v,
	struct btt_info *infop, uint64_t arena_off,
	uint64_t offset, uint64_t *count)
{
	if (!outv_check(v) && !outv_check(pip->args.vstats))
		return 0;
	int ret = 0;
	size_t mapsize = infop->external_nlba * BTT_MAP_ENTRY_SIZE;
	uint32_t *map = malloc(mapsize);
	if (!map)
		err(1, "Cannot allocate memory for BTT map");
	/* read btt map area */
	if (pmempool_info_read(pip, (uint8_t *)map, mapsize,
		arena_off + infop->mapoff)) {
		outv_err("wrong BTT Map size or offset\n");
		ret = -1;
		goto error;
	}
	uint32_t arena_count = 0;
	uint64_t i;
	struct range *curp = NULL;
	struct range range;
	FOREACH_RANGE(curp, &pip->args.ranges) {
		if (pmempool_info_get_range(pip, &range, curp,
					infop->external_nlba - 1, offset) == 0)
			continue;
		for (i = range.first; i <= range.last; i++) {
			uint32_t entry = le32toh(map[i]);
			/* zero covers the explicit flag and the initial state */
			int is_zero = (entry & ~BTT_MAP_ENTRY_LBA_MASK) ==
				BTT_MAP_ENTRY_ZERO ||
				(entry & ~BTT_MAP_ENTRY_LBA_MASK) == 0;
			int is_error = (entry & ~BTT_MAP_ENTRY_LBA_MASK) ==
				BTT_MAP_ENTRY_ERROR;
			if (info_blk_skip_block(pip,
				is_zero, is_error) == 0) {
				/* print the title once per arena */
				if (arena_count == 0)
					outv_title(v, "PMEM BLK BTT Map");
				if (is_zero)
					pip->blk.stats.zeros++;
				if (is_error)
					pip->blk.stats.errors++;
				if (!is_zero && !is_error)
					pip->blk.stats.noflag++;
				pip->blk.stats.total++;
				arena_count++;
				(*count)++;
				outv(v, "%010lu: %s\n", offset + i,
					out_get_btt_map_entry(entry));
			}
		}
	}
error:
	free(map);
	return ret;
}
/*
 * info_btt_flog -- print all flog entries
 *
 * Each of the nfree flog slots holds a pair of btt_flog structures
 * (printed as the plain and the primed fields); slots are laid out
 * BTT_FLOG_PAIR_ALIGN bytes apart.
 */
static int
info_btt_flog(struct pmem_info *pip, int v,
	struct btt_info *infop, uint64_t arena_off)
{
	if (!outv_check(v))
		return 0;
	int ret = 0;
	struct btt_flog *flogp = NULL;
	struct btt_flog *flogpp = NULL;
	uint64_t flog_size = infop->nfree *
		roundup(2 * sizeof(struct btt_flog), BTT_FLOG_PAIR_ALIGN);
	flog_size = roundup(flog_size, BTT_ALIGNMENT);
	uint8_t *buff = malloc(flog_size);
	if (!buff)
		err(1, "Cannot allocate memory for FLOG entries");
	if (pmempool_info_read(pip, buff, flog_size,
				arena_off + infop->flogoff)) {
		outv_err("cannot read BTT FLOG");
		ret = -1;
		goto error;
	}
	outv_title(v, "PMEM BLK BTT FLOG");
	uint8_t *ptr = buff;
	uint32_t i;
	for (i = 0; i < infop->nfree; i++) {
		/* the pair: current entry and its shadow copy */
		flogp = (struct btt_flog *)ptr;
		flogpp = flogp + 1;
		btt_flog_convert2h(flogp);
		btt_flog_convert2h(flogpp);
		outv(v, "%010d:\n", i);
		outv_field(v, "LBA", "0x%08x", flogp->lba);
		outv_field(v, "Old map", "0x%08x: %s", flogp->old_map,
			out_get_btt_map_entry(flogp->old_map));
		outv_field(v, "New map", "0x%08x: %s", flogp->new_map,
			out_get_btt_map_entry(flogp->new_map));
		outv_field(v, "Seq", "0x%x", flogp->seq);
		outv_field(v, "LBA'", "0x%08x", flogpp->lba);
		outv_field(v, "Old map'", "0x%08x: %s", flogpp->old_map,
			out_get_btt_map_entry(flogpp->old_map));
		outv_field(v, "New map'", "0x%08x: %s", flogpp->new_map,
			out_get_btt_map_entry(flogpp->new_map));
		outv_field(v, "Seq'", "0x%x", flogpp->seq);
		ptr += BTT_FLOG_PAIR_ALIGN;
	}
error:
	free(buff);
	return ret;
}
/*
 * info_btt_stats -- print btt related statistics
 *
 * Prints nothing when no blocks were counted (stats.total == 0).
 */
static void
info_btt_stats(struct pmem_info *pip, int v)
{
	if (pip->blk.stats.total == 0)
		return;

	double total = (double)pip->blk.stats.total;
	double perc_zeros = (double)pip->blk.stats.zeros / total * 100.0;
	double perc_errors = (double)pip->blk.stats.errors / total * 100.0;
	double perc_noflag = (double)pip->blk.stats.noflag / total * 100.0;

	outv_title(v, "PMEM BLK Statistics");
	outv_field(v, "Total blocks", "%u", pip->blk.stats.total);
	outv_field(v, "Zeroed blocks", "%u [%s]", pip->blk.stats.zeros,
			out_get_percentage(perc_zeros));
	outv_field(v, "Error blocks", "%u [%s]", pip->blk.stats.errors,
			out_get_percentage(perc_errors));
	outv_field(v, "Blocks without flag", "%u [%s]",
			pip->blk.stats.noflag,
			out_get_percentage(perc_noflag));
}
/*
 * info_btt_info -- print btt_info structure fields
 *
 * Expects *infop already converted to host byte order. Always
 * returns 0.
 */
static int
info_btt_info(struct pmem_info *pip, int v, struct btt_info *infop)
{
	outv_field(v, "Signature", "%.*s", BTTINFO_SIG_LEN, infop->sig);
	outv_field(v, "UUID of container", "%s",
			out_get_uuid_str(infop->parent_uuid));
	outv_field(v, "Flags", "0x%x", infop->flags);
	outv_field(v, "Major", "%d", infop->major);
	outv_field(v, "Minor", "%d", infop->minor);
	outv_field(v, "External LBA size", "%s",
			out_get_size_str(infop->external_lbasize,
				pip->args.human));
	outv_field(v, "External LBA count", "%u", infop->external_nlba);
	outv_field(v, "Internal LBA size", "%s",
			out_get_size_str(infop->internal_lbasize,
				pip->args.human));
	outv_field(v, "Internal LBA count", "%u", infop->internal_nlba);
	outv_field(v, "Free blocks", "%u", infop->nfree);
	outv_field(v, "Info block size", "%s",
			out_get_size_str(infop->infosize, pip->args.human));
	outv_field(v, "Next arena offset", "0x%lx", infop->nextoff);
	outv_field(v, "Arena data offset", "0x%lx", infop->dataoff);
	outv_field(v, "Area map offset", "0x%lx", infop->mapoff);
	outv_field(v, "Area flog offset", "0x%lx", infop->flogoff);
	outv_field(v, "Info block backup offset", "0x%lx", infop->infooff);
	outv_field(v, "Checksum", "%s", out_get_checksum(infop,
			sizeof(*infop), &infop->checksum, 0));
	return 0;
}
/*
 * info_btt_layout -- print information about BTT layout
 *
 * Iterates over all arenas starting at btt_off: for each arena prints
 * the BTT Info header, optionally the block data, map and flog, and
 * finally the Info header backup. Returns 0 on success, -1 on read
 * errors.
 */
static int
info_btt_layout(struct pmem_info *pip, os_off_t btt_off)
{
	int ret = 0;
	if (btt_off <= 0) {
		outv_err("wrong BTT layout offset\n");
		return -1;
	}
	struct btt_info *infop = NULL;
	infop = malloc(sizeof(struct btt_info));
	if (!infop)
		err(1, "Cannot allocate memory for BTT Info structure");
	int narena = 0;
	uint64_t cur_lba = 0;
	uint64_t count_data = 0;
	uint64_t count_map = 0;
	uint64_t offset = (uint64_t)btt_off;
	uint64_t nextoff = 0;
	do {
		/* read btt info area */
		if (pmempool_info_read(pip, infop, sizeof(*infop), offset)) {
			ret = -1;
			outv_err("cannot read BTT Info header\n");
			goto err;
		}
		/* an all-zero header means no BTT layout was written yet */
		if (util_check_memory((uint8_t *)infop,
					sizeof(*infop), 0) == 0) {
			outv(1, "\n<No BTT layout>\n");
			break;
		}
		outv(1, "\n[ARENA %d]", narena);
		outv_title(1, "PMEM BLK BTT Info Header");
		outv_hexdump(pip->args.vhdrdump, infop,
				sizeof(*infop), offset, 1);
		btt_info_convert2h(infop);
		/* save before the backup-header read overwrites *infop */
		nextoff = infop->nextoff;
		/* print btt info fields */
		if (info_btt_info(pip, 1, infop)) {
			ret = -1;
			goto err;
		}
		/* dump blocks data */
		if (info_btt_data(pip, pip->args.vdata,
					infop, offset, cur_lba, &count_data)) {
			ret = -1;
			goto err;
		}
		/* print btt map entries and get statistics */
		if (info_btt_map(pip, pip->args.blk.vmap, infop,
					offset, cur_lba, &count_map)) {
			ret = -1;
			goto err;
		}
		/* print flog entries */
		if (info_btt_flog(pip, pip->args.blk.vflog, infop,
					offset)) {
			ret = -1;
			goto err;
		}
		/* increment LBA's counter before reading info backup */
		cur_lba += infop->external_nlba;
		/* read btt info backup area */
		if (pmempool_info_read(pip, infop, sizeof(*infop),
			offset + infop->infooff)) {
			outv_err("wrong BTT Info Backup size or offset\n");
			ret = -1;
			goto err;
		}
		outv_title(pip->args.blk.vbackup,
				"PMEM BLK BTT Info Header Backup");
		if (outv_check(pip->args.blk.vbackup))
			outv_hexdump(pip->args.vhdrdump, infop,
				sizeof(*infop),
				offset + infop->infooff, 1);
		btt_info_convert2h(infop);
		info_btt_info(pip, pip->args.blk.vbackup, infop);
		offset += nextoff;
		narena++;
	} while (nextoff > 0);
	info_btt_stats(pip, pip->args.vstats);
err:
	/* infop cannot be NULL here -- err(3) exits on allocation failure */
	free(infop);
	return ret;
}
/*
 * info_blk_descriptor -- print pmemblk descriptor
 *
 * Hex-dumps the pmemblk header (skipping the leading pool_hdr) and
 * prints the block size and zero-initialization flag.
 */
static void
info_blk_descriptor(struct pmem_info *pip, int v, struct pmemblk *pbp)
{
	size_t pmemblk_size;
#ifdef DEBUG
	/* debug builds: dump only up to the runtime part of the struct */
	pmemblk_size = offsetof(struct pmemblk, write_lock);
#else
	pmemblk_size = sizeof(*pbp);
#endif
	outv_title(v, "PMEM BLK Header");
	/* dump pmemblk header without pool_hdr */
	outv_hexdump(pip->args.vhdrdump, (uint8_t *)pbp + sizeof(pbp->hdr),
			pmemblk_size - sizeof(pbp->hdr), sizeof(pbp->hdr), 1);
	outv_field(v, "Block size", "%s",
			out_get_size_str(pbp->bsize, pip->args.human));
	outv_field(v, "Is zeroed", pbp->is_zeroed ? "true" : "false");
}
/*
 * pmempool_info_blk -- print information about block type pool
 *
 * Reads the pmemblk descriptor from the pool, prints it and then the
 * full BTT layout. Returns 0 on success, -1 on read failure.
 */
int
pmempool_info_blk(struct pmem_info *pip)
{
	struct pmemblk *pbp = malloc(sizeof(*pbp));
	if (!pbp)
		err(1, "Cannot allocate memory for pmemblk structure");

	if (pmempool_info_read(pip, pbp, sizeof(struct pmemblk), 0)) {
		outv_err("cannot read pmemblk header\n");
		free(pbp);
		return -1;
	}

	info_blk_descriptor(pip, VERBOSE_DEFAULT, pbp);

	/* the BTT layout starts where the pmemblk data area begins */
	ssize_t btt_off = (char *)pbp->data - (char *)pbp->addr;
	int ret = info_btt_layout(pip, btt_off);

	free(pbp);
	return ret;
}
/*
 * pmempool_info_btt -- print information about btt device
 */
int
pmempool_info_btt(struct pmem_info *pip)
{
	outv(1, "\nBTT Device");

	/* a btt device has its layout right after the default header */
	return info_btt_layout(pip, DEFAULT_HDR_SIZE);
}
| 14,565 | 24.644366 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/info_obj.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* info_obj.c -- pmempool info command source file for obj pool
*/
#include <stdlib.h>
#include <stdbool.h>
#include <err.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <assert.h>
#include <inttypes.h>
#include "alloc_class.h"
#include "set.h"
#include "common.h"
#include "output.h"
#include "info.h"
#include "util.h"
#define BITMAP_BUFF_SIZE 1024
#define OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
/*
 * lane_need_recovery -- return 1 if lane section needs recovery
 *
 * A lane needs recovery when any of its three logs (external,
 * internal, undo) does.
 */
static int
lane_need_recovery(struct pmem_info *pip, struct lane_layout *lane)
{
	if (ulog_recovery_needed((struct ulog *)&lane->external, 1))
		return 1;
	if (ulog_recovery_needed((struct ulog *)&lane->internal, 1))
		return 1;
	return ulog_recovery_needed((struct ulog *)&lane->undo, 0) != 0;
}
#define RUN_BITMAP_SEPARATOR_DISTANCE 8
/*
 * get_bitmap_str -- get bitmap single value string
 *
 * Renders up to 'values' bits of 'val' as 'x' (set) / '.' (clear),
 * inserting a space every RUN_BITMAP_SEPARATOR_DISTANCE bits.
 * Returns a pointer to a static buffer, overwritten on each call.
 */
static const char *
get_bitmap_str(uint64_t val, unsigned values)
{
	static char buff[BITMAP_BUFF_SIZE];
	unsigned out = 0;

	/* leave room for a possible separator plus the terminator */
	for (unsigned bit = 0; bit < values && out < BITMAP_BUFF_SIZE - 3;
			bit++) {
		buff[out++] = (val & ((uint64_t)1 << bit)) ? 'x' : '.';
		if ((bit + 1) % RUN_BITMAP_SEPARATOR_DISTANCE == 0)
			buff[out++] = ' ';
	}
	buff[out] = '\0';

	return buff;
}
/*
 * pmem_obj_stats_get_type -- get stats for specified type number
 *
 * Looks up the per-type stats entry; allocates and inserts a new one
 * (keeping the list sorted by type number) when not found. Exits on
 * allocation failure.
 */
static struct pmem_obj_type_stats *
pmem_obj_stats_get_type(struct pmem_obj_stats *stats, uint64_t type_num)
{
	struct pmem_obj_type_stats *it;
	struct pmem_obj_type_stats *insert_before = NULL;

	PMDK_TAILQ_FOREACH(it, &stats->type_stats, next) {
		if (it->type_num == type_num)
			return it;
		/* remember the first entry with a greater type number */
		if (insert_before == NULL && it->type_num > type_num)
			insert_before = it;
	}

	struct pmem_obj_type_stats *new_type = calloc(1, sizeof(*new_type));
	if (new_type == NULL) {
		outv_err("cannot allocate memory for type stats\n");
		exit(EXIT_FAILURE);
	}
	new_type->type_num = type_num;

	if (insert_before != NULL)
		PMDK_TAILQ_INSERT_BEFORE(insert_before, new_type, next);
	else
		PMDK_TAILQ_INSERT_TAIL(&stats->type_stats, new_type, next);

	return new_type;
}
/* arguments passed to info_obj_redo_entry() via ulog_foreach_entry() */
struct info_obj_redo_args {
	int v;			/* verbosity level for output */
	size_t i;		/* running log-entry index */
	struct pmem_info *pip;	/* global info-command context */
};
/*
 * info_obj_redo_entry - print redo log entry info
 *
 * Callback for ulog_foreach_entry(); 'arg' is a struct
 * info_obj_redo_args. Always returns 0 so iteration continues.
 */
static int
info_obj_redo_entry(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops)
{
	struct info_obj_redo_args *a = arg;
	struct ulog_entry_val *ev;
	struct ulog_entry_buf *eb;
	switch (ulog_entry_type(e)) {
	case ULOG_OPERATION_AND:
	case ULOG_OPERATION_OR:
	case ULOG_OPERATION_SET:
		/* value-type entry: offset plus a single 64-bit value */
		ev = (struct ulog_entry_val *)e;
		outv(a->v, "%010zu: "
			"Offset: 0x%016jx "
			"Value: 0x%016jx ",
			a->i++,
			ulog_entry_offset(e),
			ev->value);
		break;
	case ULOG_OPERATION_BUF_CPY:
	case ULOG_OPERATION_BUF_SET:
		/* buffer-type entry: offset plus a variable-size buffer */
		eb = (struct ulog_entry_buf *)e;
		outv(a->v, "%010zu: "
			"Offset: 0x%016jx "
			"Size: %s ",
			a->i++,
			ulog_entry_offset(e),
			out_get_size_str(eb->size,
				a->pip->args.human));
		break;
	default:
		ASSERT(0); /* unreachable */
	}
	return 0;
}
/*
 * info_obj_ulog -- print ulog log entries
 *
 * (Header previously misnamed this function "info_obj_redo".)
 */
static void
info_obj_ulog(struct pmem_info *pip, int v, struct ulog *ulog,
	const struct pmem_ops *ops)
{
	outv_title(v, "Log entries");
	struct info_obj_redo_args args = {v, 0, pip};
	ulog_foreach_entry(ulog, info_obj_redo_entry, &args, ops,NULL);
}
/*
 * info_obj_alloc_hdr -- print allocation header
 *
 * Prints the user size, extra (type number) and flags of a memory
 * block via its memory-block operations vtable.
 */
static void
info_obj_alloc_hdr(struct pmem_info *pip, int v,
	const struct memory_block *m)
{
	outv_title(v, "Allocation Header");
	outv_field(v, "Size", "%s", out_get_size_str(m->m_ops->get_user_size(m),
			pip->args.human));
	outv_field(v, "Extra", "%lu", m->m_ops->get_extra(m));
	outv_field(v, "Flags", "0x%x", m->m_ops->get_flags(m));
}
/*
 * info_obj_object_hdr -- print object headers and data
 *
 * 'vid' controls the object id/offset lines; 'v' (combined with the
 * per-section flags) controls the allocation header and the optional
 * hex dump of the user data.
 */
static void
info_obj_object_hdr(struct pmem_info *pip, int v, int vid,
	const struct memory_block *m, uint64_t id)
{
	struct pmemobjpool *pop = pip->obj.pop;
	void *data = m->m_ops->get_user_data(m);
	outv_nl(vid);
	outv_field(vid, "Object", "%lu", id);
	outv_field(vid, "Offset", "0x%016lx", PTR_TO_OFF(pop, data));
	int vahdr = v && pip->args.obj.valloc;
	int voobh = v && pip->args.obj.voobhdr;
	outv_indent(vahdr || voobh, 1);
	info_obj_alloc_hdr(pip, vahdr, m);
	outv_hexdump(v && pip->args.vdata, data,
			m->m_ops->get_real_size(m),
			PTR_TO_OFF(pip->obj.pop, data), 1);
	outv_indent(vahdr || voobh, -1);
}
/*
 * info_obj_lane -- print a single lane's undo, internal and external logs
 *
 * (Header previously misnamed this function "info_obj_lane_section".)
 */
static void
info_obj_lane(struct pmem_info *pip, int v, struct lane_layout *lane)
{
	struct pmem_ops p_ops;
	/* only the base address is consumed by the ulog iteration below */
	p_ops.base = pip->obj.pop;
	outv_title(v, "Undo Log");
	outv_indent(v, 1);
	info_obj_ulog(pip, v, (struct ulog *)&lane->undo, &p_ops);
	outv_indent(v, -1);
	outv_nl(v);
	outv_title(v, "Internal Undo Log");
	outv_indent(v, 1);
	info_obj_ulog(pip, v, (struct ulog *)&lane->internal, &p_ops);
	outv_indent(v, -1);
	outv_title(v, "External Undo Log");
	outv_indent(v, 1);
	info_obj_ulog(pip, v, (struct ulog *)&lane->external, &p_ops);
	outv_indent(v, -1);
}
/*
 * info_obj_lanes -- print lanes structures
 *
 * Honors the requested lane ranges and, with -R, restricts the output
 * to lanes that actually need recovery.
 */
static void
info_obj_lanes(struct pmem_info *pip)
{
	int v = pip->args.obj.vlanes;
	if (!outv_check(v))
		return;
	struct pmemobjpool *pop = pip->obj.pop;
	/*
	 * Iterate through all lanes from specified range and print
	 * specified sections.
	 */
	struct lane_layout *lanes = (void *)((char *)pip->obj.pop +
			pop->lanes_offset);
	struct range *curp = NULL;
	FOREACH_RANGE(curp, &pip->args.obj.lane_ranges) {
		for (uint64_t i = curp->first;
			i <= curp->last && i < pop->nlanes; i++) {
			/* For -R check print lane only if needs recovery */
			if (pip->args.obj.lanes_recovery &&
				!lane_need_recovery(pip, &lanes[i]))
				continue;
			outv_title(v, "Lane %" PRIu64, i);
			outv_indent(v, 1);
			info_obj_lane(pip, v, &lanes[i]);
			outv_indent(v, -1);
		}
	}
}
/*
 * info_obj_heap -- print pmemobj heap headers
 */
static void
info_obj_heap(struct pmem_info *pip)
{
	int v = pip->args.obj.vheap;
	struct pmemobjpool *pop = pip->obj.pop;
	struct heap_layout *layout = OFF_TO_PTR(pop, pop->heap_offset);
	struct heap_header *heap = &layout->header;
	outv(v, "\nPMEMOBJ Heap Header:\n");
	outv_hexdump(v && pip->args.vhdrdump, heap, sizeof(*heap),
			pop->heap_offset, 1);
	outv_field(v, "Signature", "%s", heap->signature);
	/* NOTE(review): major/minor/chunks_per_zone are printed with %ld --
	 * confirm the heap_header field widths match on all targets */
	outv_field(v, "Major", "%ld", heap->major);
	outv_field(v, "Minor", "%ld", heap->minor);
	outv_field(v, "Chunk size", "%s",
			out_get_size_str(heap->chunksize, pip->args.human));
	outv_field(v, "Chunks per zone", "%ld", heap->chunks_per_zone);
	outv_field(v, "Checksum", "%s", out_get_checksum(heap, sizeof(*heap),
			&heap->checksum, 0));
}
/*
 * info_obj_zone_hdr -- print a zone header
 */
static void
info_obj_zone_hdr(struct pmem_info *pip, int v, struct zone_header *zone)
{
	/* raw dump only with the additional -x flag */
	outv_hexdump(v && pip->args.vhdrdump, zone, sizeof(*zone),
		PTR_TO_OFF(pip->obj.pop, zone), 1);
	outv_field(v, "Magic", "%s", out_get_zone_magic_str(zone->magic));
	outv_field(v, "Size idx", "%u", zone->size_idx);
}
/*
 * info_obj_object -- print information about a single object and fold it
 * into the global and per-type-number statistics
 */
static void
info_obj_object(struct pmem_info *pip, const struct memory_block *m,
	uint64_t objid)
{
	/* honor the object-id range filter given on the command line */
	if (!util_ranges_contain(&pip->args.ranges, objid))
		return;
	/* and the type-number range filter */
	uint64_t type_num = m->m_ops->get_extra(m);
	if (!util_ranges_contain(&pip->args.obj.type_ranges, type_num))
		return;
	uint64_t real_size = m->m_ops->get_real_size(m);
	pip->obj.stats.n_total_objects++;
	pip->obj.stats.n_total_bytes += real_size;
	struct pmem_obj_type_stats *type_stats =
		pmem_obj_stats_get_type(&pip->obj.stats, type_num);
	type_stats->n_objects++;
	type_stats->n_bytes += real_size;
	/* header and object listing share the same verbosity level */
	int vid = pip->args.obj.vobjects;
	int v = pip->args.obj.vobjects;
	outv_indent(v, 1);
	info_obj_object_hdr(pip, v, vid, m, objid);
	outv_indent(v, -1);
}
/*
 * info_obj_run_bitmap -- dump a run's allocation bitmap, one bitmap value
 * per line; the last (partial) value prints only its remaining bits
 */
static void
info_obj_run_bitmap(int v, struct run_bitmap *b)
{
	uint32_t nfull = b->nbits / RUN_BITS_PER_VALUE;
	uint32_t idx;

	/* full-width values first... */
	for (idx = 0; idx < nfull; idx++)
		outv(v, "%s\n", get_bitmap_str(b->values[idx],
			RUN_BITS_PER_VALUE));

	/* ...then the tail value holding the leftover bits, if any */
	unsigned tail = b->nbits % RUN_BITS_PER_VALUE;
	if (tail != 0)
		outv(v, "%s\n", get_bitmap_str(b->values[idx], tail));
}
/*
* info_obj_memblock_is_root -- (internal) checks whether the object is root
*/
static int
info_obj_memblock_is_root(struct pmem_info *pip, const struct memory_block *m)
{
uint64_t roff = pip->obj.pop->root_offset;
if (roff == 0)
return 0;
struct memory_block rm = memblock_from_offset(pip->obj.heap, roff);
return MEMORY_BLOCK_EQUALS(*m, rm);
}
/*
 * info_obj_run_cb -- (internal) per-allocation callback used when
 * iterating a run's used blocks
 */
static int
info_obj_run_cb(const struct memory_block *m, void *arg)
{
	struct pmem_info *pip = arg;

	/* the root object is reported separately -- skip it here */
	if (!info_obj_memblock_is_root(pip, m))
		info_obj_object(pip, m, pip->obj.objid++);

	return 0;
}
/*
 * info_obj_class_stats_get_or_insert -- (internal) return the class-stats
 * entry matching the given run parameters, appending a fresh zeroed entry
 * when none exists yet; returns NULL when the vector cannot grow (OOM)
 */
static struct pmem_obj_class_stats *
info_obj_class_stats_get_or_insert(struct pmem_obj_zone_stats *stats,
	uint64_t unit_size, uint64_t alignment,
	uint32_t nallocs, uint16_t flags)
{
	struct pmem_obj_class_stats *cstats;
	/* an entry matches only if all four run parameters are equal */
	VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) {
		if (cstats->alignment == alignment &&
			cstats->flags == flags &&
			cstats->nallocs == nallocs &&
			cstats->unit_size == unit_size)
			return cstats;
	}
	/* n_units/n_used start at zero; the caller accumulates them */
	struct pmem_obj_class_stats s = {0, 0, unit_size,
		alignment, nallocs, flags};
	if (VEC_PUSH_BACK(&stats->class_stats, s) != 0)
		return NULL;
	return &VEC_BACK(&stats->class_stats);
}
/*
 * info_obj_chunk -- print chunk info and accumulate its statistics
 */
static void
info_obj_chunk(struct pmem_info *pip, uint64_t c, uint64_t z,
	struct chunk_header *chunk_hdr, struct chunk *chunk,
	struct pmem_obj_zone_stats *stats)
{
	int v = pip->args.obj.vchunkhdr;
	outv(v, "\n");
	outv_field(v, "Chunk", "%lu", c);
	struct pmemobjpool *pop = pip->obj.pop;
	outv_hexdump(v && pip->args.vhdrdump, chunk_hdr, sizeof(*chunk_hdr),
		PTR_TO_OFF(pop, chunk_hdr), 1);
	outv_field(v, "Type", "%s", out_get_chunk_type_str(chunk_hdr->type));
	outv_field(v, "Flags", "0x%x %s", chunk_hdr->flags,
		out_get_chunk_flags(chunk_hdr->flags));
	outv_field(v, "Size idx", "%u", chunk_hdr->size_idx);
	/* rebuild a runtime memory-block descriptor for this chunk */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.zone_id = (uint32_t)z;
	m.chunk_id = (uint32_t)c;
	m.size_idx = (uint32_t)chunk_hdr->size_idx;
	memblock_rebuild_state(pip->obj.heap, &m);
	if (chunk_hdr->type == CHUNK_TYPE_USED ||
		chunk_hdr->type == CHUNK_TYPE_FREE) {
		/* chunk-granular allocations go into class-stats slot 0 */
		VEC_FRONT(&stats->class_stats).n_units +=
			chunk_hdr->size_idx;
		if (chunk_hdr->type == CHUNK_TYPE_USED) {
			VEC_FRONT(&stats->class_stats).n_used +=
				chunk_hdr->size_idx;
			/* skip root object */
			if (!info_obj_memblock_is_root(pip, &m)) {
				info_obj_object(pip, &m, pip->obj.objid++);
			}
		}
	} else if (chunk_hdr->type == CHUNK_TYPE_RUN) {
		struct chunk_run *run = (struct chunk_run *)chunk;
		/* dump only the fixed part of the run header */
		outv_hexdump(v && pip->args.vhdrdump, run,
			sizeof(run->hdr.block_size) +
			sizeof(run->hdr.alignment),
			PTR_TO_OFF(pop, run), 1);
		struct run_bitmap bitmap;
		m.m_ops->get_bitmap(&m, &bitmap);
		struct pmem_obj_class_stats *cstats =
			info_obj_class_stats_get_or_insert(stats,
			run->hdr.block_size, run->hdr.alignment, bitmap.nbits,
			chunk_hdr->flags);
		if (cstats == NULL) {
			outv_err("out of memory, can't allocate statistics");
			return;
		}
		outv_field(v, "Block size", "%s",
			out_get_size_str(run->hdr.block_size,
			pip->args.human));
		uint32_t units = bitmap.nbits;
		uint32_t free_space = 0;
		uint32_t max_free_block = 0;
		m.m_ops->calc_free(&m, &free_space, &max_free_block);
		uint32_t used = units - free_space;
		cstats->n_units += units;
		cstats->n_used += used;
		outv_field(v, "Bitmap", "%u / %u", used, units);
		info_obj_run_bitmap(v && pip->args.obj.vbitmap, &bitmap);
		/* print every allocated block inside this run */
		m.m_ops->iterate_used(&m, info_obj_run_cb, pip);
	}
}
/*
 * info_obj_zone_chunks -- print chunk headers from specified zone,
 * accumulating per-zone statistics along the way
 */
static void
info_obj_zone_chunks(struct pmem_info *pip, struct zone *zone, uint64_t z,
	struct pmem_obj_zone_stats *stats)
{
	VEC_INIT(&stats->class_stats);
	/* slot 0 aggregates chunk-granular (non-run) allocations */
	struct pmem_obj_class_stats default_class_stats = {0, 0,
		CHUNKSIZE, 0, 0, 0};
	/*
	 * Bail out on OOM -- info_obj_chunk unconditionally dereferences
	 * VEC_FRONT of this vector, so continuing without the default
	 * entry would be invalid.
	 */
	if (VEC_PUSH_BACK(&stats->class_stats, default_class_stats) != 0) {
		outv_err("out of memory, can't allocate statistics");
		return;
	}
	uint64_t c = 0;
	while (c < zone->header.size_idx) {
		enum chunk_type type = zone->chunk_headers[c].type;
		uint64_t size_idx = zone->chunk_headers[c].size_idx;
		if (util_ranges_contain(&pip->args.obj.chunk_ranges, c)) {
			if (pip->args.obj.chunk_types & (1ULL << type)) {
				stats->n_chunks++;
				stats->n_chunks_type[type]++;
				stats->size_chunks += size_idx;
				stats->size_chunks_type[type] += size_idx;
				info_obj_chunk(pip, c, z,
					&zone->chunk_headers[c],
					&zone->chunks[c], stats);
			}
			/*
			 * Multi-chunk blocks (except runs) keep a footer
			 * header in their last chunk -- print it on request.
			 * 1ULL matches the 64-bit chunk_types mask, same as
			 * the test above.
			 */
			if (size_idx > 1 && type != CHUNK_TYPE_RUN &&
				(pip->args.obj.chunk_types &
				(1ULL << CHUNK_TYPE_FOOTER))) {
				size_t f = c + size_idx - 1;
				info_obj_chunk(pip, f, z,
					&zone->chunk_headers[f],
					&zone->chunks[f], stats);
			}
		}
		/* jump over the whole multi-chunk block */
		c += size_idx;
	}
}
/*
 * info_obj_root_obj -- print root object
 */
static void
info_obj_root_obj(struct pmem_info *pip)
{
	int v = pip->args.obj.vroot;
	struct pmemobjpool *pop = pip->obj.pop;
	/* root_offset == 0 means no root object was ever allocated */
	if (!pop->root_offset) {
		outv(v, "\nNo root object...\n");
		return;
	}
	outv_title(v, "Root object");
	outv_field(v, "Offset", "0x%016zx", pop->root_offset);
	uint64_t root_size = pop->root_size;
	outv_field(v, "Size", "%s",
		out_get_size_str(root_size, pip->args.human));
	struct memory_block m = memblock_from_offset(
		pip->obj.heap, pop->root_offset);
	/* do not print object id and offset for root object */
	info_obj_object_hdr(pip, v, VERBOSE_SILENT, &m, 0);
}
/*
 * info_obj_zones_chunks -- print zones and chunks
 */
static void
info_obj_zones_chunks(struct pmem_info *pip)
{
	/* nothing to do unless heap, stats or object output was requested */
	if (!outv_check(pip->args.obj.vheap) &&
		!outv_check(pip->args.vstats) &&
		!outv_check(pip->args.obj.vobjects))
		return;
	struct pmemobjpool *pop = pip->obj.pop;
	struct heap_layout *layout = OFF_TO_PTR(pop, pop->heap_offset);
	size_t maxzone = util_heap_max_zone(pop->heap_size);
	pip->obj.stats.n_zones = maxzone;
	/*
	 * NOTE(review): zone_stats ownership stays with pip->obj.stats;
	 * presumably released by the stats teardown code -- confirm.
	 */
	pip->obj.stats.zone_stats = calloc(maxzone,
		sizeof(struct pmem_obj_zone_stats));
	if (!pip->obj.stats.zone_stats)
		err(1, "Cannot allocate memory for zone stats");
	for (size_t i = 0; i < maxzone; i++) {
		struct zone *zone = ZID_TO_ZONE(layout, i);
		if (util_ranges_contain(&pip->args.obj.zone_ranges, i)) {
			/* titles print only when a header flag accompanies vheap */
			int vvv = pip->args.obj.vheap &&
				(pip->args.obj.vzonehdr ||
				pip->args.obj.vchunkhdr);
			outv_title(vvv, "Zone %zu", i);
			/* a valid magic marks the zone as initialized/used */
			if (zone->header.magic == ZONE_HEADER_MAGIC)
				pip->obj.stats.n_zones_used++;
			info_obj_zone_hdr(pip, pip->args.obj.vheap &&
				pip->args.obj.vzonehdr,
				&zone->header);
			outv_indent(vvv, 1);
			info_obj_zone_chunks(pip, zone, i,
				&pip->obj.stats.zone_stats[i]);
			outv_indent(vvv, -1);
		}
	}
}
/*
 * info_obj_descriptor -- print pmemobj pool descriptor
 */
static void
info_obj_descriptor(struct pmem_info *pip)
{
	int v = VERBOSE_DEFAULT;
	if (!outv_check(v))
		return;
	outv(v, "\nPMEM OBJ Header:\n");
	struct pmemobjpool *pop = pip->obj.pop;
	/* dump the descriptor part only, skipping the generic pool_hdr */
	uint8_t *hdrptr = (uint8_t *)pop + sizeof(pop->hdr);
	size_t hdrsize = sizeof(*pop) - sizeof(pop->hdr);
	size_t hdroff = sizeof(pop->hdr);
	outv_hexdump(pip->args.vhdrdump, hdrptr, hdrsize, hdroff, 1);
	/* check if layout is zeroed */
	char *layout = util_check_memory((uint8_t *)pop->layout,
		sizeof(pop->layout), 0) ?
		pop->layout : "(null)";
	/* address for checksum */
	void *dscp = (void *)((uintptr_t)(pop) + sizeof(struct pool_hdr));
	outv_field(v, "Layout", "%s", layout);
	outv_field(v, "Lanes offset", "0x%lx", pop->lanes_offset);
	outv_field(v, "Number of lanes", "%lu", pop->nlanes);
	outv_field(v, "Heap offset", "0x%lx", pop->heap_offset);
	outv_field(v, "Heap size", "%lu", pop->heap_size);
	outv_field(v, "Checksum", "%s", out_get_checksum(dscp, OBJ_DSC_P_SIZE,
		&pop->checksum, 0));
	outv_field(v, "Root offset", "0x%lx", pop->root_offset);
	/* run id with -v option */
	outv_field(v + 1, "Run id", "%lu", pop->run_id);
}
/*
 * info_obj_stats_objects -- print objects' statistics
 */
static void
info_obj_stats_objects(struct pmem_info *pip, int v,
	struct pmem_obj_stats *stats)
{
	outv_field(v, "Number of objects", "%lu",
		stats->n_total_objects);
	outv_field(v, "Number of bytes", "%s", out_get_size_str(
		stats->n_total_bytes, pip->args.human));
	outv_title(v, "Objects by type");
	outv_indent(v, 1);
	struct pmem_obj_type_stats *type_stats;
	PMDK_TAILQ_FOREACH(type_stats, &pip->obj.stats.type_stats, next) {
		/* skip type numbers with no objects; also guards the
		 * percentage divisions below against zero totals */
		if (!type_stats->n_objects)
			continue;
		double n_objects_perc = 100.0 *
			(double)type_stats->n_objects /
			(double)stats->n_total_objects;
		double n_bytes_perc = 100.0 *
			(double)type_stats->n_bytes /
			(double)stats->n_total_bytes;
		outv_nl(v);
		outv_field(v, "Type number", "%lu", type_stats->type_num);
		outv_field(v, "Number of objects", "%lu [%s]",
			type_stats->n_objects,
			out_get_percentage(n_objects_perc));
		outv_field(v, "Number of bytes", "%s [%s]",
			out_get_size_str(
				type_stats->n_bytes,
				pip->args.human),
			out_get_percentage(n_bytes_perc));
	}
	outv_indent(v, -1);
}
/*
 * info_obj_stats_alloc_classes -- print allocation classes' statistics
 */
static void
info_obj_stats_alloc_classes(struct pmem_info *pip, int v,
	struct pmem_obj_zone_stats *stats)
{
	uint64_t total_bytes = 0;
	uint64_t total_used = 0;
	outv_indent(v, 1);
	struct pmem_obj_class_stats *cstats;
	VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) {
		/* skip empty classes; also guards the division below */
		if (cstats->n_units == 0)
			continue;
		double used_perc = 100.0 *
			(double)cstats->n_used / (double)cstats->n_units;
		outv_nl(v);
		outv_field(v, "Unit size", "%s", out_get_size_str(
			cstats->unit_size, pip->args.human));
		outv_field(v, "Units", "%lu", cstats->n_units);
		outv_field(v, "Used units", "%lu [%s]",
			cstats->n_used,
			out_get_percentage(used_perc));
		uint64_t bytes = cstats->unit_size *
			cstats->n_units;
		uint64_t used = cstats->unit_size *
			cstats->n_used;
		total_bytes += bytes;
		total_used += used;
		double used_bytes_perc = 100.0 * (double)used / (double)bytes;
		outv_field(v, "Bytes", "%s",
			out_get_size_str(bytes, pip->args.human));
		outv_field(v, "Used bytes", "%s [%s]",
			out_get_size_str(used, pip->args.human),
			out_get_percentage(used_bytes_perc));
	}
	outv_indent(v, -1);
	/* explicit zero-guard: a zone may contain no allocations at all */
	double used_bytes_perc = total_bytes ? 100.0 *
		(double)total_used / (double)total_bytes : 0.0;
	outv_nl(v);
	outv_field(v, "Total bytes", "%s",
		out_get_size_str(total_bytes, pip->args.human));
	outv_field(v, "Total used bytes", "%s [%s]",
		out_get_size_str(total_used, pip->args.human),
		out_get_percentage(used_bytes_perc));
}
/*
 * info_obj_stats_chunks -- print chunks' statistics
 */
static void
info_obj_stats_chunks(struct pmem_info *pip, int v,
	struct pmem_obj_zone_stats *stats)
{
	outv_field(v, "Number of chunks", "%lu", stats->n_chunks);
	outv_indent(v, 1);
	for (unsigned type = 0; type < MAX_CHUNK_TYPE; type++) {
		/*
		 * NOTE(review): the percentage is computed before the
		 * zero-count check; with n_chunks == 0 it divides by zero,
		 * but the result is only printed when the count is nonzero.
		 */
		double type_perc = 100.0 *
			(double)stats->n_chunks_type[type] /
			(double)stats->n_chunks;
		if (stats->n_chunks_type[type]) {
			outv_field(v, out_get_chunk_type_str(type),
				"%lu [%s]",
				stats->n_chunks_type[type],
				out_get_percentage(type_perc));
		}
	}
	outv_indent(v, -1);
	outv_nl(v);
	outv_field(v, "Total chunks size", "%s", out_get_size_str(
		stats->size_chunks, pip->args.human));
	outv_indent(v, 1);
	for (unsigned type = 0; type < MAX_CHUNK_TYPE; type++) {
		double type_perc = 100.0 *
			(double)stats->size_chunks_type[type] /
			(double)stats->size_chunks;
		if (stats->size_chunks_type[type]) {
			outv_field(v, out_get_chunk_type_str(type),
				"%lu [%s]",
				stats->size_chunks_type[type],
				out_get_percentage(type_perc));
		}
	}
	outv_indent(v, -1);
}
/*
 * info_obj_add_zone_stats -- accumulate one zone's stats into the totals
 */
static void
info_obj_add_zone_stats(struct pmem_obj_zone_stats *total,
	struct pmem_obj_zone_stats *stats)
{
	total->n_chunks += stats->n_chunks;
	total->size_chunks += stats->size_chunks;
	for (int type = 0; type < MAX_CHUNK_TYPE; type++) {
		total->n_chunks_type[type] +=
			stats->n_chunks_type[type];
		total->size_chunks_type[type] +=
			stats->size_chunks_type[type];
	}
	/* merge class stats by matching run parameters */
	struct pmem_obj_class_stats *cstats;
	VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) {
		struct pmem_obj_class_stats *ctotal =
			info_obj_class_stats_get_or_insert(total, cstats->unit_size,
			cstats->alignment, cstats->nallocs, cstats->flags);
		if (ctotal == NULL) {
			outv_err("out of memory, can't allocate statistics");
			return;
		}
		ctotal->n_units += cstats->n_units;
		ctotal->n_used += cstats->n_used;
	}
}
/*
 * info_obj_stats_zones -- print zones' statistics
 */
static void
info_obj_stats_zones(struct pmem_info *pip, int v, struct pmem_obj_stats *stats,
	struct pmem_obj_zone_stats *total)
{
	double used_zones_perc = 100.0 * (double)stats->n_zones_used /
		(double)stats->n_zones;
	outv_field(v, "Number of zones", "%lu", stats->n_zones);
	outv_field(v, "Number of used zones", "%lu [%s]", stats->n_zones_used,
		out_get_percentage(used_zones_perc));
	outv_indent(v, 1);
	/*
	 * NOTE(review): this assumes the first n_zones_used entries of
	 * zone_stats correspond to the used zones -- confirm.
	 */
	for (uint64_t i = 0; i < stats->n_zones_used; i++) {
		outv_title(v, "Zone %" PRIu64, i);
		struct pmem_obj_zone_stats *zstats = &stats->zone_stats[i];
		info_obj_stats_chunks(pip, v, zstats);
		outv_title(v, "Zone's allocation classes");
		info_obj_stats_alloc_classes(pip, v, zstats);
		/* fold this zone into the running totals */
		info_obj_add_zone_stats(total, zstats);
	}
	outv_indent(v, -1);
}
/*
 * info_obj_stats -- print statistics
 */
static void
info_obj_stats(struct pmem_info *pip)
{
	int v = pip->args.vstats;
	if (!outv_check(v))
		return;
	struct pmem_obj_stats *stats = &pip->obj.stats;
	struct pmem_obj_zone_stats total;
	memset(&total, 0, sizeof(total));
	outv_title(v, "Statistics");
	outv_title(v, "Objects");
	info_obj_stats_objects(pip, v, stats);
	outv_title(v, "Heap");
	/* per-zone printout also accumulates into 'total' */
	info_obj_stats_zones(pip, v, stats, &total);
	/* the aggregated view is redundant with a single used zone */
	if (stats->n_zones_used > 1) {
		outv_title(v, "Total zone's statistics");
		outv_title(v, "Chunks statistics");
		info_obj_stats_chunks(pip, v, &total);
		outv_title(v, "Allocation classes");
		info_obj_stats_alloc_classes(pip, v, &total);
	}
	VEC_DELETE(&total.class_stats);
}
/* global pool-info context used by the fault handlers below */
static struct pmem_info *Pip;
#ifndef _WIN32
/*
 * info_obj_sa_sigaction -- SIGSEGV handler: report the faulting address
 * as an offset into the pool and abort the tool
 */
static void
info_obj_sa_sigaction(int signum, siginfo_t *info, void *context)
{
	uintptr_t offset = (uintptr_t)info->si_addr - (uintptr_t)Pip->obj.pop;
	outv_err("Invalid offset 0x%lx\n", offset);
	exit(EXIT_FAILURE);
}

static struct sigaction info_obj_sigaction = {
	.sa_sigaction = info_obj_sa_sigaction,
	.sa_flags = SA_SIGINFO
};
#else
#define CALL_FIRST 1
/*
 * exception_handler -- Windows counterpart of the handler above; handles
 * only access violations and passes every other exception on
 */
static LONG CALLBACK
exception_handler(_In_ PEXCEPTION_POINTERS ExceptionInfo)
{
	PEXCEPTION_RECORD record = ExceptionInfo->ExceptionRecord;
	if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) {
		return EXCEPTION_CONTINUE_SEARCH;
	}
	/* ExceptionInformation[1] holds the faulting virtual address */
	uintptr_t offset = (uintptr_t)record->ExceptionInformation[1] -
		(uintptr_t)Pip->obj.pop;
	outv_err("Invalid offset 0x%lx\n", offset);
	exit(EXIT_FAILURE);
}
#endif
/*
 * pmempool_info_obj -- print information about obj pool type
 *
 * Maps the pool, builds a minimal runtime heap descriptor, installs a
 * fault handler that turns stray accesses into a diagnostic, then runs
 * all the info sub-printers. Returns 0 on success, -1 on failure.
 */
int
pmempool_info_obj(struct pmem_info *pip)
{
	pip->obj.pop = pool_set_file_map(pip->pfile, 0);
	if (pip->obj.pop == NULL)
		return -1;
	pip->obj.size = pip->pfile->size;
	struct palloc_heap *heap = calloc(1, sizeof(*heap));
	if (heap == NULL)
		err(1, "Cannot allocate memory for heap data");
	heap->layout = OFF_TO_PTR(pip->obj.pop, pip->obj.pop->heap_offset);
	heap->base = pip->obj.pop;
	pip->obj.alloc_classes = alloc_class_collection_new();
	pip->obj.heap = heap;
	/* make the context visible to the fault handlers */
	Pip = pip;
#ifndef _WIN32
	if (sigaction(SIGSEGV, &info_obj_sigaction, NULL)) {
#else
	if (AddVectoredExceptionHandler(CALL_FIRST, exception_handler) ==
		NULL) {
#endif
		perror("sigaction");
		/* don't leak the heap data on the error path */
		alloc_class_collection_delete(pip->obj.alloc_classes);
		free(heap);
		return -1;
	}
	pip->obj.uuid_lo = pmemobj_get_uuid_lo(pip->obj.pop);
	info_obj_descriptor(pip);
	info_obj_lanes(pip);
	info_obj_root_obj(pip);
	info_obj_heap(pip);
	info_obj_zones_chunks(pip);
	info_obj_stats(pip);
	free(heap);
	alloc_class_collection_delete(pip->obj.alloc_classes);
	return 0;
}
| 24,182 | 24.11215 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/check.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* check.c -- pmempool check command source file
*/
#include <getopt.h>
#include <stdlib.h>
#include "common.h"
#include "check.h"
#include "output.h"
#include "set.h"
#include "file.h"
#include "libpmempool.h"
/*
 * check_result_t -- summary result of a pool check; mirrors
 * enum pmempool_check_result from libpmempool
 */
typedef enum
{
	CHECK_RESULT_CONSISTENT,
	CHECK_RESULT_NOT_CONSISTENT,
	CHECK_RESULT_REPAIRED,
	CHECK_RESULT_CANNOT_REPAIR,
	CHECK_RESULT_SYNC_REQ,
	CHECK_RESULT_ERROR
} check_result_t;
/*
 * pmempool_check_context -- context and arguments for check command
 */
struct pmempool_check_context {
	int verbose; /* verbosity level */
	char *fname; /* file name */
	struct pool_set_file *pfile;
	bool repair; /* do repair */
	bool backup; /* do backup */
	bool advanced; /* do advanced repairs */
	char *backup_fname; /* backup file name */
	bool exec; /* do execute (false for dry run) */
	char ans; /* default answer on all questions or '?' */
};
/*
 * pmempool_check_default -- default arguments for check command
 */
static const struct pmempool_check_context pmempool_check_default = {
	.verbose = 1,
	.fname = NULL,
	.repair = false,
	.backup = false,
	.backup_fname = NULL,
	.advanced = false,
	.exec = true,
	.ans = '?',
};
/*
 * help_str -- string for help message
 */
static const char * const help_str =
"Check consistency of a pool\n"
"\n"
"Common options:\n"
"  -r, --repair         try to repair a pool file if possible\n"
"  -y, --yes            answer yes to all questions\n"
"  -d, --dry-run        don't execute, just show what would be done\n"
"  -b, --backup <file>  create backup of a pool file before executing\n"
"  -a, --advanced       perform advanced repairs\n"
"  -q, --quiet          be quiet and don't print any messages\n"
"  -v, --verbose        increase verbosity level\n"
"  -h, --help           display this help and exit\n"
"\n"
"For complete documentation see %s-check(1) manual page.\n"
;
/*
 * long_options -- command line options (must stay in sync with the
 * getopt string in pmempool_check_parse_args and with help_str above)
 */
static const struct option long_options[] = {
	{"repair", no_argument, NULL, 'r'},
	{"yes", no_argument, NULL, 'y'},
	{"dry-run", no_argument, NULL, 'd'},
	{"no-exec", no_argument, NULL, 'N'}, /* deprecated */
	{"backup", required_argument, NULL, 'b'},
	{"advanced", no_argument, NULL, 'a'},
	{"quiet", no_argument, NULL, 'q'},
	{"verbose", no_argument, NULL, 'v'},
	{"help", no_argument, NULL, 'h'},
	{NULL, 0, NULL, 0 },
};
/*
 * print_usage -- print short description of application's usage
 */
static void
print_usage(const char *appname)
{
	printf("Usage: %s check [<args>] <file>\n", appname);
}
/*
 * print_version -- print version string
 */
static void
print_version(const char *appname)
{
	printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmempool_check_help -- print help message for check command
 * (usage line, version and the full option listing)
 */
void
pmempool_check_help(const char *appname)
{
	print_usage(appname);
	print_version(appname);
	printf(help_str, appname);
}
/*
 * pmempool_check_parse_args -- parse command line arguments
 *
 * Fills *pcp; exits the process directly on -h (success) or on an
 * unknown option / missing file argument (failure).
 */
static int
pmempool_check_parse_args(struct pmempool_check_context *pcp,
	const char *appname, int argc, char *argv[])
{
	int opt;
	while ((opt = getopt_long(argc, argv, "ahvrdNb:qy",
		long_options, NULL)) != -1) {
		switch (opt) {
		case 'r':
			pcp->repair = true;
			break;
		case 'y':
			pcp->ans = 'y';
			break;
		case 'd':
		case 'N': /* deprecated alias of -d */
			pcp->exec = false;
			break;
		case 'b':
			pcp->backup = true;
			pcp->backup_fname = optarg;
			break;
		case 'a':
			pcp->advanced = true;
			break;
		case 'q':
			pcp->verbose = 0;
			break;
		case 'v':
			pcp->verbose = 2;
			break;
		case 'h':
			pmempool_check_help(appname);
			exit(EXIT_SUCCESS);
		default:
			print_usage(appname);
			exit(EXIT_FAILURE);
		}
	}
	/* the single positional argument is the pool file name */
	if (optind < argc) {
		pcp->fname = argv[optind];
	} else {
		print_usage(appname);
		exit(EXIT_FAILURE);
	}
	/* dry-run and backup make sense only together with repair */
	if (!pcp->repair && !pcp->exec) {
		outv_err("'-N' option requires '-r'\n");
		exit(EXIT_FAILURE);
	}
	if (!pcp->repair && pcp->backup) {
		outv_err("'-b' option requires '-r'\n");
		exit(EXIT_FAILURE);
	}
	return 0;
}
/*
 * pmempool_check_2_check_res_t -- map libpmempool result codes to the
 * local check_result_t values
 */
static check_result_t pmempool_check_2_check_res_t[] =
{
	[PMEMPOOL_CHECK_RESULT_CONSISTENT] = CHECK_RESULT_CONSISTENT,
	[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = CHECK_RESULT_NOT_CONSISTENT,
	[PMEMPOOL_CHECK_RESULT_REPAIRED] = CHECK_RESULT_REPAIRED,
	[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = CHECK_RESULT_CANNOT_REPAIR,
	[PMEMPOOL_CHECK_RESULT_SYNC_REQ] = CHECK_RESULT_SYNC_REQ,
	[PMEMPOOL_CHECK_RESULT_ERROR] = CHECK_RESULT_ERROR,
};
/*
 * check_ask -- (internal) present a libpmempool question to the user and
 * return the literal answer string ("yes", "no" or "?") expected by the
 * PMEMPOOL_CHECK_FORMAT_STR protocol
 */
static const char *
check_ask(const char *msg)
{
	char answer = ask_Yn('?', "%s", msg);
	switch (answer) {
	case 'y':
		return "yes";
	case 'n':
		return "no";
	default:
		return "?";
	}
}
/*
 * pmempool_check_perform -- (internal) run a libpmempool check with flags
 * derived from the parsed command-line options, relaying its messages and
 * questions, and translate the final result code
 */
static check_result_t
pmempool_check_perform(struct pmempool_check_context *pc)
{
	struct pmempool_check_args args = {
		.path = pc->fname,
		.backup_path = pc->backup_fname,
		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		.flags = PMEMPOOL_CHECK_FORMAT_STR
	};
	if (pc->repair)
		args.flags |= PMEMPOOL_CHECK_REPAIR;
	if (!pc->exec)
		args.flags |= PMEMPOOL_CHECK_DRY_RUN;
	if (pc->advanced)
		args.flags |= PMEMPOOL_CHECK_ADVANCED;
	if (pc->ans == 'y')
		args.flags |= PMEMPOOL_CHECK_ALWAYS_YES;
	if (pc->verbose == 2)
		args.flags |= PMEMPOOL_CHECK_VERBOSE;
	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL)
		return CHECK_RESULT_ERROR;
	/* pump the check state machine until it runs out of messages */
	struct pmempool_check_status *status = NULL;
	while ((status = pmempool_check(ppc)) != NULL) {
		switch (status->type) {
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			outv(1, "%s\n", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
			outv(2, "%s\n", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			status->str.answer = check_ask(status->str.msg);
			break;
		default:
			/* unknown message type -- abort the check */
			pmempool_check_end(ppc);
			exit(EXIT_FAILURE);
		}
	}
	enum pmempool_check_result ret = pmempool_check_end(ppc);
	return pmempool_check_2_check_res_t[ret];
}
/*
 * pmempool_check_func -- main function for check command
 *
 * Returns 0 for consistent/repaired/sync-required pools, -1 otherwise.
 */
int
pmempool_check_func(const char *appname, int argc, char *argv[])
{
	int ret = 0;
	check_result_t res = CHECK_RESULT_CONSISTENT;
	struct pmempool_check_context pc = pmempool_check_default;
	/* parse command line arguments */
	ret = pmempool_check_parse_args(&pc, appname, argc, argv);
	if (ret)
		return ret;
	/* set verbosity level */
	out_set_vlevel(pc.verbose);
	res = pmempool_check_perform(&pc);
	/* translate the check result into a message and exit status */
	switch (res) {
	case CHECK_RESULT_CONSISTENT:
		outv(2, "%s: consistent\n", pc.fname);
		ret = 0;
		break;
	case CHECK_RESULT_NOT_CONSISTENT:
		outv(1, "%s: not consistent\n", pc.fname);
		ret = -1;
		break;
	case CHECK_RESULT_REPAIRED:
		outv(1, "%s: repaired\n", pc.fname);
		ret = 0;
		break;
	case CHECK_RESULT_CANNOT_REPAIR:
		outv(1, "%s: cannot repair\n", pc.fname);
		ret = -1;
		break;
	case CHECK_RESULT_SYNC_REQ:
		outv(1, "%s: sync required\n", pc.fname);
		ret = 0;
		break;
	case CHECK_RESULT_ERROR:
		if (errno)
			outv_err("%s\n", strerror(errno));
		if (pc.repair)
			outv_err("repairing failed\n");
		else
			outv_err("checking consistency failed\n");
		ret = -1;
		break;
	default:
		outv_err("status unknown\n");
		ret = -1;
		break;
	}
	return ret;
}
| 7,163 | 21.670886 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* common.h -- declarations of common functions
*/
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <stdbool.h>
#include "queue.h"
#include "log.h"
#include "blk.h"
#include "libpmemobj.h"
#include "lane.h"
#include "ulog.h"
#include "memops.h"
#include "pmalloc.h"
#include "list.h"
#include "obj.h"
#include "memblock.h"
#include "heap_layout.h"
#include "tx.h"
#include "heap.h"
#include "btt_layout.h"
#include "page_size.h"
/* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */
#ifdef _WIN32
#include "srcversion.h"
#endif
#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
#define OPT_SHIFT 12
#define OPT_MASK (~((1 << OPT_SHIFT) - 1))
#define OPT_LOG (1 << (PMEM_POOL_TYPE_LOG + OPT_SHIFT))
#define OPT_BLK (1 << (PMEM_POOL_TYPE_BLK + OPT_SHIFT))
#define OPT_OBJ (1 << (PMEM_POOL_TYPE_OBJ + OPT_SHIFT))
#define OPT_BTT (1 << (PMEM_POOL_TYPE_BTT + OPT_SHIFT))
#define OPT_ALL (OPT_LOG | OPT_BLK | OPT_OBJ | OPT_BTT)
#define OPT_REQ_SHIFT 8
#define OPT_REQ_MASK ((1 << OPT_REQ_SHIFT) - 1)
#define _OPT_REQ(c, n) ((c) << (OPT_REQ_SHIFT * (n)))
#define OPT_REQ0(c) _OPT_REQ(c, 0)
#define OPT_REQ1(c) _OPT_REQ(c, 1)
#define OPT_REQ2(c) _OPT_REQ(c, 2)
#define OPT_REQ3(c) _OPT_REQ(c, 3)
#define OPT_REQ4(c) _OPT_REQ(c, 4)
#define OPT_REQ5(c) _OPT_REQ(c, 5)
#define OPT_REQ6(c) _OPT_REQ(c, 6)
#define OPT_REQ7(c) _OPT_REQ(c, 7)
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#define FOREACH_RANGE(range, ranges)\
PMDK_LIST_FOREACH(range, &(ranges)->head, next)
#define PLIST_OFF_TO_PTR(pop, off)\
((off) == 0 ? NULL : (void *)((uintptr_t)(pop) + (off) - OBJ_OOB_SIZE))
#define ENTRY_TO_ALLOC_HDR(entry)\
((void *)((uintptr_t)(entry) - sizeof(struct allocation_header)))
#define OBJH_FROM_PTR(ptr)\
((void *)((uintptr_t)(ptr) - sizeof(struct legacy_object_header)))
#define DEFAULT_HDR_SIZE PMEM_PAGESIZE
#define DEFAULT_DESC_SIZE PMEM_PAGESIZE
#define POOL_HDR_DESC_SIZE (DEFAULT_HDR_SIZE + DEFAULT_DESC_SIZE)
#define PTR_TO_ALLOC_HDR(ptr)\
((void *)((uintptr_t)(ptr) -\
sizeof(struct legacy_object_header)))
#define OBJH_TO_PTR(objh)\
((void *)((uintptr_t)(objh) + sizeof(struct legacy_object_header)))
/* invalid answer for ask_* functions */
#define INV_ANS '\0'
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* pmem_pool_type_t -- pool types
*/
typedef enum {
PMEM_POOL_TYPE_LOG = 0x01,
PMEM_POOL_TYPE_BLK = 0x02,
PMEM_POOL_TYPE_OBJ = 0x04,
PMEM_POOL_TYPE_BTT = 0x08,
PMEM_POOL_TYPE_ALL = 0x0f,
PMEM_POOL_TYPE_UNKNOWN = 0x80,
} pmem_pool_type_t;
struct option_requirement {
int opt;
pmem_pool_type_t type;
uint64_t req;
};
struct options {
const struct option *opts;
size_t noptions;
char *bitmap;
const struct option_requirement *req;
};
struct pmem_pool_params {
pmem_pool_type_t type;
char signature[POOL_HDR_SIG_LEN];
uint64_t size;
mode_t mode;
int is_poolset;
int is_part;
int is_checksum_ok;
union {
struct {
uint64_t bsize;
} blk;
struct {
char layout[PMEMOBJ_MAX_LAYOUT];
} obj;
};
};
struct pool_set_file {
int fd;
char *fname;
void *addr;
size_t size;
struct pool_set *poolset;
size_t replica;
time_t mtime;
mode_t mode;
bool fileio;
};
struct pool_set_file *pool_set_file_open(const char *fname,
int rdonly, int check);
void pool_set_file_close(struct pool_set_file *file);
int pool_set_file_read(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_write(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_set_replica(struct pool_set_file *file, size_t replica);
size_t pool_set_file_nreplicas(struct pool_set_file *file);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
void pool_set_file_persist(struct pool_set_file *file,
const void *addr, size_t len);
struct range {
PMDK_LIST_ENTRY(range) next;
uint64_t first;
uint64_t last;
};
struct ranges {
PMDK_LIST_HEAD(rangeshead, range) head;
};
pmem_pool_type_t pmem_pool_type_parse_hdr(const struct pool_hdr *hdrp);
pmem_pool_type_t pmem_pool_type(const void *base_pool_addr);
int pmem_pool_checksum(const void *base_pool_addr);
pmem_pool_type_t pmem_pool_type_parse_str(const char *str);
uint64_t pmem_pool_get_min_size(pmem_pool_type_t type);
int pmem_pool_parse_params(const char *fname, struct pmem_pool_params *paramsp,
int check);
int util_poolset_map(const char *fname, struct pool_set **poolset, int rdonly);
struct options *util_options_alloc(const struct option *options,
size_t nopts, const struct option_requirement *req);
void util_options_free(struct options *opts);
int util_options_verify(const struct options *opts, pmem_pool_type_t type);
int util_options_getopt(int argc, char *argv[], const char *optstr,
const struct options *opts);
pmem_pool_type_t util_get_pool_type_second_page(const void *pool_base_addr);
int util_parse_mode(const char *str, mode_t *mode);
int util_parse_ranges(const char *str, struct ranges *rangesp,
struct range entire);
int util_ranges_add(struct ranges *rangesp, struct range range);
void util_ranges_clear(struct ranges *rangesp);
int util_ranges_contain(const struct ranges *rangesp, uint64_t n);
int util_ranges_empty(const struct ranges *rangesp);
int util_check_memory(const uint8_t *buff, size_t len, uint8_t val);
int util_parse_chunk_types(const char *str, uint64_t *types);
int util_parse_lane_sections(const char *str, uint64_t *types);
char ask(char op, char *answers, char def_ans, const char *fmt, va_list ap);
char ask_Yn(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
char ask_yN(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
unsigned util_heap_max_zone(size_t size);
int util_pool_clear_badblocks(const char *path, int create);
static const struct range ENTIRE_UINT64 = {
{ NULL, NULL }, /* range */
0, /* first */
UINT64_MAX /* last */
};
| 5,957 | 28.205882 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/info_log.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* info_log.c -- pmempool info command source file for log pool
*/
#include <stdbool.h>
#include <stdlib.h>
#include <err.h>
#include <sys/mman.h>
#include "common.h"
#include "output.h"
#include "info.h"
/*
 * info_log_data -- print used data from log pool
 *
 * Returns 0 on success (or when there is nothing to print), -1 when the
 * data area cannot be mapped.
 */
static int
info_log_data(struct pmem_info *pip, int v, struct pmemlog *plp)
{
	if (!outv_check(v))
		return 0;
	uint64_t size_used = plp->write_offset - plp->start_offset;
	if (size_used == 0)
		return 0;
	uint8_t *addr = pool_set_file_map(pip->pfile, plp->start_offset);
	if (addr == MAP_FAILED) {
		warn("%s", pip->file_name);
		outv_err("cannot read pmem log data\n");
		return -1;
	}
	if (pip->args.log.walk == 0) {
		/* plain hexdump of the requested byte ranges */
		outv_title(v, "PMEMLOG data");
		struct range *curp = NULL;
		PMDK_LIST_FOREACH(curp, &pip->args.ranges.head, next) {
			uint8_t *ptr = addr + curp->first;
			/* clamp the range to the used part of the log */
			if (curp->last >= size_used)
				curp->last = size_used - 1;
			uint64_t count = curp->last - curp->first + 1;
			outv_hexdump(v, ptr, count, curp->first +
				plp->start_offset, 1);
			size_used -= count;
			if (!size_used)
				break;
		}
	} else {
		/*
		 * Walk through used data with fixed chunk size
		 * passed by user.
		 */
		uint64_t nchunks = size_used / pip->args.log.walk;
		outv_title(v, "PMEMLOG data [chunks: total = %lu size = %ld]",
			nchunks, pip->args.log.walk);
		struct range *curp = NULL;
		/* here the ranges select chunk indices, not byte offsets */
		PMDK_LIST_FOREACH(curp, &pip->args.ranges.head, next) {
			uint64_t i;
			for (i = curp->first; i <= curp->last &&
				i < nchunks; i++) {
				outv(v, "Chunk %10lu:\n", i);
				outv_hexdump(v, addr + i * pip->args.log.walk,
					pip->args.log.walk,
					plp->start_offset +
					i * pip->args.log.walk,
					1);
			}
		}
	}
	return 0;
}
/*
 * info_log_stats -- print log type pool statistics
 */
static void
info_log_stats(struct pmem_info *pip, int v, struct pmemlog *plp)
{
	uint64_t size_total = plp->end_offset - plp->start_offset;
	uint64_t size_used = plp->write_offset - plp->start_offset;
	uint64_t size_avail = size_total - size_used;
	/* guards the percentage division below */
	if (size_total == 0)
		return;
	double perc_used = (double)size_used / (double)size_total * 100.0;
	double perc_avail = 100.0 - perc_used;
	outv_title(v, "PMEM LOG Statistics");
	outv_field(v, "Total", "%s",
		out_get_size_str(size_total, pip->args.human));
	outv_field(v, "Available", "%s [%s]",
		out_get_size_str(size_avail, pip->args.human),
		out_get_percentage(perc_avail));
	outv_field(v, "Used", "%s [%s]",
		out_get_size_str(size_used, pip->args.human),
		out_get_percentage(perc_used));
}
/*
 * info_log_descriptor -- print pmemlog descriptor and return 1 if
 * write offset is valid
 *
 * Also converts the descriptor fields to host byte order as a side
 * effect (log_convert2h), so callers see native-endian offsets.
 */
static int
info_log_descriptor(struct pmem_info *pip, int v, struct pmemlog *plp)
{
	outv_title(v, "PMEM LOG Header");
	/* dump pmemlog header without pool_hdr */
	outv_hexdump(pip->args.vhdrdump, (uint8_t *)plp + sizeof(plp->hdr),
			sizeof(*plp) - sizeof(plp->hdr),
			sizeof(plp->hdr), 1);
	log_convert2h(plp);
	/* the write offset must lie within the usable area */
	int write_offset_valid = plp->write_offset >= plp->start_offset &&
		plp->write_offset <= plp->end_offset;
	/* PRIx64 -- portable for uint64_t on both LP64 and ILP32 */
	outv_field(v, "Start offset", "0x%" PRIx64, plp->start_offset);
	outv_field(v, "Write offset", "0x%" PRIx64 " [%s]", plp->write_offset,
			write_offset_valid ? "OK" : "ERROR");
	outv_field(v, "End offset", "0x%" PRIx64, plp->end_offset);
	return write_offset_valid;
}
/*
 * pmempool_info_log -- print information about log type pool
 *
 * Reads the pmemlog descriptor from the pool file, prints it, and --
 * if the descriptor is valid -- follows up with statistics and a data
 * dump at the verbosity levels requested on the command line.
 * Returns 0 on success, -1 on failure.
 */
int
pmempool_info_log(struct pmem_info *pip)
{
	int ret = 0;
	struct pmemlog *plp = malloc(sizeof(struct pmemlog));
	if (!plp)
		err(1, "Cannot allocate memory for pmemlog structure");
	if (pmempool_info_read(pip, plp, sizeof(struct pmemlog), 0)) {
		outv_err("cannot read pmemlog header\n");
		free(plp);
		return -1;
	}
	/* stats and data are only printed when the write offset is sane */
	if (info_log_descriptor(pip, VERBOSE_DEFAULT, plp)) {
		info_log_stats(pip, pip->args.vstats, plp);
		ret = info_log_data(pip, pip->args.vdata, plp);
	}
	free(plp);
	return ret;
}
| 3,972 | 23.677019 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/info.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* info.h -- pmempool info command header file
*/
#include "vec.h"
/*
* Verbose levels used in application:
*
* VERBOSE_DEFAULT:
* Default value for application's verbosity level.
* This is also set for data structures which should be
* printed without any command line argument.
*
* VERBOSE_MAX:
* Maximum value for application's verbosity level.
* This value is used when -v command line argument passed.
*
* VERBOSE_SILENT:
* This value is higher than VERBOSE_MAX and it is used only
* for verbosity levels of data structures which should _not_ be
* printed without specified command line arguments.
*/
#define VERBOSE_SILENT 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_MAX 2
/*
* print_bb_e -- printing bad blocks options
*/
enum print_bb_e {
PRINT_BAD_BLOCKS_NOT_SET,
PRINT_BAD_BLOCKS_NO,
PRINT_BAD_BLOCKS_YES,
PRINT_BAD_BLOCKS_MAX
};
/*
* pmempool_info_args -- structure for storing command line arguments
*/
struct pmempool_info_args {
char *file; /* input file */
unsigned col_width; /* column width for printing fields */
bool human; /* sizes in human-readable formats */
bool force; /* force parsing pool */
enum print_bb_e badblocks; /* print bad blocks */
pmem_pool_type_t type; /* forced pool type */
bool use_range; /* use range for blocks */
struct ranges ranges; /* range of block/chunks to dump */
int vlevel; /* verbosity level */
int vdata; /* verbosity level for data dump */
int vhdrdump; /* verbosity level for headers hexdump */
int vstats; /* verbosity level for statistics */
struct {
size_t walk; /* data chunk size */
} log;
struct {
int vmap; /* verbosity level for BTT Map */
int vflog; /* verbosity level for BTT FLOG */
int vbackup; /* verbosity level for BTT Info backup */
bool skip_zeros; /* skip blocks marked with zero flag */
bool skip_error; /* skip blocks marked with error flag */
bool skip_no_flag; /* skip blocks not marked with any flag */
} blk;
struct {
int vlanes; /* verbosity level for lanes */
int vroot;
int vobjects;
int valloc;
int voobhdr;
int vheap;
int vzonehdr;
int vchunkhdr;
int vbitmap;
bool lanes_recovery;
bool ignore_empty_obj;
uint64_t chunk_types;
size_t replica;
struct ranges lane_ranges;
struct ranges type_ranges;
struct ranges zone_ranges;
struct ranges chunk_ranges;
} obj;
};
/*
* pmem_blk_stats -- structure with statistics for pmemblk
*/
struct pmem_blk_stats {
uint32_t total; /* number of processed blocks */
uint32_t zeros; /* number of blocks marked by zero flag */
uint32_t errors; /* number of blocks marked by error flag */
uint32_t noflag; /* number of blocks not marked with any flag */
};
/* per-allocation-class statistics -- assumes palloc class layout; verify */
struct pmem_obj_class_stats {
	uint64_t n_units;	/* total number of units */
	uint64_t n_used;	/* units in use */
	uint64_t unit_size;
	uint64_t alignment;
	uint32_t nallocs;
	uint16_t flags;
};
/* per-zone statistics, including per-chunk-type breakdowns */
struct pmem_obj_zone_stats {
	uint64_t n_chunks;	/* chunks in this zone */
	uint64_t n_chunks_type[MAX_CHUNK_TYPE];	/* count by chunk type */
	uint64_t size_chunks;
	uint64_t size_chunks_type[MAX_CHUNK_TYPE];
	VEC(, struct pmem_obj_class_stats) class_stats;
};
/* statistics for a single object type number (linked-list node) */
struct pmem_obj_type_stats {
	PMDK_TAILQ_ENTRY(pmem_obj_type_stats) next;
	uint64_t type_num;	/* object type number */
	uint64_t n_objects;	/* objects of this type */
	uint64_t n_bytes;	/* total bytes used by this type */
};
/* aggregate statistics for an obj pool */
struct pmem_obj_stats {
	uint64_t n_total_objects;
	uint64_t n_total_bytes;
	uint64_t n_zones;
	uint64_t n_zones_used;
	struct pmem_obj_zone_stats *zone_stats;	/* array, one per zone */
	PMDK_TAILQ_HEAD(obj_type_stats_head, pmem_obj_type_stats) type_stats;
};
/*
* pmem_info -- context for pmeminfo application
*/
struct pmem_info {
const char *file_name; /* current file name */
struct pool_set_file *pfile;
struct pmempool_info_args args; /* arguments parsed from command line */
struct options *opts;
struct pool_set *poolset;
pmem_pool_type_t type;
struct pmem_pool_params params;
struct {
struct pmem_blk_stats stats;
} blk;
struct {
struct pmemobjpool *pop;
struct palloc_heap *heap;
struct alloc_class_collection *alloc_classes;
size_t size;
struct pmem_obj_stats stats;
uint64_t uuid_lo;
uint64_t objid;
} obj;
};
int pmempool_info_func(const char *appname, int argc, char *argv[]);
void pmempool_info_help(const char *appname);
int pmempool_info_read(struct pmem_info *pip, void *buff,
size_t nbytes, uint64_t off);
int pmempool_info_blk(struct pmem_info *pip);
int pmempool_info_log(struct pmem_info *pip);
int pmempool_info_obj(struct pmem_info *pip);
int pmempool_info_btt(struct pmem_info *pip);
| 4,492 | 25.904192 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/output.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* output.h -- declarations of output printing related functions
*/
#include <time.h>
#include <stdint.h>
#include <stdio.h>
/* global output configuration */
void out_set_vlevel(int vlevel);
void out_set_stream(FILE *stream);
void out_set_prefix(const char *prefix);
void out_set_col_width(unsigned col_width);
/* error reporting */
void outv_err(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void out_err(const char *file, int line, const char *func,
		const char *fmt, ...) FORMAT_PRINTF(4, 5);
void outv_err_vargs(const char *fmt, va_list ap);
/* verbosity-gated printing -- no-ops when vlevel is above current level */
void outv_indent(int vlevel, int i);
void outv(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_nl(int vlevel);
int outv_check(int vlevel);
void outv_title(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_field(int vlevel, const char *field, const char *fmt,
		...) FORMAT_PRINTF(3, 4);
void outv_hexdump(int vlevel, const void *addr, size_t len, size_t offset,
		int sep);
/*
 * string formatters -- NOTE(review): these appear to return pointers to
 * static buffers (no free by callers anywhere in this file); confirm
 * thread-safety expectations in output.c
 */
const char *out_get_uuid_str(uuid_t uuid);
const char *out_get_time_str(time_t time);
const char *out_get_size_str(uint64_t size, int human);
const char *out_get_percentage(double percentage);
const char *out_get_checksum(void *addr, size_t len, uint64_t *csump,
		uint64_t skip_off);
const char *out_get_btt_map_entry(uint32_t map);
const char *out_get_pool_type_str(pmem_pool_type_t type);
const char *out_get_pool_signature(pmem_pool_type_t type);
const char *out_get_tx_state_str(uint64_t state);
const char *out_get_chunk_type_str(enum chunk_type type);
const char *out_get_chunk_flags(uint16_t flags);
const char *out_get_zone_magic_str(uint32_t magic);
const char *out_get_pmemoid_str(PMEMoid oid, uint64_t uuid_lo);
const char *out_get_arch_machine_class_str(uint8_t machine_class);
const char *out_get_arch_data_str(uint8_t data);
const char *out_get_arch_machine_str(uint16_t machine);
const char *out_get_last_shutdown_str(uint8_t dirty);
const char *out_get_alignment_desc_str(uint64_t ad, uint64_t cur_ad);
const char *out_get_incompat_features_str(uint32_t incompat);
| 2,070 | 41.265306 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/pmempool/synchronize.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* synchronize.c -- pmempool sync command source file
*/
#include "synchronize.h"
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <endian.h>
#include "common.h"
#include "output.h"
#include "libpmempool.h"
/*
* pmempool_sync_context -- context and arguments for sync command
*/
struct pmempool_sync_context {
unsigned flags; /* flags which modify the command execution */
char *poolset_file; /* a path to a poolset file */
};
/*
* pmempool_sync_default -- default arguments for sync command
*/
static const struct pmempool_sync_context pmempool_sync_default = {
.flags = 0,
.poolset_file = NULL,
};
/*
* help_str -- string for help message
*/
static const char * const help_str =
"Check consistency of a pool\n"
"\n"
"Common options:\n"
" -b, --bad-blocks fix bad blocks - it requires creating or reading special recovery files\n"
" -d, --dry-run do not apply changes, only check for viability of synchronization\n"
" -v, --verbose increase verbosity level\n"
" -h, --help display this help and exit\n"
"\n"
"For complete documentation see %s-sync(1) manual page.\n"
;
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"bad-blocks", no_argument, NULL, 'b'},
{"dry-run", no_argument, NULL, 'd'},
{"help", no_argument, NULL, 'h'},
{"verbose", no_argument, NULL, 'v'},
{NULL, 0, NULL, 0 },
};
/*
 * print_usage -- (internal) print the short usage line for 'sync'
 */
static void
print_usage(const char *appname)
{
	printf("usage: %s sync [<options>] <poolset_file>\n", appname);
}
/*
 * print_version -- (internal) print the application name and version
 */
static void
print_version(const char *appname)
{
	printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmempool_sync_help -- print help message for the sync command
 *
 * Prints the usage line, the version string and then the full option
 * description (help_str is a printf format taking the app name).
 */
void
pmempool_sync_help(const char *appname)
{
	print_usage(appname);
	print_version(appname);
	printf(help_str, appname);
}
/*
 * pmempool_sync_parse_args -- (internal) parse command line arguments
 *
 * Fills ctx->flags and ctx->poolset_file.  Exits the process directly
 * on -h (success) or on a parse error / missing poolset file (failure);
 * otherwise returns 0.
 */
static int
pmempool_sync_parse_args(struct pmempool_sync_context *ctx, const char *appname,
		int argc, char *argv[])
{
	int opt;
	while ((opt = getopt_long(argc, argv, "bdhv",
			long_options, NULL)) != -1) {
		switch (opt) {
		case 'd':
			ctx->flags |= PMEMPOOL_SYNC_DRY_RUN;
			break;
		case 'b':
			ctx->flags |= PMEMPOOL_SYNC_FIX_BAD_BLOCKS;
			break;
		case 'h':
			pmempool_sync_help(appname);
			exit(EXIT_SUCCESS);
		case 'v':
			out_set_vlevel(1);
			break;
		default:
			print_usage(appname);
			exit(EXIT_FAILURE);
		}
	}
	/* the single positional argument is the poolset file (required) */
	if (optind < argc) {
		ctx->poolset_file = argv[optind];
	} else {
		print_usage(appname);
		exit(EXIT_FAILURE);
	}
	return 0;
}
/*
 * pmempool_sync_func -- main function for the sync command
 *
 * Parses the command line and delegates to libpmempool's
 * pmempool_sync().  Returns 0 on success, -1 on failure (with the
 * library error message printed to stderr).
 */
int
pmempool_sync_func(const char *appname, int argc, char *argv[])
{
	int ret = 0;
	struct pmempool_sync_context ctx = pmempool_sync_default;
	/* parse command line arguments */
	if ((ret = pmempool_sync_parse_args(&ctx, appname, argc, argv)))
		return ret;
	ret = pmempool_sync(ctx.poolset_file, ctx.flags);
	if (ret) {
		outv_err("failed to synchronize: %s\n", pmempool_errormsg());
		if (errno)
			outv_err("%s\n", strerror(errno));
		return -1;
	} else {
		/* printed only at verbosity >= 1 (-v) */
		outv(1, "%s: synchronized\n", ctx.poolset_file);
		return 0;
	}
}
| 3,499 | 21.151899 | 98 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/tools/daxio/daxio.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* daxio.c -- simple app for reading and writing data from/to
* Device DAX device using mmap instead of file I/O API
*/
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <limits.h>
#include <string.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <libpmem.h>
#include "util.h"
#include "os.h"
#include "badblocks.h"
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))
#define ERR(fmt, ...)\
do {\
fprintf(stderr, "daxio: " fmt, ##__VA_ARGS__);\
} while (0)
#define FAIL(func)\
do {\
fprintf(stderr, "daxio: %s:%d: %s: %s\n",\
__func__, __LINE__, func, strerror(errno));\
} while (0)
#define USAGE_MESSAGE \
"Usage: daxio [option] ...\n"\
"Valid options:\n"\
" -i, --input=FILE - input device/file (default stdin)\n"\
" -o, --output=FILE - output device/file (default stdout)\n"\
" -k, --skip=BYTES - skip offset for input (default 0)\n"\
" -s, --seek=BYTES - seek offset for output (default 0)\n"\
" -l, --len=BYTES - total length to perform the I/O\n"\
" -b, --clear-bad-blocks=<yes|no> - clear bad blocks (default: yes)\n"\
" -z, --zero - zeroing the device\n"\
" -h. --help - print this help\n"\
" -V, --version - display version of daxio\n"
/* one side (input or output) of the I/O operation */
struct daxio_device {
	char *path;	/* file/device path, or "STDIN"/"STDOUT" */
	int fd;		/* open descriptor; -1 until setup_device() */
	size_t size;	/* actual file/device size */
	int is_devdax;	/* true if this is a Device DAX char device */
	/* Device DAX only */
	size_t align;	/* internal device alignment */
	char *addr;	/* mapping base address */
	size_t maplen;	/* mapping length */
	size_t offset;	/* seek or skip */
	unsigned major;	/* char device major number */
	unsigned minor;	/* char device minor number */
	struct ndctl_ctx *ndctl_ctx;
	struct ndctl_region *region;	/* parent region */
};
/*
* daxio_context -- context and arguments
*/
struct daxio_context {
size_t len; /* total length of I/O */
int zero;
int clear_bad_blocks;
struct daxio_device src;
struct daxio_device dst;
};
/*
* default context
*/
static struct daxio_context Ctx = {
SIZE_MAX, /* len */
0, /* zero */
1, /* clear_bad_blocks */
{ NULL, -1, SIZE_MAX, 0, 0, NULL, 0, 0, 0, 0, NULL, NULL },
{ NULL, -1, SIZE_MAX, 0, 0, NULL, 0, 0, 0, 0, NULL, NULL },
};
/*
 * print_version -- print daxio version
 */
static void
print_version(void)
{
	printf("%s\n", SRCVERSION);
}
/*
 * print_usage -- print short description of usage
 */
static void
print_usage(void)
{
	fprintf(stderr, USAGE_MESSAGE);
}
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"input", required_argument, NULL, 'i'},
{"output", required_argument, NULL, 'o'},
{"skip", required_argument, NULL, 'k'},
{"seek", required_argument, NULL, 's'},
{"len", required_argument, NULL, 'l'},
{"clear-bad-blocks", required_argument, NULL, 'b'},
{"zero", no_argument, NULL, 'z'},
{"help", no_argument, NULL, 'h'},
{"version", no_argument, NULL, 'V'},
{NULL, 0, NULL, 0 },
};
/*
 * parse_args -- (internal) parse command line arguments
 *
 * Fills the given context from argv.  Returns 0 on success, -1 on an
 * invalid size argument; exits the process directly for -h/-V and for
 * unknown options.
 */
static int
parse_args(struct daxio_context *ctx, int argc, char * const argv[])
{
	int opt;
	size_t offset;
	size_t len;
	while ((opt = getopt_long(argc, argv, "i:o:k:s:l:b:zhV",
			long_options, NULL)) != -1) {
		switch (opt) {
		case 'i':
			ctx->src.path = optarg;
			break;
		case 'o':
			ctx->dst.path = optarg;
			break;
		case 'k':
			/* skip offset applies to the input device */
			if (util_parse_size(optarg, &offset)) {
				ERR("'%s' -- invalid input offset\n", optarg);
				return -1;
			}
			ctx->src.offset = offset;
			break;
		case 's':
			/* seek offset applies to the output device */
			if (util_parse_size(optarg, &offset)) {
				ERR("'%s' -- invalid output offset\n", optarg);
				return -1;
			}
			ctx->dst.offset = offset;
			break;
		case 'l':
			if (util_parse_size(optarg, &len)) {
				ERR("'%s' -- invalid length\n", optarg);
				return -1;
			}
			ctx->len = len;
			break;
		case 'z':
			ctx->zero = 1;
			break;
		case 'b':
			/* only the exact strings "yes"/"no" are accepted */
			if (strcmp(optarg, "no") == 0) {
				ctx->clear_bad_blocks = 0;
			} else if (strcmp(optarg, "yes") == 0) {
				ctx->clear_bad_blocks = 1;
			} else {
				ERR(
					"'%s' -- invalid argument of the '--clear-bad-blocks' option\n",
					optarg);
				return -1;
			}
			break;
		case 'h':
			print_usage();
			exit(EXIT_SUCCESS);
		case 'V':
			print_version();
			exit(EXIT_SUCCESS);
		default:
			print_usage();
			exit(EXIT_FAILURE);
		}
	}
	return 0;
}
/*
 * validate_args -- (internal) validate command line arguments
 *
 * Checks option combinations and substitutes stdin/stdout for a
 * missing input/output file.  Returns 0 on success, -1 on an invalid
 * combination (with a message printed to stderr).
 */
static int
validate_args(struct daxio_context *ctx)
{
	if (ctx->zero && ctx->dst.path == NULL) {
		ERR("zeroing flag specified but no output file provided\n");
		return -1;
	}
	if (!ctx->zero && ctx->src.path == NULL && ctx->dst.path == NULL) {
		ERR("an input file and/or an output file must be provided\n");
		return -1;
	}
	/* if no input file provided, use stdin */
	if (ctx->src.path == NULL) {
		/* --skip makes no sense for a non-seekable stream */
		if (ctx->src.offset != 0) {
			ERR(
				"skip offset specified but no input file provided\n");
			return -1;
		}
		ctx->src.fd = STDIN_FILENO;
		ctx->src.path = "STDIN";
	}
	/* if no output file provided, use stdout */
	if (ctx->dst.path == NULL) {
		if (ctx->dst.offset != 0) {
			ERR(
				"seek offset specified but no output file provided\n");
			return -1;
		}
		ctx->dst.fd = STDOUT_FILENO;
		ctx->dst.path = "STDOUT";
	}
	return 0;
}
/*
 * match_dev_dax -- (internal) find Device DAX by major/minor device number
 *
 * On a match, records the device size in 'dev' and returns 1;
 * otherwise returns 0.
 */
static int
match_dev_dax(struct daxio_device *dev, struct daxctl_region *dax_region)
{
	struct daxctl_dev *cur;
	daxctl_dev_foreach(dax_region, cur) {
		/* minor is only queried when major already matches */
		if (dev->major != (unsigned)daxctl_dev_get_major(cur) ||
				dev->minor != (unsigned)daxctl_dev_get_minor(cur))
			continue;
		dev->size = daxctl_dev_get_size(cur);
		return 1;
	}
	return 0;
}
/*
 * find_dev_dax -- (internal) check if device is Device DAX
 *
 * If there is matching Device DAX, find its region, size and alignment.
 * Searches ndctl-managed regions first, then falls back to standalone
 * daxctl regions.  Returns 1 when a match was found, 0 otherwise.
 */
static int
find_dev_dax(struct ndctl_ctx *ndctl_ctx, struct daxio_device *dev)
{
	struct ndctl_bus *bus = NULL;
	struct ndctl_region *region = NULL;
	struct ndctl_dax *dax = NULL;
	struct daxctl_region *dax_region = NULL;
	ndctl_bus_foreach(ndctl_ctx, bus) {
		ndctl_region_foreach(bus, region) {
			ndctl_dax_foreach(region, dax) {
				dax_region = ndctl_dax_get_daxctl_region(dax);
				if (match_dev_dax(dev, dax_region)) {
					dev->is_devdax = 1;
					dev->align = ndctl_dax_get_align(dax);
					dev->region = region;
					return 1;
				}
			}
		}
	}
	/* try with dax regions */
	struct daxctl_ctx *daxctl_ctx;
	if (daxctl_new(&daxctl_ctx))
		return 0;
	int ret = 0;
	daxctl_region_foreach(daxctl_ctx, dax_region) {
		if (match_dev_dax(dev, dax_region)) {
			dev->is_devdax = 1;
			dev->align = daxctl_region_get_align(dax_region);
			/*
			 * NOTE(review): 'region' here is the leftover ndctl
			 * loop variable (NULL or stale after the loops above),
			 * not a region matching this daxctl device -- confirm
			 * whether dev->region is ever used on this path.
			 */
			dev->region = region;
			ret = 1;
			goto end;
		}
	}
end:
	daxctl_unref(daxctl_ctx);
	return ret;
}
/*
 * setup_device -- (internal) open/mmap file/device
 *
 * Opens (creating the output file if needed), detects the file type,
 * and for Device DAX additionally clears bad blocks (output side only,
 * when enabled) and memory-maps the device with alignment-adjusted
 * offset/length.  Returns 0 on success, -1 on failure.
 */
static int
setup_device(struct ndctl_ctx *ndctl_ctx, struct daxio_device *dev, int is_dst,
		int clear_bad_blocks)
{
	int ret;
	int flags = O_RDWR;
	int prot = is_dst ? PROT_WRITE : PROT_READ;
	if (dev->fd != -1) {
		dev->size = SIZE_MAX;
		return 0;	/* stdin/stdout */
	}
	/* try to open file/device (if exists) */
	dev->fd = os_open(dev->path, flags, S_IRUSR|S_IWUSR);
	if (dev->fd == -1) {
		ret = errno;
		if (ret == ENOENT && is_dst) {
			/* file does not exist - create it */
			flags = O_CREAT|O_WRONLY|O_TRUNC;
			dev->size = SIZE_MAX;
			dev->fd = os_open(dev->path, flags, S_IRUSR|S_IWUSR);
			if (dev->fd == -1) {
				FAIL("open");
				return -1;
			}
			return 0;
		} else {
			ERR("failed to open '%s': %s\n", dev->path,
					strerror(errno));
			return -1;
		}
	}
	struct stat stbuf;
	ret = fstat(dev->fd, &stbuf);
	if (ret == -1) {
		FAIL("stat");
		return -1;
	}
	/* check if this is regular file or device */
	if (S_ISREG(stbuf.st_mode)) {
		if (is_dst)
			dev->size = SIZE_MAX;
		else
			dev->size = (size_t)stbuf.st_size;
	} else if (S_ISBLK(stbuf.st_mode)) {
		dev->size = (size_t)stbuf.st_size;
	} else if (S_ISCHR(stbuf.st_mode)) {
		/* char device -- candidate for Device DAX */
		dev->size = SIZE_MAX;
		dev->major = major(stbuf.st_rdev);
		dev->minor = minor(stbuf.st_rdev);
	} else {
		/* unsupported file type (fifo, socket, ...) */
		return -1;
	}
	/* check if this is Device DAX */
	if (S_ISCHR(stbuf.st_mode))
		find_dev_dax(ndctl_ctx, dev);
	/* non-DAX files/devices are accessed with read/write, not mmap */
	if (!dev->is_devdax)
		return 0;
	if (is_dst && clear_bad_blocks) {
		/* XXX - clear only badblocks in range bound by offset/len */
		if (badblocks_clear_all(dev->path)) {
			ERR("failed to clear bad blocks on \"%s\"\n"
				" Probably you have not enough permissions to do that.\n"
				" You can choose one of three options now:\n"
				" 1) run 'daxio' with 'sudo' or as 'root',\n"
				" 2) turn off clearing bad blocks using\n"
				" the '-b/--clear-bad-blocks=no' option or\n"
				" 3) change permissions of some resource files -\n"
				" - for details see the description of the CHECK_BAD_BLOCKS\n"
				" compat feature in the pmempool-feature(1) man page.\n",
				dev->path);
			return -1;
		}
	}
	if (dev->align == ULONG_MAX) {
		ERR("cannot determine device alignment for \"%s\"\n",
				dev->path);
		return -1;
	}
	if (dev->offset > dev->size) {
		ERR("'%zu' -- offset beyond device size (%zu)\n",
				dev->offset, dev->size);
		return -1;
	}
	/* align len/offset to the internal device alignment */
	dev->maplen = ALIGN_UP(dev->size, dev->align);
	size_t offset = ALIGN_DOWN(dev->offset, dev->align);
	/* dev->offset becomes the residue within the aligned mapping */
	dev->offset = dev->offset - offset;
	dev->maplen = dev->maplen - offset;
	dev->addr = mmap(NULL, dev->maplen, prot, MAP_SHARED, dev->fd,
			(off_t)offset);
	if (dev->addr == MAP_FAILED) {
		FAIL("mmap");
		return -1;
	}
	return 0;
}
/*
 * setup_devices -- (internal) open/mmap input and output
 *
 * The input side is only prepared when we are copying (not zeroing).
 */
static int
setup_devices(struct ndctl_ctx *ndctl_ctx, struct daxio_context *ctx)
{
	if (!ctx->zero) {
		if (setup_device(ndctl_ctx, &ctx->src, 0,
				ctx->clear_bad_blocks) != 0)
			return -1;
	}
	return setup_device(ndctl_ctx, &ctx->dst, 1, ctx->clear_bad_blocks);
}
/*
 * adjust_io_len -- (internal) calculate I/O length if not specified
 *
 * Clamps ctx->len to the space available in the mmapped Device DAX
 * side(s); when no length was given (SIZE_MAX), the full available
 * range is used.
 */
static void
adjust_io_len(struct daxio_context *ctx)
{
	size_t in_avail = ctx->src.maplen - ctx->src.offset;
	size_t out_avail = ctx->dst.maplen - ctx->dst.offset;
	size_t limit = SIZE_MAX;
	if (ctx->zero)
		assert(ctx->dst.is_devdax);
	else
		assert(ctx->src.is_devdax || ctx->dst.is_devdax);
	if (ctx->src.is_devdax)
		limit = in_avail;
	if (ctx->dst.is_devdax && out_avail < limit)
		limit = out_avail;
	/* honor an explicit length only when it fits within the mapping(s) */
	if (ctx->len == SIZE_MAX || ctx->len > limit)
		ctx->len = limit;
}
/*
 * cleanup_device -- (internal) unmap/close file/device
 */
static void
cleanup_device(struct daxio_device *dev)
{
	if (dev->addr != NULL)
		(void) munmap(dev->addr, dev->maplen);
	if (dev->fd != -1 && dev->path != NULL)
		(void) close(dev->fd);
}
/*
 * cleanup_devices -- (internal) unmap/close input and output
 *
 * The input side only exists when copying (not zeroing).
 */
static void
cleanup_devices(struct daxio_context *ctx)
{
	cleanup_device(&ctx->dst);
	if (ctx->zero == 0)
		cleanup_device(&ctx->src);
}
/*
 * do_io -- (internal) write data to device/file
 *
 * Performs one of four operations: zero a Device DAX, DAX-to-DAX
 * memcpy, DAX-to-file write, or file-to-DAX read.  Persistency of the
 * DAX destination is ensured with libpmem.  Returns 0 on success
 * (the "copied N bytes" status goes to stderr), -1 on failure.
 */
static int
do_io(struct ndctl_ctx *ndctl_ctx, struct daxio_context *ctx)
{
	ssize_t cnt = 0;
	assert(ctx->src.is_devdax || ctx->dst.is_devdax);
	if (ctx->zero) {
		if (ctx->dst.offset > ctx->dst.maplen) {
			ERR("output offset larger than device size");
			return -1;
		}
		if (ctx->dst.offset + ctx->len > ctx->dst.maplen) {
			ERR("output offset beyond device size");
			return -1;
		}
		char *dst_addr = ctx->dst.addr + ctx->dst.offset;
		pmem_memset_persist(dst_addr, 0, ctx->len);
		cnt = (ssize_t)ctx->len;
	} else if (ctx->src.is_devdax && ctx->dst.is_devdax) {
		/* memcpy between src and dst */
		char *src_addr = ctx->src.addr + ctx->src.offset;
		char *dst_addr = ctx->dst.addr + ctx->dst.offset;
		pmem_memcpy_persist(dst_addr, src_addr, ctx->len);
		cnt = (ssize_t)ctx->len;
	} else if (ctx->src.is_devdax) {
		/* write to file directly from mmap'ed src */
		char *src_addr = ctx->src.addr + ctx->src.offset;
		if (ctx->dst.offset) {
			if (lseek(ctx->dst.fd, (off_t)ctx->dst.offset,
					SEEK_SET) < 0) {
				FAIL("lseek");
				goto err;
			}
		}
		/* loop until the whole length is written (short writes) */
		do {
			ssize_t wcnt = write(ctx->dst.fd, src_addr + cnt,
					ctx->len - (size_t)cnt);
			if (wcnt == -1) {
				FAIL("write");
				goto err;
			}
			cnt += wcnt;
		} while ((size_t)cnt < ctx->len);
	} else if (ctx->dst.is_devdax) {
		/* read from file directly to mmap'ed dst */
		char *dst_addr = ctx->dst.addr + ctx->dst.offset;
		if (ctx->src.offset) {
			if (lseek(ctx->src.fd, (off_t)ctx->src.offset,
					SEEK_SET) < 0) {
				FAIL("lseek");
				return -1;
			}
		}
		do {
			ssize_t rcnt = read(ctx->src.fd, dst_addr + cnt,
					ctx->len - (size_t)cnt);
			if (rcnt == -1) {
				FAIL("read");
				goto err;
			}
			/* end of file */
			if (rcnt == 0)
				break;
			cnt = cnt + rcnt;
		} while ((size_t)cnt < ctx->len);
		/* only the bytes actually read need to be persisted */
		pmem_persist(dst_addr, (size_t)cnt);
		if ((size_t)cnt != ctx->len)
			ERR("requested size %zu larger than source\n",
					ctx->len);
	}
	/* status message -- intentionally on stderr (ERR macro) */
	ERR("copied %zd bytes to device \"%s\"\n", cnt, ctx->dst.path);
	return 0;
err:
	ERR("failed to perform I/O\n");
	return -1;
}
/*
 * main -- parse arguments, set up both sides, perform the I/O.
 * At least one side must be a Device DAX device.
 */
int
main(int argc, char **argv)
{
	struct ndctl_ctx *ndctl_ctx;
	int ret = EXIT_SUCCESS;
	if (parse_args(&Ctx, argc, argv))
		return EXIT_FAILURE;
	if (validate_args(&Ctx))
		return EXIT_FAILURE;
	if (ndctl_new(&ndctl_ctx))
		return EXIT_FAILURE;
	if (setup_devices(ndctl_ctx, &Ctx)) {
		ret = EXIT_FAILURE;
		goto err;
	}
	if (!Ctx.src.is_devdax && !Ctx.dst.is_devdax) {
		ERR("neither input nor output is device dax\n");
		ret = EXIT_FAILURE;
		goto err;
	}
	adjust_io_len(&Ctx);
	if (do_io(ndctl_ctx, &Ctx))
		ret = EXIT_FAILURE;
err:
	cleanup_devices(&Ctx);
	ndctl_unref(ndctl_ctx);
	return ret;
}
| 14,160 | 22.291118 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemlog/log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* log.h -- internal definitions for libpmem log module
*/
#ifndef LOG_H
#define LOG_H 1
#include <stdint.h>
#include <stddef.h>
#include <endian.h>
#include "ctl.h"
#include "util.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMLOG_LOG_PREFIX "libpmemlog"
#define PMEMLOG_LOG_LEVEL_VAR "PMEMLOG_LOG_LEVEL"
#define PMEMLOG_LOG_FILE_VAR "PMEMLOG_LOG_FILE"
/* attributes of the log memory pool format for the pool header */
#define LOG_HDR_SIG "PMEMLOG" /* must be 8 bytes including '\0' */
#define LOG_FORMAT_MAJOR 1
#define LOG_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define LOG_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t log_format_feat_default = LOG_FORMAT_FEAT_DEFAULT;
struct pmemlog {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint64_t start_offset; /* start offset of the usable log space */
uint64_t end_offset; /* maximum offset of the usable log space */
uint64_t write_offset; /* current write point for the log */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
os_rwlock_t *rwlockp; /* pointer to RW lock */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
};
/* data area starts at this alignment after the struct pmemlog above */
#define LOG_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
/*
 * log_convert2h -- convert pmemlog structure to host byte order
 *
 * Only the three on-media offset fields are byte-swapped; the pool
 * header and run-time fields are left untouched.
 */
static inline void
log_convert2h(struct pmemlog *plp)
{
	plp->write_offset = le64toh(plp->write_offset);
	plp->start_offset = le64toh(plp->start_offset);
	plp->end_offset = le64toh(plp->end_offset);
}
/*
 * log_convert2le -- convert pmemlog structure to LE byte order
 *
 * Mirror of log_convert2h(); used before storing the descriptor
 * on media.
 */
static inline void
log_convert2le(struct pmemlog *plp)
{
	plp->write_offset = htole64(plp->write_offset);
	plp->start_offset = htole64(plp->start_offset);
	plp->end_offset = htole64(plp->end_offset);
}
#if FAULT_INJECTION
void
pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemlog_fault_injection_enabled(void);
#else
static inline void
pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemlog_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,832 | 23.422414 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemlog/log.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* log.c -- log memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemlog.h"
#include "ctl_global.h"
#include "os.h"
#include "set.h"
#include "out.h"
#include "log.h"
#include "mmap.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Log_create_attr = {
LOG_HDR_SIG,
LOG_FORMAT_MAJOR,
LOG_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Log_open_attr = {
LOG_HDR_SIG,
LOG_FORMAT_MAJOR,
LOG_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
 * log_descr_create -- (internal) create log memory pool descriptor
 *
 * Initializes the three on-media offsets (stored little-endian) and
 * persists them in a single call, so they hit the media together.
 */
static void
log_descr_create(PMEMlogpool *plp, size_t poolsize)
{
	LOG(3, "plp %p poolsize %zu", plp, poolsize);
	ASSERTeq(poolsize % Pagesize, 0);
	/* create required metadata */
	plp->start_offset = htole64(roundup(sizeof(*plp),
					LOG_FORMAT_DATA_ALIGN));
	plp->end_offset = htole64(poolsize);
	/* an empty log: write point == start of the usable area */
	plp->write_offset = plp->start_offset;
	/* store non-volatile part of pool's descriptor */
	util_persist(plp->is_pmem, &plp->start_offset, 3 * sizeof(uint64_t));
}
/*
 * log_descr_check -- (internal) validate log memory pool descriptor
 *
 * Works on a host-byte-order copy so the mapped descriptor itself is
 * not modified.  Returns 0 when the offsets are consistent with the
 * pool size, -1 (with errno = EINVAL) otherwise.
 */
static int
log_descr_check(PMEMlogpool *plp, size_t poolsize)
{
	LOG(3, "plp %p poolsize %zu", plp, poolsize);
	struct pmemlog hdr = *plp;
	log_convert2h(&hdr);
	if ((hdr.start_offset !=
			roundup(sizeof(*plp), LOG_FORMAT_DATA_ALIGN)) ||
			(hdr.end_offset != poolsize) ||
			(hdr.start_offset > hdr.end_offset)) {
		ERR("wrong start/end offsets "
			"(start: %" PRIu64 " end: %" PRIu64 "), "
			"pool size %zu",
			hdr.start_offset, hdr.end_offset, poolsize);
		errno = EINVAL;
		return -1;
	}
	/* the write point must lie within the usable area */
	if ((hdr.write_offset > hdr.end_offset) || (hdr.write_offset <
			hdr.start_offset)) {
		ERR("wrong write offset (start: %" PRIu64 " end: %" PRIu64
			" write: %" PRIu64 ")",
			hdr.start_offset, hdr.end_offset, hdr.write_offset);
		errno = EINVAL;
		return -1;
	}
	LOG(3, "start: %" PRIu64 ", end: %" PRIu64 ", write: %" PRIu64 "",
		hdr.start_offset, hdr.end_offset, hdr.write_offset);
	return 0;
}
/*
 * log_runtime_init -- (internal) initialize log memory pool runtime data
 *
 * Allocates the RW lock guarding the pool and sets up memory
 * protection on the mapped area.  Returns 0 on success, -1 when the
 * lock cannot be allocated.
 */
static int
log_runtime_init(PMEMlogpool *plp, int rdonly)
{
	LOG(3, "plp %p rdonly %d", plp, rdonly);
	/* remove volatile part of header */
	VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr,
		sizeof(struct pmemlog) -
		sizeof(struct pool_hdr) -
		3 * sizeof(uint64_t));
	/*
	 * Use some of the memory pool area for run-time info. This
	 * run-time state is never loaded from the file, it is always
	 * created here, so no need to worry about byte-order.
	 */
	plp->rdonly = rdonly;
	if ((plp->rwlockp = Malloc(sizeof(*plp->rwlockp))) == NULL) {
		ERR("!Malloc for a RW lock");
		return -1;
	}
	util_rwlock_init(plp->rwlockp);
	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	RANGE_NONE(plp->addr, sizeof(struct pool_hdr), plp->is_dev_dax);
	/* the rest should be kept read-only (debug version only) */
	RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr),
			plp->size - sizeof(struct pool_hdr), plp->is_dev_dax);
	return 0;
}
/*
 * pmemlog_createU -- create a log memory pool
 *
 * Creates the pool (set) file(s), writes the pmemlog descriptor and
 * initializes the runtime state.  Returns the pool handle or NULL on
 * error (with errno set); any partially-created parts are removed.
 */
#ifndef _WIN32
static inline
#endif
PMEMlogpool *
pmemlog_createU(const char *path, size_t poolsize, mode_t mode)
{
	LOG(3, "path %s poolsize %zu mode %d", path, poolsize, mode);
	struct pool_set *set;
	struct pool_attr adj_pool_attr = Log_create_attr;
	/* force set SDS feature */
	if (SDS_at_create)
		adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
	else
		adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;
	if (util_pool_create(&set, path, poolsize, PMEMLOG_MIN_POOL,
			PMEMLOG_MIN_PART, &adj_pool_attr, NULL,
			REPLICAS_DISABLED) != 0) {
		LOG(2, "cannot create pool or pool set");
		return NULL;
	}
	ASSERT(set->nreplicas > 0);
	struct pool_replica *rep = set->replica[0];
	PMEMlogpool *plp = rep->part[0].addr;
	/* the fields past 'addr' are volatile runtime state, not pmem */
	VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr,
			sizeof(struct pmemlog) -
			((uintptr_t)&plp->addr - (uintptr_t)&plp->hdr));
	plp->addr = plp;
	plp->size = rep->repsize;
	plp->set = set;
	plp->is_pmem = rep->is_pmem;
	plp->is_dev_dax = rep->part[0].is_dev_dax;
	/* is_dev_dax implies is_pmem */
	ASSERT(!plp->is_dev_dax || plp->is_pmem);
	/* create pool descriptor */
	log_descr_create(plp, rep->repsize);
	/* initialize runtime parts */
	if (log_runtime_init(plp, 0) != 0) {
		ERR("pool initialization failed");
		goto err;
	}
	if (util_poolset_chmod(set, mode))
		goto err;
	util_poolset_fdclose(set);
	LOG(3, "plp %p", plp);
	return plp;
err:
	LOG(4, "error clean up");
	/* preserve the original errno across the cleanup calls */
	int oerrno = errno;
	util_poolset_close(set, DELETE_CREATED_PARTS);
	errno = oerrno;
	return NULL;
}
#ifndef _WIN32
/*
 * pmemlog_create -- create a log memory pool
 */
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
	return pmemlog_createU(path, poolsize, mode);
}
#else
/*
 * pmemlog_createW -- create a log memory pool (wide-char path)
 */
PMEMlogpool *
pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode)
{
	/* convert the path to UTF-8 and delegate to the common routine */
	PMEMlogpool *plp = NULL;
	char *upath = util_toUTF8(path);

	if (upath != NULL) {
		plp = pmemlog_createU(upath, poolsize, mode);
		util_free_UTF8(upath);
	}

	return plp;
}
#endif
/*
 * log_open_common -- (internal) open a log memory pool
 *
 * This routine does all the work, but takes a cow flag so internal
 * calls can map a read-only pool if required.
 *
 * Returns the opened pool, or NULL with errno set; the pool files are
 * never deleted on failure.
 */
static PMEMlogpool *
log_open_common(const char *path, unsigned flags)
{
	LOG(3, "path %s flags 0x%x", path, flags);

	struct pool_set *set;

	if (util_pool_open(&set, path, PMEMLOG_MIN_PART, &Log_open_attr,
			NULL, NULL, flags) != 0) {
		LOG(2, "cannot open pool or pool set");
		return NULL;
	}

	ASSERT(set->nreplicas > 0);

	struct pool_replica *rep = set->replica[0];
	PMEMlogpool *plp = rep->part[0].addr;

	/* volatile header fields are initialized below, never mapped in */
	VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr,
			sizeof(struct pmemlog) -
			((uintptr_t)&plp->addr - (uintptr_t)&plp->hdr));

	plp->addr = plp;
	plp->size = rep->repsize;
	plp->set = set;
	plp->is_pmem = rep->is_pmem;
	plp->is_dev_dax = rep->part[0].is_dev_dax;

	/* is_dev_dax implies is_pmem */
	ASSERT(!plp->is_dev_dax || plp->is_pmem);

	/* log pools never support replicas */
	if (set->nreplicas > 1) {
		errno = ENOTSUP;
		ERR("!replicas not supported");
		goto err;
	}

	/* validate pool descriptor */
	if (log_descr_check(plp, rep->repsize) != 0) {
		LOG(2, "descriptor check failed");
		goto err;
	}

	/* initialize runtime parts */
	if (log_runtime_init(plp, set->rdonly) != 0) {
		ERR("pool initialization failed");
		goto err;
	}

	util_poolset_fdclose(set);

	LOG(3, "plp %p", plp);
	return plp;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
	errno = oerrno;
	return NULL;
}
/*
 * pmemlog_openU -- open an existing log memory pool
 */
#ifndef _WIN32
static inline
#endif
PMEMlogpool *
pmemlog_openU(const char *path)
{
	LOG(3, "path %s", path);

	/* honor the global copy-on-write setting when mapping the pool */
	unsigned flags = 0;
	if (COW_at_open)
		flags = POOL_OPEN_COW;

	return log_open_common(path, flags);
}
#ifndef _WIN32
/*
 * pmemlog_open -- open an existing log memory pool
 */
PMEMlogpool *
pmemlog_open(const char *path)
{
	return pmemlog_openU(path);
}
#else
/*
 * pmemlog_openW -- open an existing log memory pool (wide-char path)
 */
PMEMlogpool *
pmemlog_openW(const wchar_t *path)
{
	/* convert the path to UTF-8 and delegate to the common routine */
	PMEMlogpool *plp = NULL;
	char *upath = util_toUTF8(path);

	if (upath != NULL) {
		plp = pmemlog_openU(upath);
		util_free_UTF8(upath);
	}

	return plp;
}
#endif
/*
 * pmemlog_close -- close a log memory pool
 *
 * Destroys the run-time lock and unmaps the pool; the pool files are
 * left intact.
 */
void
pmemlog_close(PMEMlogpool *plp)
{
	LOG(3, "plp %p", plp);

	util_rwlock_destroy(plp->rwlockp);
	Free((void *)plp->rwlockp);

	util_poolset_close(plp->set, DO_NOT_DELETE_PARTS);
}
/*
 * pmemlog_nbyte -- return usable size of a log memory pool
 *
 * Takes the pool lock in read mode; on-media offsets are stored
 * little-endian, hence the le64toh() conversions.
 */
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
	LOG(3, "plp %p", plp);

	util_rwlock_rdlock(plp->rwlockp);

	size_t size = le64toh(plp->end_offset) - le64toh(plp->start_offset);
	LOG(4, "plp %p nbyte %zu", plp, size);

	util_rwlock_unlock(plp->rwlockp);

	return size;
}
/*
 * log_persist -- (internal) persist data, then metadata
 *
 * On entry, the write lock should be held.
 *
 * The ordering here is the crux of the log's crash consistency: the
 * newly appended data is made durable first, and only then is the
 * write offset advanced and persisted, so a crash can never expose
 * unflushed data as part of the log.
 */
static void
log_persist(PMEMlogpool *plp, uint64_t new_write_offset)
{
	uint64_t old_write_offset = le64toh(plp->write_offset);
	size_t length = new_write_offset - old_write_offset;

	/* unprotect the log space range (debug version only) */
	RANGE_RW((char *)plp->addr + old_write_offset, length, plp->is_dev_dax);

	/* persist the data */
	if (plp->is_pmem)
		pmem_drain(); /* data already flushed */
	else
		pmem_msync((char *)plp->addr + old_write_offset, length);

	/* protect the log space range (debug version only) */
	RANGE_RO((char *)plp->addr + old_write_offset, length, plp->is_dev_dax);

	/* unprotect the pool descriptor (debug version only) */
	RANGE_RW((char *)plp->addr + sizeof(struct pool_hdr),
			LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax);

	/* write the metadata */
	plp->write_offset = htole64(new_write_offset);

	/* persist the metadata */
	if (plp->is_pmem)
		pmem_persist(&plp->write_offset, sizeof(plp->write_offset));
	else
		pmem_msync(&plp->write_offset, sizeof(plp->write_offset));

	/* set the write-protection again (debug version only) */
	RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr),
			LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax);
}
/*
 * pmemlog_append -- add data to a log memory pool
 *
 * Appends 'count' bytes from 'buf' as one atomic log entry.  Returns 0
 * on success, -1 with errno set on error (EROFS or ENOSPC).
 */
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
	int ret = 0;

	LOG(3, "plp %p buf %p count %zu", plp, buf, count);

	if (plp->rdonly) {
		ERR("can't append to read-only log");
		errno = EROFS;
		return -1;
	}

	util_rwlock_wrlock(plp->rwlockp);

	/* get the current values */
	uint64_t end_offset = le64toh(plp->end_offset);
	uint64_t write_offset = le64toh(plp->write_offset);

	if (write_offset >= end_offset) {
		/* no space left */
		errno = ENOSPC;
		ERR("!pmemlog_append");
		ret = -1;
		goto end;
	}

	/* make sure we don't write past the available space */
	if (count > (end_offset - write_offset)) {
		errno = ENOSPC;
		ERR("!pmemlog_append");
		ret = -1;
		goto end;
	}

	char *data = plp->addr;

	/*
	 * unprotect the log space range, where the new data will be stored
	 * (debug version only)
	 */
	RANGE_RW(&data[write_offset], count, plp->is_dev_dax);

	if (plp->is_pmem)
		pmem_memcpy_nodrain(&data[write_offset], buf, count);
	else
		memcpy(&data[write_offset], buf, count);

	/* protect the log space range (debug version only) */
	RANGE_RO(&data[write_offset], count, plp->is_dev_dax);

	write_offset += count;

	/* persist the data and the metadata */
	log_persist(plp, write_offset);

end:
	util_rwlock_unlock(plp->rwlockp);

	return ret;
}
/*
 * pmemlog_appendv -- add gathered data to a log memory pool
 *
 * Appends the contents of the 'iovcnt' buffers described by 'iov' as a
 * single atomic log entry.  Returns 0 on success, -1 with errno set on
 * error (EINVAL, EROFS or ENOSPC).
 */
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
	LOG(3, "plp %p iovec %p iovcnt %d", plp, iov, iovcnt);

	int ret = 0;
	int i;

	if (iovcnt < 0) {
		errno = EINVAL;
		ERR("iovcnt is less than zero: %d", iovcnt);
		return -1;
	}

	if (plp->rdonly) {
		ERR("can't append to read-only log");
		errno = EROFS;
		return -1;
	}

	util_rwlock_wrlock(plp->rwlockp);

	/* get the current values */
	uint64_t end_offset = le64toh(plp->end_offset);
	uint64_t write_offset = le64toh(plp->write_offset);

	if (write_offset >= end_offset) {
		/* no space left */
		errno = ENOSPC;
		ERR("!pmemlog_appendv");
		ret = -1;
		goto end;
	}

	char *data = plp->addr;
	uint64_t count = 0;
	char *buf;

	/* calculate required space */
	for (i = 0; i < iovcnt; ++i)
		count += iov[i].iov_len;

	/* check if there is enough free space */
	if (count > (end_offset - write_offset)) {
		errno = ENOSPC;
		/* report the error, consistently with the other ENOSPC path */
		ERR("!pmemlog_appendv");
		ret = -1;
		goto end;
	}

	/* append the data */
	for (i = 0; i < iovcnt; ++i) {
		buf = iov[i].iov_base;
		count = iov[i].iov_len;

		/*
		 * unprotect the log space range, where the new data will be
		 * stored (debug version only)
		 */
		RANGE_RW(&data[write_offset], count, plp->is_dev_dax);

		if (plp->is_pmem)
			pmem_memcpy_nodrain(&data[write_offset], buf, count);
		else
			memcpy(&data[write_offset], buf, count);

		/*
		 * protect the log space range (debug version only)
		 */
		RANGE_RO(&data[write_offset], count, plp->is_dev_dax);

		write_offset += count;
	}

	/* persist the data and the metadata */
	log_persist(plp, write_offset);

end:
	util_rwlock_unlock(plp->rwlockp);

	return ret;
}
/*
 * pmemlog_tell -- return current write point in a log memory pool
 *
 * The returned value is the number of bytes appended so far, i.e. the
 * distance between the write offset and the start of the usable area.
 */
long long
pmemlog_tell(PMEMlogpool *plp)
{
	LOG(3, "plp %p", plp);

	util_rwlock_rdlock(plp->rwlockp);

	uint64_t start = le64toh(plp->start_offset);
	uint64_t write = le64toh(plp->write_offset);
	ASSERT(write >= start);

	long long wp = (long long)(write - start);
	LOG(4, "write offset %lld", wp);

	util_rwlock_unlock(plp->rwlockp);

	return wp;
}
/*
 * pmemlog_rewind -- discard all data, resetting a log memory pool to empty
 *
 * Only the write offset is reset (and persisted); the old bytes remain
 * on media but are no longer reachable through the log API.
 */
void
pmemlog_rewind(PMEMlogpool *plp)
{
	LOG(3, "plp %p", plp);

	if (plp->rdonly) {
		ERR("can't rewind read-only log");
		errno = EROFS;
		return;
	}

	util_rwlock_wrlock(plp->rwlockp);

	/* unprotect the pool descriptor (debug version only) */
	RANGE_RW((char *)plp->addr + sizeof(struct pool_hdr),
			LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax);

	/* both offsets are stored little-endian, so no conversion needed */
	plp->write_offset = plp->start_offset;
	if (plp->is_pmem)
		pmem_persist(&plp->write_offset, sizeof(uint64_t));
	else
		pmem_msync(&plp->write_offset, sizeof(uint64_t));

	/* set the write-protection again (debug version only) */
	RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr),
			LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax);

	util_rwlock_unlock(plp->rwlockp);
}
/*
 * pmemlog_walk -- walk through all data in a log memory pool
 *
 * chunksize of 0 means process_chunk gets called once for all data
 * as a single chunk.
 */
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
	int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
	LOG(3, "plp %p chunksize %zu", plp, chunksize);

	/*
	 * We are assuming that the walker doesn't change the data it's reading
	 * in place. We prevent everyone from changing the data behind our back
	 * until we are done with processing it.
	 */
	util_rwlock_rdlock(plp->rwlockp);

	char *data = plp->addr;
	uint64_t write_offset = le64toh(plp->write_offset);
	uint64_t data_offset = le64toh(plp->start_offset);
	size_t len;

	if (chunksize == 0) {
		/* most common case: process everything at once */
		len = write_offset - data_offset;
		LOG(3, "length %zu", len);
		(*process_chunk)(&data[data_offset], len, arg);
	} else {
		/*
		 * Walk through the complete record, chunk by chunk.
		 * The callback returns 0 to terminate the walk.
		 */
		while (data_offset < write_offset) {
			/* the final chunk may be shorter than chunksize */
			len = MIN(chunksize, write_offset - data_offset);
			if (!(*process_chunk)(&data[data_offset], len, arg))
				break;
			data_offset += chunksize;
		}
	}

	util_rwlock_unlock(plp->rwlockp);
}
/*
 * pmemlog_checkU -- log memory pool consistency check
 *
 * Returns true if consistent, zero if inconsistent, -1/error if checking
 * cannot happen due to other errors.
 *
 * The pool is opened copy-on-write, so the check never modifies the
 * underlying files.
 */
#ifndef _WIN32
static inline
#endif
int
pmemlog_checkU(const char *path)
{
	LOG(3, "path \"%s\"", path);

	PMEMlogpool *plp = log_open_common(path, POOL_OPEN_COW);
	if (plp == NULL)
		return -1;	/* errno set by log_open_common() */

	int consistent = 1;

	/* validate pool descriptor */
	uint64_t hdr_start = le64toh(plp->start_offset);
	uint64_t hdr_end = le64toh(plp->end_offset);
	uint64_t hdr_write = le64toh(plp->write_offset);

	/* the usable area must start right after the aligned header */
	if (hdr_start != roundup(sizeof(*plp), LOG_FORMAT_DATA_ALIGN)) {
		ERR("wrong value of start_offset");
		consistent = 0;
	}

	if (hdr_end != plp->size) {
		ERR("wrong value of end_offset");
		consistent = 0;
	}

	if (hdr_start > hdr_end) {
		ERR("start_offset greater than end_offset");
		consistent = 0;
	}

	if (hdr_start > hdr_write) {
		ERR("start_offset greater than write_offset");
		consistent = 0;
	}

	if (hdr_write > hdr_end) {
		ERR("write_offset greater than end_offset");
		consistent = 0;
	}

	pmemlog_close(plp);

	if (consistent)
		LOG(4, "pool consistency check OK");

	return consistent;
}
#ifndef _WIN32
/*
 * pmemlog_check -- log memory pool consistency check
 *
 * Returns true if consistent, zero if inconsistent, -1/error if checking
 * cannot happen due to other errors.
 */
int
pmemlog_check(const char *path)
{
	return pmemlog_checkU(path);
}
#else
/*
 * pmemlog_checkW -- log memory pool consistency check (wide-char path)
 */
int
pmemlog_checkW(const wchar_t *path)
{
	/* convert the path to UTF-8 and delegate to the common routine */
	int ret = -1;
	char *upath = util_toUTF8(path);

	if (upath != NULL) {
		ret = pmemlog_checkU(upath);
		util_free_UTF8(upath);
	}

	return ret;
}
#endif
/*
 * pmemlog_ctl_getU -- programmatically executes a read ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg)
{
	LOG(3, "plp %p name %s arg %p", plp, name, arg);
	/* plp may be NULL -- then only the global ctl namespace is queried */
	return ctl_query(plp == NULL ? NULL : plp->ctl, plp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
 * pmemlog_ctl_setU -- programmatically executes a write ctl query
 * (comment previously said "pmemblk_ctl_setU" -- copy-paste typo)
 */
#ifndef _WIN32
static inline
#endif
int
pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg)
{
	LOG(3, "plp %p name %s arg %p", plp, name, arg);
	/* plp may be NULL -- then only the global ctl namespace is queried */
	return ctl_query(plp == NULL ? NULL : plp->ctl, plp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
 * pmemlog_ctl_execU -- programmatically executes a runnable ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg)
{
	LOG(3, "plp %p name %s arg %p", plp, name, arg);
	/* plp may be NULL -- then only the global ctl namespace is queried */
	return ctl_query(plp == NULL ? NULL : plp->ctl, plp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
 * pmemlog_ctl_get -- programmatically executes a read ctl query
 */
int
pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg)
{
	return pmemlog_ctl_getU(plp, name, arg);
}

/*
 * pmemlog_ctl_set -- programmatically executes a write ctl query
 */
int
pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg)
{
	return pmemlog_ctl_setU(plp, name, arg);
}

/*
 * pmemlog_ctl_exec -- programmatically executes a runnable ctl query
 */
int
pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg)
{
	return pmemlog_ctl_execU(plp, name, arg);
}
#else
/*
 * pmemlog_ctl_getW -- programmatically executes a read ctl query
 * (wide-char query name)
 */
int
pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemlog_ctl_getU(plp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemlog_ctl_setW -- programmatically executes a write ctl query
 * (wide-char query name)
 */
int
pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemlog_ctl_setU(plp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemlog_ctl_execW -- programmatically executes a runnable ctl query
 * (wide-char query name)
 */
int
pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemlog_ctl_execU(plp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}
#endif
#if FAULT_INJECTION
/*
 * pmemlog_inject_fault_at -- inject a fault of the given type at the
 * nth occurrence of the named call site (test instrumentation only)
 */
void
pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth,
		const char *at)
{
	core_inject_fault_at(type, nth, at);
}

/*
 * pmemlog_fault_injection_enabled -- true if fault injection is active
 */
int
pmemlog_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* libpmemlog.c -- pmem entry points for libpmemlog
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemlog.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "log.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous white characters.
*/
#define LOG_CONFIG_ENV_VARIABLE "PMEMLOG_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define LOG_CONFIG_FILE_ENV_VARIABLE "PMEMLOG_CONF_FILE"
/*
 * log_ctl_init_and_load -- (static) initializes CTL and loads configuration
 *				from env variable and file
 *
 * With plp == NULL only the global (pool-less) configuration sources are
 * processed.  Returns 0 on success, -1 on error; the pool's freshly
 * created ctl, if any, is destroyed on failure.
 */
static int
log_ctl_init_and_load(PMEMlogpool *plp)
{
	LOG(3, "plp %p", plp);

	if (plp != NULL && (plp->ctl = ctl_new()) == NULL) {
		LOG(2, "!ctl_new");
		return -1;
	}

	/* config passed directly in an environment variable */
	char *env_config = os_getenv(LOG_CONFIG_ENV_VARIABLE);
	if (env_config != NULL) {
		if (ctl_load_config_from_string(plp ? plp->ctl : NULL,
				plp, env_config) != 0) {
			LOG(2, "unable to parse config stored in %s "
					"environment variable",
					LOG_CONFIG_ENV_VARIABLE);
			goto err;
		}
	}

	/* config stored in a file pointed to by an environment variable */
	char *env_config_file = os_getenv(LOG_CONFIG_FILE_ENV_VARIABLE);
	if (env_config_file != NULL && env_config_file[0] != '\0') {
		if (ctl_load_config_from_file(plp ? plp->ctl : NULL,
				plp, env_config_file) != 0) {
			LOG(2, "unable to parse config stored in %s "
					"file (from %s environment variable)",
					env_config_file,
					LOG_CONFIG_FILE_ENV_VARIABLE);
			goto err;
		}
	}

	return 0;

err:
	if (plp)
		ctl_delete(plp->ctl);
	return -1;
}
/*
 * log_init -- load-time initialization for log
 *
 * Called automatically by the run-time loader.
 */
ATTR_CONSTRUCTOR
void
libpmemlog_init(void)
{
	ctl_global_register();

	/* global config only -- per-pool ctl is created at open/create */
	if (log_ctl_init_and_load(NULL))
		FATAL("error: %s", pmemlog_errormsg());

	common_init(PMEMLOG_LOG_PREFIX, PMEMLOG_LOG_LEVEL_VAR,
			PMEMLOG_LOG_FILE_VAR, PMEMLOG_MAJOR_VERSION,
			PMEMLOG_MINOR_VERSION);
	LOG(3, NULL);
}
/*
 * libpmemlog_fini -- libpmemlog cleanup routine
 *
 * Called automatically when the process terminates.
 */
ATTR_DESTRUCTOR
void
libpmemlog_fini(void)
{
	LOG(3, NULL);
	common_fini();
}
/*
 * pmemlog_check_versionU -- see if lib meets application version requirements
 *
 * The major version must match exactly; the library's minor version must
 * be at least the one required.  Returns NULL when compatible, otherwise
 * an error-message string.
 */
#ifndef _WIN32
static inline
#endif
const char *
pmemlog_check_versionU(unsigned major_required, unsigned minor_required)
{
	LOG(3, "major_required %u minor_required %u",
			major_required, minor_required);

	if (major_required != PMEMLOG_MAJOR_VERSION) {
		ERR("libpmemlog major version mismatch (need %u, found %u)",
				major_required, PMEMLOG_MAJOR_VERSION);
		return out_get_errormsg();
	}

	if (minor_required > PMEMLOG_MINOR_VERSION) {
		ERR("libpmemlog minor version mismatch (need %u, found %u)",
				minor_required, PMEMLOG_MINOR_VERSION);
		return out_get_errormsg();
	}

	return NULL;
}
#ifndef _WIN32
/*
 * pmemlog_check_version -- see if lib meets application version requirements
 */
const char *
pmemlog_check_version(unsigned major_required, unsigned minor_required)
{
	return pmemlog_check_versionU(major_required, minor_required);
}
#else
/*
 * pmemlog_check_versionW -- see if lib meets application version requirements
 * (returns the wide-char variant of the stored error message)
 */
const wchar_t *
pmemlog_check_versionW(unsigned major_required, unsigned minor_required)
{
	if (pmemlog_check_versionU(major_required, minor_required) != NULL)
		return out_get_errormsgW();
	else
		return NULL;
}
#endif
/*
 * pmemlog_set_funcs -- allow overriding libpmemlog's call to malloc, etc.
 *
 * NOTE(review): semantics of NULL arguments depend on
 * util_set_alloc_funcs() -- presumably NULL keeps the default; confirm.
 */
void
pmemlog_set_funcs(
		void *(*malloc_func)(size_t size),
		void (*free_func)(void *ptr),
		void *(*realloc_func)(void *ptr, size_t size),
		char *(*strdup_func)(const char *s))
{
	LOG(3, NULL);

	util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
 * pmemlog_errormsgU -- return last error message
 */
#ifndef _WIN32
static inline
#endif
const char *
pmemlog_errormsgU(void)
{
	return out_get_errormsg();
}

#ifndef _WIN32
/*
 * pmemlog_errormsg -- return last error message
 */
const char *
pmemlog_errormsg(void)
{
	return pmemlog_errormsgU();
}
#else
/*
 * pmemlog_errormsgW -- return last error message as wchar_t
 */
const wchar_t *
pmemlog_errormsgW(void)
{
	return out_get_errormsgW();
}
#endif
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_windows.c -- windows abstraction layer
*/
#include <io.h>
#include <sys/locking.h>
#include <errno.h>
#include <pmemcompat.h>
#include <windows.h>
#include "alloc.h"
#include "util.h"
#include "os.h"
#include "out.h"
#define UTF8_BOM "\xEF\xBB\xBF"
/*
 * os_open -- open abstraction layer
 *
 * Converts the UTF-8 path to UTF-16 for the wide-char CRT call; if the
 * opened file starts with a UTF-8 BOM it is skipped so the caller reads
 * the actual data.
 */
int
os_open(const char *pathname, int flags, ...)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return -1;

	int ret;

	if (flags & O_CREAT) {
		/* mode is only passed when the file may be created */
		va_list arg;
		va_start(arg, flags);
		mode_t mode = va_arg(arg, mode_t);
		va_end(arg);
		ret = _wopen(path, flags, mode);
	} else {
		ret = _wopen(path, flags);
	}

	util_free_UTF16(path);

	/* BOM skipping should not modify errno */
	int orig_errno = errno;

	/*
	 * text files on windows can contain BOM. As we open files
	 * in binary mode we have to detect bom and skip it
	 */
	if (ret != -1) {
		char bom[3];
		if (_read(ret, bom, sizeof(bom)) != 3 ||
				memcmp(bom, UTF8_BOM, 3) != 0) {
			/* UTF-8 bom not found - reset file to the beginning */
			_lseek(ret, 0, SEEK_SET);
		}
	}

	errno = orig_errno;
	return ret;
}
/*
 * os_fsync -- fsync abstraction layer
 *
 * Flushes OS file buffers for the descriptor via FlushFileBuffers().
 */
int
os_fsync(int fd)
{
	HANDLE handle = (HANDLE) _get_osfhandle(fd);

	if (handle == INVALID_HANDLE_VALUE) {
		errno = EBADF;
		return -1;
	}

	if (!FlushFileBuffers(handle)) {
		errno = EINVAL;
		return -1;
	}

	return 0;
}
/*
 * os_fsync_dir -- fsync the directory
 *
 * Deliberately unimplemented on Windows; must never be reached.
 */
int
os_fsync_dir(const char *dir_name)
{
	/* XXX not used and not implemented */
	ASSERT(0);
	return -1;
}
/*
 * os_stat -- stat abstraction layer
 *
 * Converts the path to UTF-16 and uses the 64-bit-size _wstat64 variant.
 */
int
os_stat(const char *pathname, os_stat_t *buf)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return -1;

	int ret = _wstat64(path, buf);

	util_free_UTF16(path);
	return ret;
}
/*
 * os_unlink -- unlink abstraction layer
 *
 * Converts the path to UTF-16 and forwards to _wunlink().
 */
int
os_unlink(const char *pathname)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return -1;

	int ret = _wunlink(path);

	util_free_UTF16(path);
	return ret;
}
/*
 * os_access -- access abstraction layer
 *
 * Converts the path to UTF-16 and forwards to _waccess().
 */
int
os_access(const char *pathname, int mode)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return -1;

	int ret = _waccess(path, mode);

	util_free_UTF16(path);
	return ret;
}
/*
 * os_skipBOM -- (internal) Skip BOM in file stream
 *
 * text files on windows can contain BOM. We have to detect bom and skip it.
 * If no complete BOM is found the stream is rewound to the beginning, so
 * no data is lost.
 */
static void
os_skipBOM(FILE *file)
{
	if (file == NULL)
		return;

	/* BOM skipping should not modify errno */
	int orig_errno = errno;

	/* UTF-8 BOM */
	uint8_t bom[3];
	size_t read_num = fread(bom, sizeof(bom[0]), sizeof(bom), file);

	/*
	 * Rewind unless a complete UTF-8 BOM was read.  The previous code
	 * returned without seeking when fewer than 3 bytes could be read
	 * (a 1- or 2-byte file), leaving the stream past those bytes and
	 * silently dropping them for the caller.
	 */
	if (read_num != ARRAY_SIZE(bom) ||
	    memcmp(bom, UTF8_BOM, ARRAY_SIZE(bom)) != 0) {
		/* UTF-8 bom not found - reset file to the beginning */
		fseek(file, 0, SEEK_SET);
	}

	errno = orig_errno;
}
/*
 * os_fopen -- fopen abstraction layer
 *
 * UTF-16 path/mode conversion plus transparent BOM skipping on the
 * resulting stream.
 */
FILE *
os_fopen(const char *pathname, const char *mode)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return NULL;

	wchar_t *wmode = util_toUTF16(mode);
	if (wmode == NULL) {
		util_free_UTF16(path);
		return NULL;
	}

	FILE *ret = _wfopen(path, wmode);

	util_free_UTF16(path);
	util_free_UTF16(wmode);

	os_skipBOM(ret);
	return ret;
}
/*
 * os_fdopen -- fdopen abstraction layer
 *
 * Wraps an already-open descriptor in a stream and skips a UTF-8 BOM,
 * if present.
 */
FILE *
os_fdopen(int fd, const char *mode)
{
	FILE *ret = fdopen(fd, mode);

	os_skipBOM(ret);
	return ret;
}
/*
 * os_chmod -- chmod abstraction layer
 *
 * Converts the path to UTF-16 and forwards to _wchmod().
 */
int
os_chmod(const char *pathname, mode_t mode)
{
	wchar_t *path = util_toUTF16(pathname);
	if (path == NULL)
		return -1;

	int ret = _wchmod(path, mode);

	util_free_UTF16(path);
	return ret;
}
/*
 * os_mkstemp -- generate a unique temporary filename from template
 *
 * Returns an open file descriptor (opened with O_TEMPORARY, so the file
 * disappears when the last descriptor is closed), or -1 on error.
 */
int
os_mkstemp(char *temp)
{
	unsigned rnd;

	wchar_t *utemp = util_toUTF16(temp);
	if (utemp == NULL)
		return -1;

	wchar_t *path = _wmktemp(utemp);
	if (path == NULL) {
		util_free_UTF16(utemp);
		return -1;
	}

	/*
	 * Room for the template plus up to _MAX_FNAME extra characters for
	 * the random suffix written by _snwprintf below.  The previous
	 * sizing (sizeof(*npath) * wcslen(path) + _MAX_FNAME) only reserved
	 * _MAX_FNAME *bytes* -- half the wide characters the suffix may use.
	 */
	wchar_t *npath = Malloc(sizeof(*npath) * (wcslen(path) + _MAX_FNAME));
	if (npath == NULL) {
		util_free_UTF16(utemp);
		return -1;
	}

	wcscpy(npath, path);

	util_free_UTF16(utemp);

	/*
	 * Use rand_s to generate more unique tmp file name than _mktemp do.
	 * In case with multiple threads and multiple files even after close()
	 * file name conflicts occurred.
	 * It resolved issue with synchronous removing
	 * multiples files by system.
	 */
	rand_s(&rnd);

	int ret = _snwprintf(npath + wcslen(npath), _MAX_FNAME, L"%u", rnd);
	if (ret < 0)
		goto out;

	/*
	 * Use O_TEMPORARY flag to make sure the file is deleted when
	 * the last file descriptor is closed. Also, it prevents opening
	 * this file from another process.
	 */
	ret = _wopen(npath, O_RDWR | O_CREAT | O_EXCL | O_TEMPORARY,
			S_IWRITE | S_IREAD);

out:
	Free(npath);

	return ret;
}
/*
 * os_posix_fallocate -- allocate file space
 *
 * Follows the posix_fallocate() convention: returns 0 on success or an
 * error number on failure (errno is preserved on the success path).
 */
int
os_posix_fallocate(int fd, os_off_t offset, os_off_t len)
{
	/*
	 * From POSIX:
	 * "EINVAL -- The len argument was zero or the offset argument was
	 * less than zero."
	 *
	 * From Linux man-page:
	 * "EINVAL -- offset was less than 0, or len was less than or
	 * equal to 0"
	 */
	if (offset < 0 || len <= 0)
		return EINVAL;

	/*
	 * From POSIX:
	 * "EFBIG -- The value of offset+len is greater than the maximum
	 * file size."
	 *
	 * Overflow can't be checked for by _chsize_s, since it only gets
	 * the sum.
	 */
	if (offset + len < offset)
		return EFBIG;

	HANDLE handle = (HANDLE)_get_osfhandle(fd);
	if (handle == INVALID_HANDLE_VALUE) {
		return errno;
	}

	FILE_ATTRIBUTE_TAG_INFO attributes;
	if (!GetFileInformationByHandleEx(handle, FileAttributeTagInfo,
			&attributes, sizeof(attributes))) {
		return EINVAL;
	}

	/*
	 * To physically allocate space on windows we have to remove
	 * sparsefile and file compressed flags. This method is much faster
	 * than using _chsize_s which has terrible performance. Dax on
	 * windows doesn't support sparse files and file compression so
	 * this workaround is acceptable.
	 */
	if (attributes.FileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) {
		DWORD unused;
		FILE_SET_SPARSE_BUFFER buffer;
		buffer.SetSparse = FALSE;

		if (!DeviceIoControl(handle, FSCTL_SET_SPARSE, &buffer,
				sizeof(buffer), NULL, 0, &unused,
				NULL)) {
			return EINVAL;
		}
	}

	if (attributes.FileAttributes & FILE_ATTRIBUTE_COMPRESSED) {
		DWORD unused;
		USHORT buffer = 0; /* magic undocumented value */

		if (!DeviceIoControl(handle, FSCTL_SET_COMPRESSION,
				&buffer, sizeof(buffer), NULL, 0,
				&unused, NULL)) {
			return EINVAL;
		}
	}

	/*
	 * posix_fallocate should not clobber errno, but
	 * _filelengthi64 might set errno.
	 */
	int orig_errno = errno;

	__int64 current_size = _filelengthi64(fd);

	int file_length_errno = errno;

	errno = orig_errno;

	if (current_size < 0)
		return file_length_errno;

	__int64 requested_size = offset + len;

	if (requested_size <= current_size)
		return 0;

	/*
	 * Grow the file.  os_ftruncate() returns -1 with errno set on
	 * failure, so propagate errno as the error number -- consistent
	 * with the other error paths above.  (The previous code stored
	 * the -1 return value into errno and returned -1.)
	 */
	if (os_ftruncate(fd, requested_size) != 0)
		return errno;

	return 0;
}
/*
 * os_ftruncate -- truncate a file to a specified length
 *
 * Implemented with SetFilePointerEx + SetEndOfFile; extends or shrinks
 * the file as needed.  Returns 0 on success, -1 on error.
 */
int
os_ftruncate(int fd, os_off_t length)
{
	LARGE_INTEGER distanceToMove = {0};
	distanceToMove.QuadPart = length;
	HANDLE handle = (HANDLE)_get_osfhandle(fd);
	if (handle == INVALID_HANDLE_VALUE)
		return -1;

	if (!SetFilePointerEx(handle, distanceToMove, NULL, FILE_BEGIN)) {
		errno = EINVAL;
		return -1;
	}

	if (!SetEndOfFile(handle)) {
		errno = EINVAL;
		return -1;
	}
	return 0;
}
/*
 * os_flock -- apply or remove an advisory lock on an open file
 *
 * Emulated with _locking(); only the first page of the file is locked,
 * which is sufficient for mutual exclusion between pool users.
 */
int
os_flock(int fd, int operation)
{
	int flags = 0;
	SYSTEM_INFO systemInfo;

	GetSystemInfo(&systemInfo);

	switch (operation & (OS_LOCK_EX | OS_LOCK_SH | OS_LOCK_UN)) {
		case OS_LOCK_EX:
		case OS_LOCK_SH:
			if (operation & OS_LOCK_NB)
				flags = _LK_NBLCK;
			else
				flags = _LK_LOCK;
			break;
		case OS_LOCK_UN:
			flags = _LK_UNLCK;
			break;
		default:
			errno = EINVAL;
			return -1;
	}

	os_off_t filelen = _filelengthi64(fd);
	if (filelen < 0)
		return -1;

	/* for our purpose it's enough to lock the first page of the file */
	long len = (filelen > systemInfo.dwPageSize) ?
			systemInfo.dwPageSize : (long)filelen;

	int res = _locking(fd, flags, len);
	if (res != 0 && errno == EACCES)
		errno = EWOULDBLOCK; /* for consistency with flock() */

	return res;
}
/*
* os_writev -- windows version of writev function
*
* XXX: _write and other similar functions are 32 bit on windows
* if size of data is bigger then 2^32, this function
* will be not atomic.
*/
ssize_t
os_writev(int fd, const struct iovec *iov, int iovcnt)
{
size_t size = 0;
/* XXX: _write is 32 bit on windows */
for (int i = 0; i < iovcnt; i++)
size += iov[i].iov_len;
void *buf = malloc(size);
if (buf == NULL)
return ENOMEM;
char *it_buf = buf;
for (int i = 0; i < iovcnt; i++) {
memcpy(it_buf, iov[i].iov_base, iov[i].iov_len);
it_buf += iov[i].iov_len;
}
ssize_t written = 0;
while (size > 0) {
int ret = _write(fd, buf, size >= MAXUINT ?
MAXUINT : (unsigned)size);
if (ret == -1) {
written = -1;
break;
}
written += ret;
size -= ret;
}
free(buf);
return written;
}
#define NSEC_IN_SEC 1000000000ull
/* number of useconds between 1970-01-01T00:00:00Z and 1601-01-01T00:00:00Z */
#define DELTA_WIN2UNIX (11644473600000000ull)

/*
 * clock_gettime -- returns elapsed time since the system was restarted
 * or since Epoch, depending on the mode id
 */
int
os_clock_gettime(int id, struct timespec *ts)
{
	switch (id) {
	case CLOCK_MONOTONIC:
		{
			LARGE_INTEGER time;
			LARGE_INTEGER frequency;

			QueryPerformanceFrequency(&frequency);
			QueryPerformanceCounter(&time);

			ts->tv_sec = time.QuadPart / frequency.QuadPart;
			ts->tv_nsec = (long)(
				(time.QuadPart % frequency.QuadPart) *
				NSEC_IN_SEC / frequency.QuadPart);
		}
		break;

	case CLOCK_REALTIME:
		{
			FILETIME ctime_ft;

			GetSystemTimeAsFileTime(&ctime_ft);

			ULARGE_INTEGER ctime = {
				.HighPart = ctime_ft.dwHighDateTime,
				.LowPart = ctime_ft.dwLowDateTime,
			};

			/* FILETIME counts 100ns ticks since 1601; rebase to 1970 */
			ts->tv_sec = (ctime.QuadPart - DELTA_WIN2UNIX * 10)
				/ 10000000;
			ts->tv_nsec = ((ctime.QuadPart - DELTA_WIN2UNIX * 10)
				% 10000000) * 100;
		}
		break;

	default:
		SetLastError(EINVAL);
		return -1;
	}

	return 0;
}
/*
 * os_setenv -- change or add an environment variable
 */
int
os_setenv(const char *name, const char *value, int overwrite)
{
	errno_t err;

	/*
	 * If caller doesn't want to overwrite make sure that a environment
	 * variable with the same name doesn't exist.
	 */
	if (!overwrite && getenv(name))
		return 0;

	/*
	 * _putenv_s returns a non-zero error code on failure but setenv
	 * needs to return -1 on failure, let's translate the error code.
	 */
	if ((err = _putenv_s(name, value)) != 0) {
		errno = err;
		return -1;
	}

	return 0;
}
/*
 * os_unsetenv -- remove an environment variable
 *
 * On Windows, _putenv_s with an empty value removes the variable from
 * the environment.
 */
int
os_unsetenv(const char *name)
{
	errno_t err;
	if ((err = _putenv_s(name, "")) != 0) {
		errno = err;
		return -1;
	}

	return 0;
}
/*
 * os_getenv -- getenv abstraction layer
 *
 * Thin pass-through to the C runtime lookup.
 */
char *
os_getenv(const char *name)
{
	char *value = getenv(name);
	return value;
}
/*
 * rand_r -- rand_r for windows
 *
 * XXX: RAND_MAX is equal 0x7fff on Windows, so to get 32 bit random number
 * we need to merge two numbers returned by rand_s().
 * It is not to the best solution as subsequences returned by rand_s are
 * not guaranteed to be independent.
 *
 * XXX: Windows doesn't implement deterministic thread-safe pseudorandom
 * generator (generator which can be initialized by seed ).
 * We have to chose between a deterministic nonthread-safe generator
 * (rand(), srand()) or a non-deterministic thread-safe generator(rand_s())
 * as thread-safety is more important, a seed parameter is ignored in this
 * implementation.
 */
unsigned
os_rand_r(unsigned *seedp)
{
	UNREFERENCED_PARAMETER(seedp);
	unsigned part1, part2;
	rand_s(&part1);
	rand_s(&part2);
	/* NOTE(review): the shifted part1 overlaps part2's high bits via OR,
	 * so this is not a clean 16/16-bit split -- confirm if that matters */
	return part1 << 16 | part2;
}
/*
 * sys_siglist -- map of signal to human readable messages like sys_siglist
 *
 * Indices correspond to the conventional Linux signal numbers so that
 * os_strsignal() can index directly by signal value.
 */
const char * const sys_siglist[] = {
	"Unknown signal 0", /* 0 */
	"Hangup", /* 1 */
	"Interrupt", /* 2 */
	"Quit", /* 3 */
	"Illegal instruction", /* 4 */
	"Trace/breakpoint trap", /* 5 */
	"Aborted", /* 6 */
	"Bus error", /* 7 */
	"Floating point exception", /* 8 */
	"Killed", /* 9 */
	"User defined signal 1", /* 10 */
	"Segmentation fault", /* 11 */
	"User defined signal 2", /* 12 */
	"Broken pipe", /* 13 */
	"Alarm clock", /* 14 */
	"Terminated", /* 15 */
	"Stack fault", /* 16 */
	"Child exited", /* 17 */
	"Continued", /* 18 */
	"Stopped (signal)", /* 19 */
	"Stopped", /* 20 */
	"Stopped (tty input)", /* 21 */
	"Stopped (tty output)", /* 22 */
	"Urgent I/O condition", /* 23 */
	"CPU time limit exceeded", /* 24 */
	"File size limit exceeded", /* 25 */
	"Virtual timer expired", /* 26 */
	"Profiling timer expired", /* 27 */
	"Window changed", /* 28 */
	"I/O possible", /* 29 */
	"Power failure", /* 30 */
	"Bad system call", /* 31 */
	"Unknown signal 32" /* 32 */
};
int sys_siglist_size = ARRAYSIZE(sys_siglist);
/*
* string constants for strsignal
* XXX: ideally this should have the signal number as the suffix but then we
* should use a buffer from thread local storage, so deferring the same till
* we need it
* NOTE: In Linux strsignal uses TLS for the same reason but if it fails to get
* a thread local buffer it falls back to using a static buffer trading the
* thread safety.
*/
#define STR_REALTIME_SIGNAL "Real-time signal"
#define STR_UNKNOWN_SIGNAL "Unknown signal"
/*
 * strsignal -- returns a string describing the signal number 'sig'
 *
 * XXX: According to POSIX, this one is of type 'char *', but in our
 * implementation it returns 'const char *'.
 */
const char *
os_strsignal(int sig)
{
	/* standard signals have a dedicated entry in the table */
	if (sig >= 0 && sig < ARRAYSIZE(sys_siglist))
		return sys_siglist[sig];

	/* Linux real-time signal range */
	if (sig >= 34 && sig <= 64)
		return STR_REALTIME_SIGNAL;

	return STR_UNKNOWN_SIGNAL;
}
/*
 * os_execv -- execv abstraction layer for Windows
 *
 * Converts the path and the NULL-terminated argument vector to UTF-16
 * and calls _wexecv(). On success _wexecv replaces the process image and
 * does not return; any return from this function therefore means failure
 * (-1, with conversion/allocation errors included).
 */
int
os_execv(const char *path, char *const argv[])
{
	wchar_t *wpath = util_toUTF16(path);
	if (wpath == NULL)
		return -1;
	/* count the arguments */
	int argc = 0;
	while (argv[argc])
		argc++;
	int ret;
	/* +1 for the terminating NULL pointer; Zalloc zeroes the array */
	wchar_t **wargv = Zalloc((argc + 1) * sizeof(wargv[0]));
	if (!wargv) {
		ret = -1;
		goto wargv_alloc_failed;
	}
	for (int i = 0; i < argc; ++i) {
		wargv[i] = util_toUTF16(argv[i]);
		if (!wargv[i]) {
			ret = -1;
			goto end;
		}
	}
	intptr_t iret = _wexecv(wpath, wargv);
	if (iret == 0)
		ret = 0;
	else
		ret = -1;
end:
	/* free whatever was converted so far; NULL entries are fine */
	for (int i = 0; i < argc; ++i)
		util_free_UTF16(wargv[i]);
	Free(wargv);
wargv_alloc_failed:
	util_free_UTF16(wpath);
	return ret;
}
| 16,299 | 20.967655 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/os_thread_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* os_thread_posix.c -- Posix thread abstraction layer
*/
#define _GNU_SOURCE
#include <pthread.h>
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#include <semaphore.h>
#include "os_thread.h"
#include "util.h"
typedef struct {
pthread_t thread;
} internal_os_thread_t;
/*
 * os_once -- pthread_once abstraction layer
 *
 * os_once_t must be at least as large as pthread_once_t for the cast to
 * be safe; COMPILE_ERROR_ON enforces that at build time.
 */
int
os_once(os_once_t *o, void (*func)(void))
{
	COMPILE_ERROR_ON(sizeof(os_once_t) < sizeof(pthread_once_t));
	return pthread_once((pthread_once_t *)o, func);
}
/*
* os_tls_key_create -- pthread_key_create abstraction layer
*/
int
os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *))
{
COMPILE_ERROR_ON(sizeof(os_tls_key_t) < sizeof(pthread_key_t));
return pthread_key_create((pthread_key_t *)key, destructor);
}
/*
* os_tls_key_delete -- pthread_key_delete abstraction layer
*/
int
os_tls_key_delete(os_tls_key_t key)
{
return pthread_key_delete((pthread_key_t)key);
}
/*
* os_tls_setspecific -- pthread_key_setspecific abstraction layer
*/
int
os_tls_set(os_tls_key_t key, const void *value)
{
return pthread_setspecific((pthread_key_t)key, value);
}
/*
* os_tls_get -- pthread_key_getspecific abstraction layer
*/
void *
os_tls_get(os_tls_key_t key)
{
return pthread_getspecific((pthread_key_t)key);
}
/*
* os_mutex_init -- pthread_mutex_init abstraction layer
*/
int
os_mutex_init(os_mutex_t *__restrict mutex)
{
COMPILE_ERROR_ON(sizeof(os_mutex_t) < sizeof(pthread_mutex_t));
return pthread_mutex_init((pthread_mutex_t *)mutex, NULL);
}
/*
* os_mutex_destroy -- pthread_mutex_destroy abstraction layer
*/
int
os_mutex_destroy(os_mutex_t *__restrict mutex)
{
return pthread_mutex_destroy((pthread_mutex_t *)mutex);
}
/*
* os_mutex_lock -- pthread_mutex_lock abstraction layer
*/
int
os_mutex_lock(os_mutex_t *__restrict mutex)
{
return pthread_mutex_lock((pthread_mutex_t *)mutex);
}
/*
* os_mutex_trylock -- pthread_mutex_trylock abstraction layer
*/
int
os_mutex_trylock(os_mutex_t *__restrict mutex)
{
return pthread_mutex_trylock((pthread_mutex_t *)mutex);
}
/*
* os_mutex_unlock -- pthread_mutex_unlock abstraction layer
*/
int
os_mutex_unlock(os_mutex_t *__restrict mutex)
{
return pthread_mutex_unlock((pthread_mutex_t *)mutex);
}
/*
* os_mutex_timedlock -- pthread_mutex_timedlock abstraction layer
*/
int
os_mutex_timedlock(os_mutex_t *__restrict mutex,
const struct timespec *abstime)
{
return pthread_mutex_timedlock((pthread_mutex_t *)mutex, abstime);
}
/*
* os_rwlock_init -- pthread_rwlock_init abstraction layer
*/
int
os_rwlock_init(os_rwlock_t *__restrict rwlock)
{
COMPILE_ERROR_ON(sizeof(os_rwlock_t) < sizeof(pthread_rwlock_t));
return pthread_rwlock_init((pthread_rwlock_t *)rwlock, NULL);
}
/*
* os_rwlock_destroy -- pthread_rwlock_destroy abstraction layer
*/
int
os_rwlock_destroy(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_destroy((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_rdlock - pthread_rwlock_rdlock abstraction layer
*/
int
os_rwlock_rdlock(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_wrlock -- pthread_rwlock_wrlock abstraction layer
*/
int
os_rwlock_wrlock(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_unlock -- pthread_rwlock_unlock abstraction layer
*/
int
os_rwlock_unlock(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_tryrdlock -- pthread_rwlock_tryrdlock abstraction layer
*/
int
os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_tryrdlock((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_tryrwlock -- pthread_rwlock_trywrlock abstraction layer
*/
int
os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock)
{
return pthread_rwlock_trywrlock((pthread_rwlock_t *)rwlock);
}
/*
* os_rwlock_timedrdlock -- pthread_rwlock_timedrdlock abstraction layer
*/
int
os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime)
{
return pthread_rwlock_timedrdlock((pthread_rwlock_t *)rwlock, abstime);
}
/*
* os_rwlock_timedwrlock -- pthread_rwlock_timedwrlock abstraction layer
*/
int
os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime)
{
return pthread_rwlock_timedwrlock((pthread_rwlock_t *)rwlock, abstime);
}
/*
* os_spin_init -- pthread_spin_init abstraction layer
*/
int
os_spin_init(os_spinlock_t *lock, int pshared)
{
COMPILE_ERROR_ON(sizeof(os_spinlock_t) < sizeof(pthread_spinlock_t));
return pthread_spin_init((pthread_spinlock_t *)lock, pshared);
}
/*
* os_spin_destroy -- pthread_spin_destroy abstraction layer
*/
int
os_spin_destroy(os_spinlock_t *lock)
{
return pthread_spin_destroy((pthread_spinlock_t *)lock);
}
/*
* os_spin_lock -- pthread_spin_lock abstraction layer
*/
int
os_spin_lock(os_spinlock_t *lock)
{
return pthread_spin_lock((pthread_spinlock_t *)lock);
}
/*
* os_spin_unlock -- pthread_spin_unlock abstraction layer
*/
int
os_spin_unlock(os_spinlock_t *lock)
{
return pthread_spin_unlock((pthread_spinlock_t *)lock);
}
/*
* os_spin_trylock -- pthread_spin_trylock abstraction layer
*/
int
os_spin_trylock(os_spinlock_t *lock)
{
return pthread_spin_trylock((pthread_spinlock_t *)lock);
}
/*
* os_cond_init -- pthread_cond_init abstraction layer
*/
int
os_cond_init(os_cond_t *__restrict cond)
{
COMPILE_ERROR_ON(sizeof(os_cond_t) < sizeof(pthread_cond_t));
return pthread_cond_init((pthread_cond_t *)cond, NULL);
}
/*
* os_cond_destroy -- pthread_cond_destroy abstraction layer
*/
int
os_cond_destroy(os_cond_t *__restrict cond)
{
return pthread_cond_destroy((pthread_cond_t *)cond);
}
/*
* os_cond_broadcast -- pthread_cond_broadcast abstraction layer
*/
int
os_cond_broadcast(os_cond_t *__restrict cond)
{
return pthread_cond_broadcast((pthread_cond_t *)cond);
}
/*
* os_cond_signal -- pthread_cond_signal abstraction layer
*/
int
os_cond_signal(os_cond_t *__restrict cond)
{
return pthread_cond_signal((pthread_cond_t *)cond);
}
/*
* os_cond_timedwait -- pthread_cond_timedwait abstraction layer
*/
int
os_cond_timedwait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex, const struct timespec *abstime)
{
return pthread_cond_timedwait((pthread_cond_t *)cond,
(pthread_mutex_t *)mutex, abstime);
}
/*
* os_cond_wait -- pthread_cond_wait abstraction layer
*/
int
os_cond_wait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex)
{
return pthread_cond_wait((pthread_cond_t *)cond,
(pthread_mutex_t *)mutex);
}
/*
* os_thread_create -- pthread_create abstraction layer
*/
int
os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr,
void *(*start_routine)(void *), void *arg)
{
COMPILE_ERROR_ON(sizeof(os_thread_t) < sizeof(internal_os_thread_t));
internal_os_thread_t *thread_info = (internal_os_thread_t *)thread;
return pthread_create(&thread_info->thread, (pthread_attr_t *)attr,
start_routine, arg);
}
/*
* os_thread_join -- pthread_join abstraction layer
*/
int
os_thread_join(os_thread_t *thread, void **result)
{
internal_os_thread_t *thread_info = (internal_os_thread_t *)thread;
return pthread_join(thread_info->thread, result);
}
/*
* os_thread_self -- pthread_self abstraction layer
*/
void
os_thread_self(os_thread_t *thread)
{
internal_os_thread_t *thread_info = (internal_os_thread_t *)thread;
thread_info->thread = pthread_self();
}
/*
* os_thread_atfork -- pthread_atfork abstraction layer
*/
int
os_thread_atfork(void (*prepare)(void), void (*parent)(void),
void (*child)(void))
{
return pthread_atfork(prepare, parent, child);
}
/*
* os_thread_setaffinity_np -- pthread_atfork abstraction layer
*/
int
os_thread_setaffinity_np(os_thread_t *thread, size_t set_size,
const os_cpu_set_t *set)
{
COMPILE_ERROR_ON(sizeof(os_cpu_set_t) < sizeof(cpu_set_t));
internal_os_thread_t *thread_info = (internal_os_thread_t *)thread;
return pthread_setaffinity_np(thread_info->thread, set_size,
(cpu_set_t *)set);
}
/*
* os_cpu_zero -- CP_ZERO abstraction layer
*/
void
os_cpu_zero(os_cpu_set_t *set)
{
CPU_ZERO((cpu_set_t *)set);
}
/*
* os_cpu_set -- CP_SET abstraction layer
*/
void
os_cpu_set(size_t cpu, os_cpu_set_t *set)
{
CPU_SET(cpu, (cpu_set_t *)set);
}
/*
* os_semaphore_init -- initializes semaphore instance
*/
int
os_semaphore_init(os_semaphore_t *sem, unsigned value)
{
COMPILE_ERROR_ON(sizeof(os_semaphore_t) < sizeof(sem_t));
return sem_init((sem_t *)sem, 0, value);
}
/*
* os_semaphore_destroy -- destroys a semaphore instance
*/
int
os_semaphore_destroy(os_semaphore_t *sem)
{
return sem_destroy((sem_t *)sem);
}
/*
* os_semaphore_wait -- decreases the value of the semaphore
*/
int
os_semaphore_wait(os_semaphore_t *sem)
{
return sem_wait((sem_t *)sem);
}
/*
* os_semaphore_trywait -- tries to decrease the value of the semaphore
*/
int
os_semaphore_trywait(os_semaphore_t *sem)
{
return sem_trywait((sem_t *)sem);
}
/*
* os_semaphore_post -- increases the value of the semaphore
*/
int
os_semaphore_post(os_semaphore_t *sem)
{
return sem_post((sem_t *)sem);
}
| 9,190 | 20.032037 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/util.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* util.c -- very basic utilities
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <errno.h>
#include <time.h>
#include <stdarg.h>
#include "util.h"
#include "os.h"
#include "valgrind_internal.h"
#include "alloc.h"
/* library-wide page size */
unsigned long long Pagesize;
/* allocation/mmap granularity */
unsigned long long Mmap_align;
#if ANY_VG_TOOL_ENABLED
/* Initialized to true if the process is running inside Valgrind. */
unsigned _On_valgrind;
#endif
#if VG_HELGRIND_ENABLED
/* Initialized to true if the process is running inside Valgrind helgrind. */
unsigned _On_helgrind;
#endif
#if VG_DRD_ENABLED
/* Initialized to true if the process is running inside Valgrind drd. */
unsigned _On_drd;
#endif
#if VG_HELGRIND_ENABLED || VG_DRD_ENABLED
/* Initialized to true if the process is running inside Valgrind drd or hg. */
unsigned _On_drd_or_hg;
#endif
#if VG_MEMCHECK_ENABLED
/* Initialized to true if the process is running inside Valgrind memcheck. */
unsigned _On_memcheck;
#endif
#if VG_PMEMCHECK_ENABLED
/* Initialized to true if the process is running inside Valgrind pmemcheck. */
unsigned _On_pmemcheck;
#define LIB_LOG_LEN 20
#define FUNC_LOG_LEN 50
#define SUFFIX_LEN 7
/* true if pmreorder instrumentation has to be enabled */
int _Pmreorder_emit;
/*
* util_emit_log -- emits lib and func name with appropriate suffix
* to pmemcheck store log file
*/
void
util_emit_log(const char *lib, const char *func, int order)
{
char lib_name[LIB_LOG_LEN];
char func_name[FUNC_LOG_LEN];
char suffix[SUFFIX_LEN];
size_t lib_len = strlen(lib);
size_t func_len = strlen(func);
if (order == 0)
strcpy(suffix, ".BEGIN");
else
strcpy(suffix, ".END");
size_t suffix_len = strlen(suffix);
if (lib_len + suffix_len + 1 > LIB_LOG_LEN) {
VALGRIND_EMIT_LOG("Library name is too long");
return;
}
if (func_len + suffix_len + 1 > FUNC_LOG_LEN) {
VALGRIND_EMIT_LOG("Function name is too long");
return;
}
strcpy(lib_name, lib);
strcat(lib_name, suffix);
strcpy(func_name, func);
strcat(func_name, suffix);
if (order == 0) {
VALGRIND_EMIT_LOG(func_name);
VALGRIND_EMIT_LOG(lib_name);
} else {
VALGRIND_EMIT_LOG(lib_name);
VALGRIND_EMIT_LOG(func_name);
}
}
#endif
/*
 * util_is_zeroed -- check if the given memory range contains only zeros
 *
 * Returns 1 when every byte in [addr, addr + len) is zero (an empty
 * range counts as zeroed), 0 otherwise.
 */
int
util_is_zeroed(const void *addr, size_t len)
{
	const char *bytes = addr;

	if (len == 0)
		return 1;

	/* first byte checked directly, the rest via overlapping self-compare */
	return bytes[0] == 0 && memcmp(bytes, bytes + 1, len - 1) == 0;
}
/*
 * util_checksum_compute -- compute Fletcher64-like checksum
 *
 * csump points to where the checksum lives, so that location
 * is treated as zeros while calculating the checksum. The
 * checksummed data is assumed to be in little endian order.
 * When skip_off is non-zero, everything at and past that offset is
 * treated as zeros as well.
 */
uint64_t
util_checksum_compute(void *addr, size_t len, uint64_t *csump, size_t skip_off)
{
	/* the algorithm consumes the data as 32-bit words */
	if (len % 4 != 0)
		abort();
	uint32_t *p32 = addr;
	uint32_t *p32end = (uint32_t *)((char *)addr + len);
	uint32_t *skip;
	uint32_t lo32 = 0;
	uint32_t hi32 = 0;
	/* skip_off == 0 means "skip nothing" -- point just past the end */
	if (skip_off)
		skip = (uint32_t *)((char *)addr + skip_off);
	else
		skip = (uint32_t *)((char *)addr + len);
	while (p32 < p32end)
		if (p32 == (uint32_t *)csump || p32 >= skip) {
			/* lo32 += 0; treat first 32-bits as zero */
			p32++;
			hi32 += lo32;
			/* lo32 += 0; treat second 32-bits as zero */
			p32++;
			hi32 += lo32;
		} else {
			lo32 += le32toh(*p32);
			++p32;
			hi32 += lo32;
		}
	return (uint64_t)hi32 << 32 | lo32;
}
/*
 * util_checksum -- compute Fletcher64-like checksum
 *
 * csump points to where the checksum lives, so that location
 * is treated as zeros while calculating the checksum.
 * If insert is true, the calculated checksum is inserted into
 * the range at *csump. Otherwise the calculated checksum is
 * checked against *csump and the result returned (true means
 * the range checksummed correctly).
 */
int
util_checksum(void *addr, size_t len, uint64_t *csump,
		int insert, size_t skip_off)
{
	uint64_t csum = util_checksum_compute(addr, len, csump, skip_off);
	if (insert) {
		/* stored in little-endian, matching the on-media byte order */
		*csump = htole64(csum);
		return 1;
	}
	/* compare against the stored little-endian value */
	return *csump == htole64(csum);
}
/*
* util_checksum_seq -- compute sequential Fletcher64-like checksum
*
* Merges checksum from the old buffer with checksum for current buffer.
*/
uint64_t
util_checksum_seq(const void *addr, size_t len, uint64_t csum)
{
if (len % 4 != 0)
abort();
const uint32_t *p32 = addr;
const uint32_t *p32end = (const uint32_t *)((const char *)addr + len);
uint32_t lo32 = (uint32_t)csum;
uint32_t hi32 = (uint32_t)(csum >> 32);
while (p32 < p32end) {
lo32 += le32toh(*p32);
++p32;
hi32 += lo32;
}
return (uint64_t)hi32 << 32 | lo32;
}
/*
 * util_fgets -- fgets wrapper that converts a trailing CRLF into LF
 *
 * Reads a line exactly like fgets(3); when the line ends with "\r\n" the
 * carriage return is dropped so callers always see Unix-style endings.
 */
char *
util_fgets(char *buffer, int max, FILE *stream)
{
	char *line = fgets(buffer, max, stream);
	if (line == NULL)
		return NULL;

	size_t len = strlen(line);
	if (len >= 2 && line[len - 2] == '\r' && line[len - 1] == '\n') {
		line[len - 2] = '\n';
		line[len - 1] = '\0';
	}

	return line;
}
struct suff {
	const char *suff;	/* suffix string, e.g. "KiB" */
	uint64_t mag;		/* multiplier the suffix stands for */
};

/*
 * util_parse_size -- parse size from string
 *
 * Accepts a non-negative integer optionally followed by a JEDEC (K, M, ...),
 * IEC (KiB, MiB, ...) or SI (kB, MB, ...) suffix. On success stores the
 * byte count in *sizep (when sizep is not NULL) and returns 0; returns -1
 * when the string cannot be parsed or the suffix is unknown.
 */
int
util_parse_size(const char *str, size_t *sizep)
{
	const struct suff suffixes[] = {
		{ "B", 1ULL },
		{ "K", 1ULL << 10 }, /* JEDEC */
		{ "M", 1ULL << 20 },
		{ "G", 1ULL << 30 },
		{ "T", 1ULL << 40 },
		{ "P", 1ULL << 50 },
		{ "KiB", 1ULL << 10 }, /* IEC */
		{ "MiB", 1ULL << 20 },
		{ "GiB", 1ULL << 30 },
		{ "TiB", 1ULL << 40 },
		{ "PiB", 1ULL << 50 },
		{ "kB", 1000ULL }, /* SI */
		{ "MB", 1000ULL * 1000 },
		{ "GB", 1000ULL * 1000 * 1000 },
		{ "TB", 1000ULL * 1000 * 1000 * 1000 },
		{ "PB", 1000ULL * 1000 * 1000 * 1000 * 1000 }
	};

	size_t size = 0;
	char unit[9] = {0};

	int matched = sscanf(str, "%zu%8s", &size, unit);
	if (matched == 1) {
		/* a bare number, no suffix */
		if (sizep)
			*sizep = size;
		return 0;
	}
	if (matched != 2)
		return -1;

	for (unsigned i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); ++i) {
		if (strcmp(suffixes[i].suff, unit) == 0) {
			if (sizep)
				*sizep = size * suffixes[i].mag;
			return 0;
		}
	}

	/* number followed by an unrecognized suffix */
	return -1;
}
/*
 * util_init -- initialize the utils
 *
 * This is called from the library initialization code.
 * Caches the page size / mmap granularity and detects which (if any)
 * Valgrind tool the process is running under, storing the answers in
 * the file-scope globals declared above.
 */
void
util_init(void)
{
	/* XXX - replace sysconf() with util_get_sys_xxx() */
	if (Pagesize == 0)
		Pagesize = (unsigned long) sysconf(_SC_PAGESIZE);
#ifndef _WIN32
	Mmap_align = Pagesize;
#else
	if (Mmap_align == 0) {
		SYSTEM_INFO si;
		GetSystemInfo(&si);
		Mmap_align = si.dwAllocationGranularity;
	}
#endif
#if ANY_VG_TOOL_ENABLED
	_On_valgrind = RUNNING_ON_VALGRIND;
#endif
#if VG_MEMCHECK_ENABLED
	if (_On_valgrind) {
		unsigned tmp;
		unsigned result;
		/* GET_VBITS returns non-zero only under memcheck */
		unsigned res = VALGRIND_GET_VBITS(&tmp, &result, sizeof(tmp));
		_On_memcheck = res ? 1 : 0;
	} else {
		_On_memcheck = 0;
	}
#endif
#if VG_DRD_ENABLED
	if (_On_valgrind)
		_On_drd = DRD_GET_DRD_THREADID ? 1 : 0;
	else
		_On_drd = 0;
#endif
#if VG_HELGRIND_ENABLED
	if (_On_valgrind) {
		unsigned tmp;
		unsigned result;
		/*
		 * As of now (pmem-3.15) VALGRIND_HG_GET_ABITS is broken on
		 * the upstream version of Helgrind headers. It generates
		 * a sign-conversion error and actually returns UINT32_MAX-1
		 * when not running under Helgrind.
		 */
		long res = VALGRIND_HG_GET_ABITS(&tmp, &result, sizeof(tmp));
		_On_helgrind = res != -2 ? 1 : 0;
	} else {
		_On_helgrind = 0;
	}
#endif
#if VG_DRD_ENABLED || VG_HELGRIND_ENABLED
	_On_drd_or_hg = (unsigned)(On_helgrind + On_drd);
#endif
#if VG_PMEMCHECK_ENABLED
	if (On_valgrind) {
		/* pmreorder store logging is opt-in via the environment */
		char *pmreorder_env = os_getenv("PMREORDER_EMIT_LOG");
		if (pmreorder_env)
			_Pmreorder_emit = atoi(pmreorder_env);
		/*
		 * Probe for pmemcheck by registering a dummy pmem mapping
		 * and checking whether the tool recognizes it.
		 */
		VALGRIND_PMC_REGISTER_PMEM_MAPPING(&_On_pmemcheck,
			sizeof(_On_pmemcheck));
		unsigned pmc = (unsigned)VALGRIND_PMC_CHECK_IS_PMEM_MAPPING(
			&_On_pmemcheck, sizeof(_On_pmemcheck));
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&_On_pmemcheck,
			sizeof(_On_pmemcheck));
		_On_pmemcheck = pmc ? 1 : 0;
	} else {
		_On_pmemcheck = 0;
		_Pmreorder_emit = 0;
	}
#endif
}
/*
 * util_concat_str -- concatenate two strings
 *
 * Returns a newly allocated buffer holding s1 followed by s2; the caller
 * owns the buffer and must free(3) it. Returns NULL on allocation failure.
 */
char *
util_concat_str(const char *s1, const char *s2)
{
	size_t len1 = strlen(s1);
	size_t len2 = strlen(s2);

	char *joined = malloc(len1 + len2 + 1);
	if (joined == NULL)
		return NULL;

	memcpy(joined, s1, len1);
	memcpy(joined + len1, s2, len2 + 1);	/* +1 copies the terminator */

	return joined;
}
/*
 * util_localtime -- a wrapper for localtime function
 *
 * localtime can set a nonzero errno even when it succeeds (e.g. when
 * /etc/localtime does not exist under Linux); restore the previous errno
 * on success so callers don't observe a spurious error.
 */
struct tm *
util_localtime(const time_t *timep)
{
	int saved_errno = errno;
	struct tm *result = localtime(timep);

	if (result != NULL)
		errno = saved_errno;

	return result;
}
/*
 * util_safe_strcpy -- bounded string copy
 *
 * Copies src into dst; returns 0 when the whole string (including its
 * null-terminator) fit into max_length bytes, -1 otherwise (in which
 * case dst is not guaranteed to be terminated).
 *
 * For gcc (found in version 8.1.1) calling this function with
 * max_length equal to dst size produces -Wstringop-truncation warning
 *
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85902
 */
#ifdef STRINGOP_TRUNCATION_SUPPORTED
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-truncation"
#endif
int
util_safe_strcpy(char *dst, const char *src, size_t max_length)
{
	if (max_length == 0)
		return -1;

	strncpy(dst, src, max_length);

	/* a '\0' in the last byte proves src (incl. terminator) fit */
	return dst[max_length - 1] == '\0' ? 0 : -1;
}
#ifdef STRINGOP_TRUNCATION_SUPPORTED
#pragma GCC diagnostic pop
#endif
#define PARSER_MAX_LINE (PATH_MAX + 1024)
/*
 * util_snprintf -- run snprintf; in case of truncation or a failure
 * return a negative value, or the number of characters printed otherwise.
 */
int
util_snprintf(char *str, size_t size, const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	int len = vsnprintf(str, size, format, ap);
	va_end(ap);

	if (len < 0) {
		/* vsnprintf is not required to set errno on failure */
		if (!errno)
			errno = EIO;
		return -1;
	}

	if ((size_t)len >= size) {
		/* output did not fit -- treat truncation as an error */
		errno = ENOBUFS;
		return -1;
	}

	return len;
}
/*
 * util_readline -- read line from stream
 *
 * Grows the buffer geometrically until a newline or EOF is reached, so
 * lines of arbitrary length are supported. Returns a heap buffer owned
 * by the caller (release with Free()), or NULL on EOF, read error or
 * allocation failure.
 */
char *
util_readline(FILE *fh)
{
	size_t bufsize = PARSER_MAX_LINE;
	size_t position = 0;
	char *buffer = NULL;
	do {
		char *tmp = buffer;
		buffer = Realloc(buffer, bufsize);
		if (buffer == NULL) {
			/* Realloc failed -- release the previous buffer */
			Free(tmp);
			return NULL;
		}
		/* ensure if we can cast bufsize to int */
		char *s = util_fgets(buffer + position, (int)bufsize / 2, fh);
		if (s == NULL) {
			Free(buffer);
			return NULL;
		}
		position = strlen(buffer);
		bufsize *= 2;
	} while (!feof(fh) && buffer[position - 1] != '\n');
	return buffer;
}
| 10,620 | 20.456566 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/os_thread_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_thread_windows.c -- (imperfect) POSIX-like threads for Windows
*
* Loosely inspired by:
* http://locklessinc.com/articles/pthreads_on_windows/
*/
#include <time.h>
#include <synchapi.h>
#include <sys/types.h>
#include <sys/timeb.h>
#include "os_thread.h"
#include "util.h"
#include "out.h"
typedef struct {
unsigned attr;
CRITICAL_SECTION lock;
} internal_os_mutex_t;
typedef struct {
unsigned attr;
char is_write;
SRWLOCK lock;
} internal_os_rwlock_t;
typedef struct {
unsigned attr;
CONDITION_VARIABLE cond;
} internal_os_cond_t;
typedef long long internal_os_once_t;
typedef struct {
HANDLE handle;
} internal_semaphore_t;
typedef struct {
GROUP_AFFINITY affinity;
} internal_os_cpu_set_t;
typedef struct {
HANDLE thread_handle;
void *arg;
void *(*start_routine)(void *);
void *result;
} internal_os_thread_t;
/* number of useconds between 1970-01-01T00:00:00Z and 1601-01-01T00:00:00Z */
#define DELTA_WIN2UNIX (11644473600000000ull)
#define TIMED_LOCK(action, ts) {\
if ((action) == TRUE)\
return 0;\
unsigned long long et = (ts)->tv_sec * 1000000000 + (ts)->tv_nsec;\
while (1) {\
FILETIME _t;\
GetSystemTimeAsFileTime(&_t);\
ULARGE_INTEGER _UI = {\
.HighPart = _t.dwHighDateTime,\
.LowPart = _t.dwLowDateTime,\
};\
if (100 * _UI.QuadPart - 1000 * DELTA_WIN2UNIX >= et)\
return ETIMEDOUT;\
if ((action) == TRUE)\
return 0;\
Sleep(1);\
}\
return ETIMEDOUT;\
}
/*
* os_mutex_init -- initializes mutex
*/
int
os_mutex_init(os_mutex_t *__restrict mutex)
{
COMPILE_ERROR_ON(sizeof(os_mutex_t) < sizeof(internal_os_mutex_t));
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
InitializeCriticalSection(&mutex_internal->lock);
return 0;
}
/*
* os_mutex_destroy -- destroys mutex
*/
int
os_mutex_destroy(os_mutex_t *__restrict mutex)
{
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
DeleteCriticalSection(&mutex_internal->lock);
return 0;
}
/*
* os_mutex_lock -- locks mutex
*/
_Use_decl_annotations_
int
os_mutex_lock(os_mutex_t *__restrict mutex)
{
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
EnterCriticalSection(&mutex_internal->lock);
if (mutex_internal->lock.RecursionCount > 1) {
LeaveCriticalSection(&mutex_internal->lock);
FATAL("deadlock detected");
}
return 0;
}
/*
* os_mutex_trylock -- tries lock mutex
*/
_Use_decl_annotations_
int
os_mutex_trylock(os_mutex_t *__restrict mutex)
{
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
if (TryEnterCriticalSection(&mutex_internal->lock) == FALSE)
return EBUSY;
if (mutex_internal->lock.RecursionCount > 1) {
LeaveCriticalSection(&mutex_internal->lock);
return EBUSY;
}
return 0;
}
/*
* os_mutex_timedlock -- tries lock mutex with timeout
*/
int
os_mutex_timedlock(os_mutex_t *__restrict mutex,
const struct timespec *abstime)
{
TIMED_LOCK((os_mutex_trylock(mutex) == 0), abstime);
}
/*
* os_mutex_unlock -- unlocks mutex
*/
int
os_mutex_unlock(os_mutex_t *__restrict mutex)
{
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
LeaveCriticalSection(&mutex_internal->lock);
return 0;
}
/*
* os_rwlock_init -- initializes rwlock
*/
int
os_rwlock_init(os_rwlock_t *__restrict rwlock)
{
COMPILE_ERROR_ON(sizeof(os_rwlock_t) < sizeof(internal_os_rwlock_t));
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
InitializeSRWLock(&rwlock_internal->lock);
return 0;
}
/*
* os_rwlock_destroy -- destroys rwlock
*/
int
os_rwlock_destroy(os_rwlock_t *__restrict rwlock)
{
/* do nothing */
UNREFERENCED_PARAMETER(rwlock);
return 0;
}
/*
* os_rwlock_rdlock -- get shared lock
*/
int
os_rwlock_rdlock(os_rwlock_t *__restrict rwlock)
{
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
AcquireSRWLockShared(&rwlock_internal->lock);
rwlock_internal->is_write = 0;
return 0;
}
/*
* os_rwlock_wrlock -- get exclusive lock
*/
int
os_rwlock_wrlock(os_rwlock_t *__restrict rwlock)
{
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
AcquireSRWLockExclusive(&rwlock_internal->lock);
rwlock_internal->is_write = 1;
return 0;
}
/*
* os_rwlock_tryrdlock -- tries get shared lock
*/
int
os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock)
{
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
if (TryAcquireSRWLockShared(&rwlock_internal->lock) == FALSE) {
return EBUSY;
} else {
rwlock_internal->is_write = 0;
return 0;
}
}
/*
* os_rwlock_trywrlock -- tries get exclusive lock
*/
_Use_decl_annotations_
int
os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock)
{
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
if (TryAcquireSRWLockExclusive(&rwlock_internal->lock) == FALSE) {
return EBUSY;
} else {
rwlock_internal->is_write = 1;
return 0;
}
}
/*
* os_rwlock_timedrdlock -- gets shared lock with timeout
*/
int
os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime)
{
TIMED_LOCK((os_rwlock_tryrdlock(rwlock) == 0), abstime);
}
/*
* os_rwlock_timedwrlock -- gets exclusive lock with timeout
*/
int
os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime)
{
TIMED_LOCK((os_rwlock_trywrlock(rwlock) == 0), abstime);
}
/*
* os_rwlock_unlock -- unlocks rwlock
*/
_Use_decl_annotations_
int
os_rwlock_unlock(os_rwlock_t *__restrict rwlock)
{
internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock;
if (rwlock_internal->is_write)
ReleaseSRWLockExclusive(&rwlock_internal->lock);
else
ReleaseSRWLockShared(&rwlock_internal->lock);
return 0;
}
/*
* os_cond_init -- initializes condition variable
*/
int
os_cond_init(os_cond_t *__restrict cond)
{
COMPILE_ERROR_ON(sizeof(os_cond_t) < sizeof(internal_os_cond_t));
internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond;
InitializeConditionVariable(&cond_internal->cond);
return 0;
}
/*
* os_cond_destroy -- destroys condition variable
*/
int
os_cond_destroy(os_cond_t *__restrict cond)
{
/* do nothing */
UNREFERENCED_PARAMETER(cond);
return 0;
}
/*
* os_cond_broadcast -- broadcast condition variable
*/
int
os_cond_broadcast(os_cond_t *__restrict cond)
{
internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond;
WakeAllConditionVariable(&cond_internal->cond);
return 0;
}
/*
* os_cond_wait -- signal condition variable
*/
int
os_cond_signal(os_cond_t *__restrict cond)
{
internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond;
WakeConditionVariable(&cond_internal->cond);
return 0;
}
/*
 * get_rel_wait -- (internal) convert an absolute timespec into a relative
 * Windows timeout in milliseconds
 *
 * A deadline that has already passed must yield 0 (do not wait). The
 * previous code stored the difference in a DWORD, which is unsigned, so
 * its "< 0" test could never fire and an expired deadline wrapped around
 * into a near-infinite wait; do the comparison in a signed type instead.
 */
static DWORD
get_rel_wait(const struct timespec *abstime)
{
	struct __timeb64 t;
	_ftime64_s(&t);
	time_t now_ms = t.time * 1000 + t.millitm;
	time_t ms = (time_t)(abstime->tv_sec * 1000 +
		abstime->tv_nsec / 1000000);

	/* time_t is signed, DWORD is not -- compare before converting */
	return (ms > now_ms) ? (DWORD)(ms - now_ms) : 0;
}
/*
* os_cond_timedwait -- waits on condition variable with timeout
*/
int
os_cond_timedwait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex, const struct timespec *abstime)
{
internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond;
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
BOOL ret;
SetLastError(0);
ret = SleepConditionVariableCS(&cond_internal->cond,
&mutex_internal->lock, get_rel_wait(abstime));
if (ret == FALSE)
return (GetLastError() == ERROR_TIMEOUT) ? ETIMEDOUT : EINVAL;
return 0;
}
/*
* os_cond_wait -- waits on condition variable
*/
int
os_cond_wait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex)
{
internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond;
internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex;
/* XXX - return error code based on GetLastError() */
BOOL ret;
ret = SleepConditionVariableCS(&cond_internal->cond,
&mutex_internal->lock, INFINITE);
return (ret == FALSE) ? EINVAL : 0;
}
/*
 * os_once -- once-only function call
 *
 * Lock-free emulation of pthread_once(): the once-control advances
 * through states 0 (not run) -> 1 (running) -> 2 (done) via
 * compare-and-swap, so exactly one thread executes func(); the others
 * spin until the state reaches 2.
 */
int
os_once(os_once_t *once, void (*func)(void))
{
	internal_os_once_t *once_internal = (internal_os_once_t *)once;
	internal_os_once_t tmp;
	while ((tmp = *once_internal) != 2) {
		if (tmp == 1)
			continue; /* another thread is already calling func() */
		/* try to be the first one... */
		if (!util_bool_compare_and_swap64(once_internal, tmp, 1))
			continue; /* sorry, another thread was faster */
		func();
		if (!util_bool_compare_and_swap64(once_internal, 1, 2)) {
			ERR("error setting once");
			return -1;
		}
	}
	return 0;
}
/*
* os_tls_key_create -- creates a new tls key
*/
int
os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *))
{
*key = FlsAlloc(destructor);
if (*key == TLS_OUT_OF_INDEXES)
return EAGAIN;
return 0;
}
/*
* os_tls_key_delete -- deletes key from tls
*/
int
os_tls_key_delete(os_tls_key_t key)
{
if (!FlsFree(key))
return EINVAL;
return 0;
}
/*
* os_tls_set -- sets a value in tls
*/
int
os_tls_set(os_tls_key_t key, const void *value)
{
if (!FlsSetValue(key, (LPVOID)value))
return ENOENT;
return 0;
}
/*
 * os_tls_get -- gets a value from tls
 *
 * Returns the value previously stored under 'key' for this thread.
 */
void *
os_tls_get(os_tls_key_t key)
{
	void *value = FlsGetValue(key);
	return value;
}
/* threading */
/*
 * os_thread_start_routine_wrapper is a start routine for _beginthreadex() and
 * it helps:
 *
 * - wrap the os_thread_create's start function
 */
static unsigned __stdcall
os_thread_start_routine_wrapper(void *arg)
{
	internal_os_thread_t *ti = (internal_os_thread_t *)arg;
	/* run the user's start routine and stash its result for join */
	ti->result = ti->start_routine(ti->arg);
	return 0;
}
/*
 * os_thread_create -- starts a new thread
 *
 * 'thread' is caller-owned storage that this function fills in; 'attr' is
 * currently ignored.  The thread is created suspended and then resumed so
 * thread_info is fully initialized before the start routine can run.
 * Returns 0 on success, errno/EAGAIN on failure.
 */
int
os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr,
	void *(*start_routine)(void *), void *arg)
{
	COMPILE_ERROR_ON(sizeof(os_thread_t) < sizeof(internal_os_thread_t));
	internal_os_thread_t *thread_info = (internal_os_thread_t *)thread;
	thread_info->start_routine = start_routine;
	thread_info->arg = arg;
	thread_info->thread_handle = (HANDLE)_beginthreadex(NULL, 0,
		os_thread_start_routine_wrapper, thread_info, CREATE_SUSPENDED,
		NULL);
	if (thread_info->thread_handle == 0) {
		/*
		 * BUGFIX: thread_info aliases the caller-provided 'thread'
		 * storage (possibly on the stack) -- it was never allocated
		 * here, so the previous free(thread_info) was undefined
		 * behavior.  Nothing to release on this path.
		 */
		return errno;
	}
	if (ResumeThread(thread_info->thread_handle) == (DWORD)-1) {
		/* don't leak the suspended thread's handle */
		CloseHandle(thread_info->thread_handle);
		return EAGAIN;
	}
	return 0;
}
/*
 * os_thread_join -- joins a thread
 *
 * Waits for the thread to finish, closes its handle, and (when 'result'
 * is non-NULL) returns the start routine's return value.  Always 0.
 */
int
os_thread_join(os_thread_t *thread, void **result)
{
	internal_os_thread_t *it = (internal_os_thread_t *)thread;
	WaitForSingleObject(it->thread_handle, INFINITE);
	CloseHandle(it->thread_handle);
	if (result)
		*result = it->result;
	return 0;
}
/*
 * os_thread_self -- returns handle to calling thread
 *
 * Stores the current thread's handle into the caller-provided object.
 *
 * NOTE(review): GetCurrentThread() returns a pseudo-handle that is only
 * meaningful inside the calling thread -- confirm callers never hand the
 * resulting os_thread_t to another thread (e.g. for os_thread_join()).
 */
void
os_thread_self(os_thread_t *thread)
{
	internal_os_thread_t *internal_thread = (internal_os_thread_t *)thread;
	internal_thread->thread_handle = GetCurrentThread();
}
/*
 * os_cpu_zero -- clears cpu set
 *
 * Resets the group-affinity record so no CPU is selected.
 */
void
os_cpu_zero(os_cpu_set_t *set)
{
	internal_os_cpu_set_t *iset = (internal_os_cpu_set_t *)set;
	memset(&iset->affinity, 0, sizeof(iset->affinity));
}
/*
 * os_cpu_set -- adds cpu to set
 *
 * Translates a flat, zero-based CPU index into a Windows processor
 * group plus an in-group bit mask.  Walks the processor groups,
 * accumulating active-CPU counts, until it finds the group containing
 * 'cpu'; aborts via FATAL() if 'cpu' exceeds the total CPU count.
 */
void
os_cpu_set(size_t cpu, os_cpu_set_t *set)
{
	internal_os_cpu_set_t *internal_set = (internal_os_cpu_set_t *)set;
	int sum = 0;
	int group_max = GetActiveProcessorGroupCount();
	int group = 0;
	while (group < group_max) {
		sum += GetActiveProcessorCount(group);
		/*
		 * NOTE(review): 'sum' (int) is compared against 'cpu'
		 * (size_t), so 'sum' is converted to an unsigned type here;
		 * this is correct only while 'sum' can never be negative.
		 */
		if (sum > cpu) {
			/*
			 * XXX: can't set affinity to two different cpu groups
			 */
			if (internal_set->affinity.Group != group) {
				internal_set->affinity.Mask = 0;
				internal_set->affinity.Group = group;
			}
			/* rebase 'cpu' to an index within this group */
			cpu -= sum - GetActiveProcessorCount(group);
			internal_set->affinity.Mask |= 1LL << cpu;
			return;
		}
		group++;
	}
	FATAL("os_cpu_set cpu out of bounds");
}
/*
 * os_thread_setaffinity_np -- sets affinity of the thread
 *
 * Applies the group affinity stored in 'set' to 'thread'; 'set_size' is
 * accepted for API compatibility but not used on this platform.
 * Returns 0 on success, EINVAL on failure.
 */
int
os_thread_setaffinity_np(os_thread_t *thread, size_t set_size,
	const os_cpu_set_t *set)
{
	internal_os_cpu_set_t *iset = (internal_os_cpu_set_t *)set;
	internal_os_thread_t *it = (internal_os_thread_t *)thread;
	if (SetThreadGroupAffinity(it->thread_handle, &iset->affinity, NULL))
		return 0;
	return EINVAL;
}
/*
 * os_semaphore_init -- initializes a new semaphore instance
 *
 * Creates an unnamed semaphore with initial count 'value'.
 * Returns 0 on success, -1 on failure.
 */
int
os_semaphore_init(os_semaphore_t *sem, unsigned value)
{
	internal_semaphore_t *isem = (internal_semaphore_t *)sem;
	isem->handle = CreateSemaphore(NULL, value, LONG_MAX, NULL);
	return (isem->handle == 0) ? -1 : 0;
}
/*
 * os_semaphore_destroy -- destroys a semaphore instance
 *
 * Returns 0 on success, -1 if the handle could not be closed.
 */
int
os_semaphore_destroy(os_semaphore_t *sem)
{
	internal_semaphore_t *isem = (internal_semaphore_t *)sem;
	return CloseHandle(isem->handle) ? 0 : -1;
}
/*
 * os_semaphore_wait -- decreases the value of the semaphore
 *
 * Blocks until the semaphore can be acquired.
 * Returns 0 on success, -1 on failure.
 */
int
os_semaphore_wait(os_semaphore_t *sem)
{
	internal_semaphore_t *isem = (internal_semaphore_t *)sem;
	if (WaitForSingleObject(isem->handle, INFINITE) != WAIT_OBJECT_0)
		return -1;
	return 0;
}
/*
 * os_semaphore_trywait -- tries to decrease the value of the semaphore
 *
 * Non-blocking acquire.  Returns 0 on success, -1 otherwise; errno is
 * set to EAGAIN when the semaphore was simply unavailable.
 */
int
os_semaphore_trywait(os_semaphore_t *sem)
{
	internal_semaphore_t *isem = (internal_semaphore_t *)sem;
	DWORD rc = WaitForSingleObject(isem->handle, 0);
	if (rc == WAIT_OBJECT_0)
		return 0;
	if (rc == WAIT_TIMEOUT)
		errno = EAGAIN;
	return -1;
}
/*
 * os_semaphore_post -- increases the value of the semaphore
 *
 * Releases one unit.  Returns 0 on success, -1 on failure.
 */
int
os_semaphore_post(os_semaphore_t *sem)
{
	internal_semaphore_t *isem = (internal_semaphore_t *)sem;
	return ReleaseSemaphore(isem->handle, 1, NULL) ? 0 : -1;
}
| 15,425 | 22.443769 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/out.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* out.c -- support for logging, tracing, and assertion output
*
* Macros like LOG(), OUT, ASSERT(), etc. end up here.
*/
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "os.h"
#include "os_thread.h"
#include "valgrind_internal.h"
#include "util.h"
/* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */
#ifdef _WIN32
#include "srcversion.h"
#endif
static const char *Log_prefix;
static int Log_level;
static FILE *Out_fp;
static unsigned Log_alignment;
#ifndef NO_LIBPTHREAD
#define MAXPRINT 8192 /* maximum expected log line */
#else
#define MAXPRINT 256 /* maximum expected log line for libpmem */
#endif
/* per-thread buffer holding the most recently recorded error message */
struct errormsg
{
	char msg[MAXPRINT];	/* message text */
#ifdef _WIN32
	wchar_t wmsg[MAXPRINT];	/* wide-char copy, filled by out_get_errormsgW() */
#endif
};
#ifndef NO_LIBPTHREAD
static os_once_t Last_errormsg_key_once = OS_ONCE_INIT;
static os_tls_key_t Last_errormsg_key;
/*
 * _Last_errormsg_key_alloc -- (internal) create the TLS key for the
 * per-thread error-message buffer; free() is registered as the per-thread
 * destructor.  Runs exactly once via os_once().
 */
static void
_Last_errormsg_key_alloc(void)
{
	int pth_ret = os_tls_key_create(&Last_errormsg_key, free);
	if (pth_ret)
		/* fixed: message previously named "os_thread_key_create" */
		FATAL("!os_tls_key_create");
	VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Last_errormsg_key_once);
}
/*
 * Last_errormsg_key_alloc -- make sure the error-message TLS key exists
 * (thread-safe; creation happens at most once).
 */
static void
Last_errormsg_key_alloc(void)
{
	os_once(&Last_errormsg_key_once, _Last_errormsg_key_alloc);
	/*
	 * Workaround for a Helgrind false positive:
	 * https://bugs.kde.org/show_bug.cgi?id=337735
	 */
	VALGRIND_ANNOTATE_HAPPENS_AFTER(&Last_errormsg_key_once);
}
/*
 * Last_errormsg_fini -- release this thread's error-message buffer and
 * delete the TLS key.
 */
static inline void
Last_errormsg_fini(void)
{
	void *buf = os_tls_get(Last_errormsg_key);
	if (buf != NULL) {
		free(buf);
		(void) os_tls_set(Last_errormsg_key, NULL);
	}
	(void) os_tls_key_delete(Last_errormsg_key);
}
/*
 * Last_errormsg_get -- return this thread's error-message buffer,
 * allocating (and registering in TLS) an empty one on first use.
 */
static inline struct errormsg *
Last_errormsg_get(void)
{
	Last_errormsg_key_alloc();
	struct errormsg *e = os_tls_get(Last_errormsg_key);
	if (e != NULL)
		return e;
	e = malloc(sizeof(*e));
	if (e == NULL)
		FATAL("!malloc");
	/* make sure it contains empty string initially */
	e->msg[0] = '\0';
	if (os_tls_set(Last_errormsg_key, e) != 0)
		FATAL("!os_tls_set");
	return e;
}
#else
/*
 * We don't want libpmem to depend on libpthread. Instead of using pthread
 * API to dynamically allocate thread-specific error message buffer, we put
 * it into TLS. However, keeping a pretty large static buffer (8K) in TLS
 * may lead to some issues, so the maximum message length is reduced.
 * Fortunately, it looks like the longest error message in libpmem should
 * not be longer than about 90 chars (in case of pmem_check_version()).
 */
static __thread struct errormsg Last_errormsg;
/* no dynamic TLS key in this variant -- nothing to allocate */
static inline void
Last_errormsg_key_alloc(void)
{
}
/* buffer lives directly in TLS -- nothing to free */
static inline void
Last_errormsg_fini(void)
{
}
/* return the calling thread's error-message buffer */
static inline const struct errormsg *
Last_errormsg_get(void)
{
	return &Last_errormsg;
}
#endif /* NO_LIBPTHREAD */
/*
 * out_init -- initialize the log
 *
 * This is called from the library initialization code.
 *
 * log_prefix    - tag prepended to every log line
 * log_level_var - name of the env variable carrying the log level
 * log_file_var  - name of the env variable carrying the log file path
 * major_version / minor_version - library version, logged at startup
 *
 * Only the first call has any effect; later calls return immediately.
 * In non-DEBUG builds the env variables are ignored and output goes to
 * stderr.
 */
void
out_init(const char *log_prefix, const char *log_level_var,
	const char *log_file_var, int major_version,
	int minor_version)
{
	static int once;
	/* only need to initialize the out module once */
	if (once)
		return;
	once++;
	Log_prefix = log_prefix;
#ifdef DEBUG
	char *log_level;
	char *log_file;
	if ((log_level = os_getenv(log_level_var)) != NULL) {
		Log_level = atoi(log_level);
		if (Log_level < 0) {
			Log_level = 0;
		}
	}
	if ((log_file = os_getenv(log_file_var)) != NULL &&
		log_file[0] != '\0') {
		/* reserve more than enough space for a PID + '\0' */
		char log_file_pid[PATH_MAX];
		size_t len = strlen(log_file);
		/* a trailing '-' in the file name means "append the PID" */
		if (len > 0 && log_file[len - 1] == '-') {
			if (util_snprintf(log_file_pid, PATH_MAX, "%s%d",
				log_file, getpid()) < 0) {
				ERR("snprintf: %d", errno);
				abort();
			}
			log_file = log_file_pid;
		}
		if ((Out_fp = os_fopen(log_file, "w")) == NULL) {
			char buff[UTIL_MAX_ERR_MSG];
			util_strerror(errno, buff, UTIL_MAX_ERR_MSG);
			fprintf(stderr, "Error (%s): %s=%s: %s\n",
				log_prefix, log_file_var,
				log_file, buff);
			abort();
		}
	}
#endif /* DEBUG */
	char *log_alignment = os_getenv("PMDK_LOG_ALIGN");
	if (log_alignment) {
		int align = atoi(log_alignment);
		if (align > 0)
			Log_alignment = (unsigned)align;
	}
	/* default to stderr; line-buffer an actual log file */
	if (Out_fp == NULL)
		Out_fp = stderr;
	else
		setlinebuf(Out_fp);
#ifdef DEBUG
	static char namepath[PATH_MAX];
	LOG(1, "pid %d: program: %s", getpid(),
		util_getexecname(namepath, PATH_MAX));
#endif
	LOG(1, "%s version %d.%d", log_prefix, major_version, minor_version);
	static __attribute__((used)) const char *version_msg =
		"src version: " SRCVERSION;
	LOG(1, "%s", version_msg);
#if VG_PMEMCHECK_ENABLED
	/*
	 * Attribute "used" to prevent compiler from optimizing out the variable
	 * when LOG expands to no code (!DEBUG)
	 */
	static __attribute__((used)) const char *pmemcheck_msg =
		"compiled with support for Valgrind pmemcheck";
	LOG(1, "%s", pmemcheck_msg);
#endif /* VG_PMEMCHECK_ENABLED */
#if VG_HELGRIND_ENABLED
	static __attribute__((used)) const char *helgrind_msg =
		"compiled with support for Valgrind helgrind";
	LOG(1, "%s", helgrind_msg);
#endif /* VG_HELGRIND_ENABLED */
#if VG_MEMCHECK_ENABLED
	static __attribute__((used)) const char *memcheck_msg =
		"compiled with support for Valgrind memcheck";
	LOG(1, "%s", memcheck_msg);
#endif /* VG_MEMCHECK_ENABLED */
#if VG_DRD_ENABLED
	static __attribute__((used)) const char *drd_msg =
		"compiled with support for Valgrind drd";
	LOG(1, "%s", drd_msg);
#endif /* VG_DRD_ENABLED */
#if SDS_ENABLED
	static __attribute__((used)) const char *shutdown_state_msg =
		"compiled with support for shutdown state";
	LOG(1, "%s", shutdown_state_msg);
#endif
#if NDCTL_ENABLED
	static __attribute__((used)) const char *ndctl_ge_63_msg =
		"compiled with libndctl 63+";
	LOG(1, "%s", ndctl_ge_63_msg);
#endif
	/* set up the per-thread error-message buffer machinery */
	Last_errormsg_key_alloc();
}
/*
 * out_fini -- close the log file
 *
 * This is called to close log file before process stop.  Output reverts
 * to stderr and the thread-local error buffer is released.
 */
void
out_fini(void)
{
	FILE *fp = Out_fp;
	if (fp != NULL && fp != stderr) {
		fclose(fp);
		Out_fp = stderr;
	}
	Last_errormsg_fini();
}
/*
 * out_print_func -- default print_func, goes to stderr or Out_fp
 *
 * Writes the already-formatted string to the log stream (stderr unless
 * out_init() opened a log file).
 */
static void
out_print_func(const char *s)
{
	/* to suppress drd false-positive */
	/* XXX: confirm real nature of this issue: pmem/issues#863 */
#ifdef SUPPRESS_FPUTS_DRD_ERROR
	VALGRIND_ANNOTATE_IGNORE_READS_BEGIN();
	VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN();
#endif
	fputs(s, Out_fp);
#ifdef SUPPRESS_FPUTS_DRD_ERROR
	VALGRIND_ANNOTATE_IGNORE_READS_END();
	VALGRIND_ANNOTATE_IGNORE_WRITES_END();
#endif
}
/*
* calling Print(s) calls the current print_func...
*/
typedef void (*Print_func)(const char *s);
typedef int (*Vsnprintf_func)(char *str, size_t size, const char *format,
va_list ap);
static Print_func Print = out_print_func;
static Vsnprintf_func Vsnprintf = vsnprintf;
/*
 * out_set_print_func -- allow override of print_func used by out module
 *
 * Passing NULL restores the default (out_print_func).
 */
void
out_set_print_func(void (*print_func)(const char *s))
{
	LOG(3, "print %p", print_func);
	if (print_func == NULL)
		Print = out_print_func;
	else
		Print = print_func;
}
/*
 * out_set_vsnprintf_func -- allow override of vsnprintf_func used by out module
 *
 * Passing NULL restores the standard vsnprintf.
 */
void
out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size,
	const char *format, va_list ap))
{
	LOG(3, "vsnprintf %p", vsnprintf_func);
	if (vsnprintf_func == NULL)
		Vsnprintf = vsnprintf;
	else
		Vsnprintf = vsnprintf_func;
}
/*
 * out_snprintf -- (internal) custom snprintf implementation
 *
 * Thin variadic wrapper over the (possibly overridden) Vsnprintf.
 */
FORMAT_PRINTF(3, 4)
static int
out_snprintf(char *str, size_t size, const char *format, ...)
{
	va_list args;
	va_start(args, format);
	int rc = Vsnprintf(str, size, format, args);
	va_end(args);
	return rc;
}
/*
 * out_common -- common output code, all output goes through here
 *
 * Builds one log line in a local buffer and hands it to Print().
 * When 'file' is non-NULL the line is prefixed with
 * "<prefix>: <level> [file:line func] ", space-padded to Log_alignment.
 * A leading '!' in fmt appends strerror(errno); '!!' appends the decoded
 * GetLastError() instead (Windows).  errno (and the Windows last-error
 * code) is preserved across the call.
 */
static void
out_common(const char *file, int line, const char *func, int level,
	const char *suffix, const char *fmt, va_list ap)
{
	int oerrno = errno;
	char buf[MAXPRINT];
	unsigned cc = 0;	/* number of characters already in buf */
	int ret;
	const char *sep = "";
	char errstr[UTIL_MAX_ERR_MSG] = "";
	unsigned long olast_error = 0;
#ifdef _WIN32
	/* capture last-error before any call below can clobber it */
	if (fmt && fmt[0] == '!' && fmt[1] == '!')
		olast_error = GetLastError();
#endif
	if (file) {
		/* strip the directory part of the path */
		char *f = strrchr(file, OS_DIR_SEPARATOR);
		if (f)
			file = f + 1;
		ret = out_snprintf(&buf[cc], MAXPRINT - cc,
			"<%s>: <%d> [%s:%d %s] ",
			Log_prefix, level, file, line, func);
		if (ret < 0) {
			Print("out_snprintf failed");
			goto end;
		}
		cc += (unsigned)ret;
		if (cc < Log_alignment) {
			memset(buf + cc, ' ', Log_alignment - cc);
			cc = Log_alignment;
		}
	}
	if (fmt) {
		if (*fmt == '!') {
			sep = ": ";
			fmt++;
			if (*fmt == '!') {
				fmt++;
				/* it will abort on non Windows OS */
				util_strwinerror(olast_error, errstr,
					UTIL_MAX_ERR_MSG);
			} else {
				util_strerror(oerrno, errstr, UTIL_MAX_ERR_MSG);
			}
		}
		ret = Vsnprintf(&buf[cc], MAXPRINT - cc, fmt, ap);
		if (ret < 0) {
			Print("Vsnprintf failed");
			goto end;
		}
		cc += (unsigned)ret;
	}
	/* append the decoded error string (if any) and the suffix */
	out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s%s", sep, errstr, suffix);
	Print(buf);
end:
	errno = oerrno;
#ifdef _WIN32
	SetLastError(olast_error);
#endif
}
/*
 * out_error -- common error output code, all error messages go through here
 *
 * Formats the message into the thread-local error buffer so that
 * out_get_errormsg() can return it later.  A leading '!' in fmt appends
 * strerror(errno); '!!' appends the decoded GetLastError() (Windows).
 * In DEBUG builds the message is additionally printed to the log.
 * errno (and the Windows last-error code) is preserved.
 */
static void
out_error(const char *file, int line, const char *func,
	const char *suffix, const char *fmt, va_list ap)
{
	int oerrno = errno;
	unsigned long olast_error = 0;
#ifdef _WIN32
	olast_error = GetLastError();
#endif
	unsigned cc = 0;	/* characters written so far */
	int ret;
	const char *sep = "";
	char errstr[UTIL_MAX_ERR_MSG] = "";
	char *errormsg = (char *)out_get_errormsg();
	if (fmt) {
		if (*fmt == '!') {
			sep = ": ";
			fmt++;
			if (*fmt == '!') {
				fmt++;
				/* it will abort on non Windows OS */
				util_strwinerror(olast_error, errstr,
					UTIL_MAX_ERR_MSG);
			} else {
				util_strerror(oerrno, errstr, UTIL_MAX_ERR_MSG);
			}
		}
		/* cc is 0 here, so the full MAXPRINT budget is available */
		ret = Vsnprintf(&errormsg[cc], MAXPRINT, fmt, ap);
		if (ret < 0) {
			strcpy(errormsg, "Vsnprintf failed");
			goto end;
		}
		cc += (unsigned)ret;
		out_snprintf(&errormsg[cc], MAXPRINT - cc, "%s%s",
			sep, errstr);
	}
#ifdef DEBUG
	if (Log_level >= 1) {
		char buf[MAXPRINT];
		cc = 0;
		if (file) {
			/* strip the directory part of the path */
			char *f = strrchr(file, OS_DIR_SEPARATOR);
			if (f)
				file = f + 1;
			ret = out_snprintf(&buf[cc], MAXPRINT,
				"<%s>: <1> [%s:%d %s] ",
				Log_prefix, file, line, func);
			if (ret < 0) {
				Print("out_snprintf failed");
				goto end;
			}
			cc += (unsigned)ret;
			if (cc < Log_alignment) {
				memset(buf + cc, ' ', Log_alignment - cc);
				cc = Log_alignment;
			}
		}
		out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s", errormsg,
			suffix);
		Print(buf);
	}
#endif
end:
	errno = oerrno;
#ifdef _WIN32
	SetLastError(olast_error);
#endif
}
/*
 * out -- output a line, newline added automatically
 */
void
out(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	out_common(NULL, 0, NULL, 0, "\n", fmt, args);
	va_end(args);
}
/*
 * out_nonl -- output a line, no newline added automatically
 *
 * Suppressed entirely when 'level' exceeds the current Log_level.
 */
void
out_nonl(int level, const char *fmt, ...)
{
	if (Log_level < level)
		return;
	va_list args;
	va_start(args, fmt);
	out_common(NULL, 0, NULL, level, "", fmt, args);
	va_end(args);
}
/*
 * out_log -- output a log line if Log_level >= level
 */
void
out_log(const char *file, int line, const char *func, int level,
	const char *fmt, ...)
{
	if (Log_level < level)
		return;
	va_list args;
	va_start(args, fmt);
	out_common(file, line, func, level, "\n", fmt, args);
	va_end(args);
}
/*
 * out_fatal -- output a fatal error & die (i.e. assertion failure)
 *
 * Never returns; terminates the process via abort().
 */
void
out_fatal(const char *file, int line, const char *func,
	const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	out_common(file, line, func, 1, "\n", fmt, args);
	va_end(args);
	abort();
}
/*
 * out_err -- output an error message
 *
 * Records the message in the thread-local error buffer (and logs it in
 * DEBUG builds).
 */
void
out_err(const char *file, int line, const char *func,
	const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	out_error(file, line, func, "\n", fmt, args);
	va_end(args);
}
/*
* out_get_errormsg -- get the last error message
*/
const char *
out_get_errormsg(void)
{
const struct errormsg *errormsg = Last_errormsg_get();
return &errormsg->msg[0];
}
#ifdef _WIN32
/*
 * out_get_errormsgW -- get the last error message in wchar_t
 *
 * Converts the thread's UTF-8 error message into the buffer's wide-char
 * area and returns a pointer to it; aborts if the conversion fails.
 */
const wchar_t *
out_get_errormsgW(void)
{
	struct errormsg *e = Last_errormsg_get();
	wchar_t *dst = &e->wmsg[0];
	if (util_toUTF16_buff(&e->msg[0], dst, sizeof(e->wmsg)) != 0)
		FATAL("!Failed to convert string");
	return (const wchar_t *)dst;
}
#endif
| 12,602 | 20.252951 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016-2020, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util.h -- internal definitions for util module
*/
#ifndef PMDK_UTIL_H
#define PMDK_UTIL_H 1
#include <string.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <ctype.h>
#ifdef _MSC_VER
#include <intrin.h> /* popcnt, bitscan */
#endif
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
extern unsigned long long Pagesize;
extern unsigned long long Mmap_align;
#if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__)
#define CACHELINE_SIZE 64ULL
#elif defined(__PPC64__)
#define CACHELINE_SIZE 128ULL
#else
#error unable to recognize architecture at compile time
#endif
#define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1))
#define PAGE_ALIGNED_UP_SIZE(size)\
PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1))
#define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0)
#define IS_MMAP_ALIGNED(size) (((size) & (Mmap_align - 1)) == 0)
#define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr)))
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))
#define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m)
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
void util_init(void);
int util_is_zeroed(const void *addr, size_t len);
uint64_t util_checksum_compute(void *addr, size_t len, uint64_t *csump,
size_t skip_off);
int util_checksum(void *addr, size_t len, uint64_t *csump,
int insert, size_t skip_off);
uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum);
int util_parse_size(const char *str, size_t *sizep);
char *util_fgets(char *buffer, int max, FILE *stream);
char *util_getexecname(char *path, size_t pathlen);
char *util_part_realpath(const char *path);
int util_compare_file_inodes(const char *path1, const char *path2);
void *util_aligned_malloc(size_t alignment, size_t size);
void util_aligned_free(void *ptr);
struct tm *util_localtime(const time_t *timep);
int util_safe_strcpy(char *dst, const char *src, size_t max_length);
void util_emit_log(const char *lib, const char *func, int order);
char *util_readline(FILE *fh);
int util_snprintf(char *str, size_t size,
const char *format, ...) FORMAT_PRINTF(3, 4);
#ifdef _WIN32
char *util_toUTF8(const wchar_t *wstr);
wchar_t *util_toUTF16(const char *wstr);
void util_free_UTF8(char *str);
void util_free_UTF16(wchar_t *str);
int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size);
int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size);
void util_suppress_errmsg(void);
int util_lasterror_to_errno(unsigned long err);
#endif
#define UTIL_MAX_ERR_MSG 128
void util_strerror(int errnum, char *buff, size_t bufflen);
void util_strwinerror(unsigned long err, char *buff, size_t bufflen);
void util_set_alloc_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
/*
* Macro calculates number of elements in given table
*/
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#ifdef _MSC_VER
#define force_inline inline __forceinline
#define NORETURN __declspec(noreturn)
#define barrier() _ReadWriteBarrier()
#else
#define force_inline __attribute__((always_inline)) inline
#define NORETURN __attribute__((noreturn))
#define barrier() asm volatile("" ::: "memory")
#endif
#ifdef _MSC_VER
typedef UNALIGNED uint64_t ua_uint64_t;
typedef UNALIGNED uint32_t ua_uint32_t;
typedef UNALIGNED uint16_t ua_uint16_t;
#else
typedef uint64_t ua_uint64_t __attribute__((aligned(1)));
typedef uint32_t ua_uint32_t __attribute__((aligned(1)));
typedef uint16_t ua_uint16_t __attribute__((aligned(1)));
#endif
#define util_get_not_masked_bits(x, mask) ((x) & ~(mask))
/*
 * util_setbit -- setbit macro substitution which properly deals with types
 *
 * Sets bit 'i' (little-endian within each byte) in the byte array 'b'.
 */
static inline void
util_setbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(1u << (i % 8));
	b[i / 8] = (uint8_t)(b[i / 8] | mask);
}
/*
 * util_clrbit -- clrbit macro substitution which properly deals with types
 *
 * Clears bit 'i' (little-endian within each byte) in the byte array 'b'.
 */
static inline void
util_clrbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(1u << (i % 8));
	b[i / 8] = (uint8_t)(b[i / 8] & (uint8_t)~mask);
}
#define util_isset(a, i) isset(a, i)
#define util_isclr(a, i) isclr(a, i)
#define util_flag_isset(a, f) ((a) & (f))
#define util_flag_isclr(a, f) (((a) & (f)) == 0)
/*
 * util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise
 */
static force_inline int
util_is_pow2(uint64_t v)
{
	if (v == 0)
		return 0;
	return (v & (v - 1)) == 0;
}
/*
 * util_div_ceil -- divides a by b and rounds up the result
 *
 * The sum is computed in unsigned long to avoid overflow of a + b - 1.
 */
static force_inline unsigned
util_div_ceil(unsigned a, unsigned b)
{
	unsigned long total = (unsigned long)a + b - 1;
	return (unsigned)(total / b);
}
/*
* util_bool_compare_and_swap -- perform an atomic compare and swap
* util_fetch_and_* -- perform an operation atomically, return old value
* util_synchronize -- issue a full memory barrier
* util_popcount -- count number of set bits
* util_lssb_index -- return index of least significant set bit,
* undefined on zero
* util_mssb_index -- return index of most significant set bit
* undefined on zero
*
* XXX assertions needed on (value != 0) in both versions of bitscans
*
*/
#ifndef _MSC_VER
/*
* ISO C11 -- 7.17.1.4
* memory_order - an enumerated type whose enumerators identify memory ordering
* constraints.
*/
typedef enum {
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
*
*
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address -- see comments at the MSVC version.
*
* Also, instead of generic functions, two versions are available:
* for 32 bit fundamental integers, and for 64 bit ones.
*/
#define util_atomic_load_explicit32 __atomic_load
#define util_atomic_load_explicit64 __atomic_load
/*
* ISO C11 -- 7.17.7.1 The atomic_store generic functions
* Integer width specific versions as supplement for:
*
* #include <stdatomic.h>
* void atomic_store(volatile A *object, C desired);
* void atomic_store_explicit(volatile A *object, C desired,
* memory_order order);
*/
#define util_atomic_store_explicit32 __atomic_store_n
#define util_atomic_store_explicit64 __atomic_store_n
/*
* https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
* https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions
*/
#define util_bool_compare_and_swap32 __sync_bool_compare_and_swap
#define util_bool_compare_and_swap64 __sync_bool_compare_and_swap
#define util_fetch_and_add32 __sync_fetch_and_add
#define util_fetch_and_add64 __sync_fetch_and_add
#define util_fetch_and_sub32 __sync_fetch_and_sub
#define util_fetch_and_sub64 __sync_fetch_and_sub
#define util_fetch_and_and32 __sync_fetch_and_and
#define util_fetch_and_and64 __sync_fetch_and_and
#define util_fetch_and_or32 __sync_fetch_and_or
#define util_fetch_and_or64 __sync_fetch_and_or
#define util_synchronize __sync_synchronize
#define util_popcount(value) ((unsigned char)__builtin_popcount(value))
#define util_popcount64(value) ((unsigned char)__builtin_popcountll(value))
#define util_lssb_index(value) ((unsigned char)__builtin_ctz(value))
#define util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value))
#define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value)))
#define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value)))
#else
/* ISO C11 -- 7.17.1.4 */
typedef enum {
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
*
*
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address.
* The MSVC specific implementation needs to trigger a barrier (at least
* compiler barrier) after the load from the volatile value. The actual load
* from the volatile value itself is expected to be atomic.
*
* The actual isnterface here:
* #include "util.h"
* void util_atomic_load32(volatile A *object, A *destination);
* void util_atomic_load64(volatile A *object, A *destination);
* void util_atomic_load_explicit32(volatile A *object, A *destination,
* memory_order order);
* void util_atomic_load_explicit64(volatile A *object, A *destination,
* memory_order order);
*/
#ifndef _M_X64
#error MSVC ports of util_atomic_ only work on X86_64
#endif
#if _MSC_VER >= 2000
#error util_atomic_ utility functions not tested with this version of VC++
#error These utility functions are not future proof, as they are not
#error based on publicly available documentation.
#endif
#define util_atomic_load_explicit(object, dest, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_consume &&\
order != memory_order_acquire &&\
order != memory_order_relaxed);\
*dest = *object;\
if (order == memory_order_seq_cst ||\
order == memory_order_consume ||\
order == memory_order_acquire)\
_ReadWriteBarrier();\
} while (0)
#define util_atomic_load_explicit32 util_atomic_load_explicit
#define util_atomic_load_explicit64 util_atomic_load_explicit
/* ISO C11 -- 7.17.7.1 The atomic_store generic functions */
#define util_atomic_store_explicit64(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange64(\
(volatile long long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
#define util_atomic_store_explicit32(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange(\
(volatile long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
/*
 * https://msdn.microsoft.com/en-us/library/hh977022.aspx
 */
/*
 * bool_compare_and_swap32_VC -- 32-bit CAS; returns nonzero iff *ptr
 * equaled oldval and was atomically replaced with newval
 */
static __inline int
bool_compare_and_swap32_VC(volatile LONG *ptr,
	LONG oldval, LONG newval)
{
	LONG old = InterlockedCompareExchange(ptr, newval, oldval);
	return (old == oldval);
}
/*
 * bool_compare_and_swap64_VC -- 64-bit variant of the above
 */
static __inline int
bool_compare_and_swap64_VC(volatile LONG64 *ptr,
	LONG64 oldval, LONG64 newval)
{
	LONG64 old = InterlockedCompareExchange64(ptr, newval, oldval);
	return (old == oldval);
}
#define util_bool_compare_and_swap32(p, o, n)\
bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n))
#define util_bool_compare_and_swap64(p, o, n)\
bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n))
#define util_fetch_and_add32(ptr, value)\
InterlockedExchangeAdd((LONG *)(ptr), value)
#define util_fetch_and_add64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), value)
#define util_fetch_and_sub32(ptr, value)\
InterlockedExchangeSubtract((LONG *)(ptr), value)
#define util_fetch_and_sub64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value)))
#define util_fetch_and_and32(ptr, value)\
InterlockedAnd((LONG *)(ptr), value)
#define util_fetch_and_and64(ptr, value)\
InterlockedAnd64((LONG64 *)(ptr), value)
#define util_fetch_and_or32(ptr, value)\
InterlockedOr((LONG *)(ptr), value)
#define util_fetch_and_or64(ptr, value)\
InterlockedOr64((LONG64 *)(ptr), value)
/* util_synchronize -- issue a full memory barrier (MSVC counterpart) */
static __inline void
util_synchronize(void)
{
	MemoryBarrier();
}
#define util_popcount(value) (unsigned char)__popcnt(value)
#define util_popcount64(value) (unsigned char)__popcnt64(value)
/* index of least significant set bit -- undefined for value == 0 */
static __inline unsigned char
util_lssb_index(int value)
{
	unsigned long ret;
	_BitScanForward(&ret, value);
	return (unsigned char)ret;
}
/* 64-bit variant -- undefined for value == 0 */
static __inline unsigned char
util_lssb_index64(long long value)
{
	unsigned long ret;
	_BitScanForward64(&ret, value);
	return (unsigned char)ret;
}
/* index of most significant set bit -- undefined for value == 0 */
static __inline unsigned char
util_mssb_index(int value)
{
	unsigned long ret;
	_BitScanReverse(&ret, value);
	return (unsigned char)ret;
}
/* 64-bit variant -- undefined for value == 0 */
static __inline unsigned char
util_mssb_index64(long long value)
{
	unsigned long ret;
	_BitScanReverse64(&ret, value);
	return (unsigned char)ret;
}
#endif
/* ISO C11 -- 7.17.7 Operations on atomic types */
#define util_atomic_load32(object, dest)\
util_atomic_load_explicit32(object, dest, memory_order_seq_cst)
#define util_atomic_load64(object, dest)\
util_atomic_load_explicit64(object, dest, memory_order_seq_cst)
#define util_atomic_store32(object, desired)\
util_atomic_store_explicit32(object, desired, memory_order_seq_cst)
#define util_atomic_store64(object, desired)\
util_atomic_store_explicit64(object, desired, memory_order_seq_cst)
/*
 * util_get_printable_ascii -- convert non-printable ascii to dot '.'
 */
static inline char
util_get_printable_ascii(char c)
{
	if (isprint((unsigned char)c))
		return c;
	return '.';
}
char *util_concat_str(const char *s1, const char *s2);
#if !defined(likely)
#if defined(__GNUC__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (!!(x))
#define unlikely(x) (!!(x))
#endif
#endif
#if defined(__CHECKER__)
#define COMPILE_ERROR_ON(cond)
#define ASSERT_COMPILE_ERROR_ON(cond)
#elif defined(_MSC_VER)
#define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond))
/* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */
#define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0)
#else
#define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
#define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond)
#endif
#ifndef _MSC_VER
#define ATTR_CONSTRUCTOR __attribute__((constructor)) static
#define ATTR_DESTRUCTOR __attribute__((destructor)) static
#else
#define ATTR_CONSTRUCTOR
#define ATTR_DESTRUCTOR
#endif
#ifndef _MSC_VER
#define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR
#else
#ifdef __cplusplus
#define CONSTRUCTOR(fun) \
void fun(); \
struct _##fun { \
_##fun() { \
fun(); \
} \
}; static _##fun foo; \
static
#else
#define CONSTRUCTOR(fun) \
MSVC_CONSTR(fun) \
static
#endif
#endif
#ifdef __GNUC__
#define CHECK_FUNC_COMPATIBLE(func1, func2)\
COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\
typeof(func2)))
#else
#define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0)
#endif /* __GNUC__ */
#ifdef __cplusplus
}
#endif
#endif /* util.h */
| 17,058 | 30.47417 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind_internal.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* valgrind_internal.h -- internal definitions for valgrind macros
*/
#ifndef PMDK_VALGRIND_INTERNAL_H
#define PMDK_VALGRIND_INTERNAL_H 1
#if !defined(_WIN32) && !defined(__FreeBSD__)
#ifndef VALGRIND_ENABLED
#define VALGRIND_ENABLED 1
#endif
#endif
#if VALGRIND_ENABLED
#define VG_PMEMCHECK_ENABLED 1
#define VG_HELGRIND_ENABLED 1
#define VG_MEMCHECK_ENABLED 1
#define VG_DRD_ENABLED 1
#endif
#if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \
VG_DRD_ENABLED
#define ANY_VG_TOOL_ENABLED 1
#else
#define ANY_VG_TOOL_ENABLED 0
#endif
#if ANY_VG_TOOL_ENABLED
extern unsigned _On_valgrind;
#define On_valgrind __builtin_expect(_On_valgrind, 0)
#include "valgrind/valgrind.h"
#else
#define On_valgrind (0)
#endif
#if VG_HELGRIND_ENABLED
extern unsigned _On_helgrind;
#define On_helgrind __builtin_expect(_On_helgrind, 0)
#include "valgrind/helgrind.h"
#else
#define On_helgrind (0)
#endif
#if VG_DRD_ENABLED
extern unsigned _On_drd;
#define On_drd __builtin_expect(_On_drd, 0)
#include "valgrind/drd.h"
#else
#define On_drd (0)
#endif
#if VG_HELGRIND_ENABLED || VG_DRD_ENABLED
extern unsigned _On_drd_or_hg;
#define On_drd_or_hg __builtin_expect(_On_drd_or_hg, 0)
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\
if (On_drd_or_hg) \
ANNOTATE_HAPPENS_BEFORE((obj));\
} while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\
if (On_drd_or_hg) \
ANNOTATE_HAPPENS_AFTER((obj));\
} while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
if (On_drd_or_hg) \
ANNOTATE_NEW_MEMORY((addr), (size));\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_READS_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_READS_END();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_WRITES_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\
if (On_drd_or_hg) \
ANNOTATE_IGNORE_WRITES_END();\
} while (0)
/* Supported by both helgrind and drd. */
#define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\
if (On_drd_or_hg) \
VALGRIND_HG_DISABLE_CHECKING((addr), (size));\
} while (0)
#else
#define On_drd_or_hg (0)
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
(void) (addr);\
(void) (size);\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0)
#define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\
(void) (addr);\
(void) (size);\
} while (0)
#endif
#if VG_PMEMCHECK_ENABLED
extern unsigned _On_pmemcheck;
#define On_pmemcheck __builtin_expect(_On_pmemcheck, 0)
#include "valgrind/pmemcheck.h"
void pobj_emit_log(const char *func, int order);
void pmem_emit_log(const char *func, int order);
void pmem2_emit_log(const char *func, int order);
extern int _Pmreorder_emit;
#define Pmreorder_emit __builtin_expect(_Pmreorder_emit, 0)
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \
(offset));\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {\
if (On_pmemcheck)\
VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\
} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_DO_FLUSH((addr), (len));\
} while (0)
#define VALGRIND_DO_FENCE do {\
if (On_pmemcheck)\
VALGRIND_PMC_DO_FENCE;\
} while (0)
#define VALGRIND_DO_PERSIST(addr, len) do {\
if (On_pmemcheck) {\
VALGRIND_PMC_DO_FLUSH((addr), (len));\
VALGRIND_PMC_DO_FENCE;\
}\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_SET_CLEAN(addr, len);\
} while (0)
#define VALGRIND_WRITE_STATS do {\
if (On_pmemcheck)\
VALGRIND_PMC_WRITE_STATS;\
} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
if (On_pmemcheck)\
VALGRIND_PMC_EMIT_LOG((emit_log));\
} while (0)
#define VALGRIND_START_TX do {\
if (On_pmemcheck)\
VALGRIND_PMC_START_TX;\
} while (0)
#define VALGRIND_START_TX_N(txn) do {\
if (On_pmemcheck)\
VALGRIND_PMC_START_TX_N(txn);\
} while (0)
#define VALGRIND_END_TX do {\
if (On_pmemcheck)\
VALGRIND_PMC_END_TX;\
} while (0)
#define VALGRIND_END_TX_N(txn) do {\
if (On_pmemcheck)\
VALGRIND_PMC_END_TX_N(txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_TX(addr, len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
if (On_pmemcheck)\
VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\
} while (0)
/*
* Logs library and function name with proper suffix
* to pmemcheck store log file.
*/
#define PMEMOBJ_API_START()\
if (Pmreorder_emit)\
pobj_emit_log(__func__, 0);
#define PMEMOBJ_API_END()\
if (Pmreorder_emit)\
pobj_emit_log(__func__, 1);
#define PMEM_API_START()\
if (Pmreorder_emit)\
pmem_emit_log(__func__, 0);
#define PMEM_API_END()\
if (Pmreorder_emit)\
pmem_emit_log(__func__, 1);
#define PMEM2_API_START(func_name)\
if (Pmreorder_emit)\
pmem2_emit_log(func_name, 0);
#define PMEM2_API_END(func_name)\
if (Pmreorder_emit)\
pmem2_emit_log(func_name, 1);
#else
#define On_pmemcheck (0)
#define Pmreorder_emit (0)
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
(void) (desc);\
(void) (base_addr);\
(void) (size);\
(void) (offset);\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_DO_FENCE do {} while (0)
#define VALGRIND_DO_PERSIST(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_WRITE_STATS do {} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
(void) (emit_log);\
} while (0)
#define VALGRIND_START_TX do {} while (0)
#define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0)
#define VALGRIND_END_TX do {} while (0)
#define VALGRIND_END_TX_N(txn) do {\
(void) (txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
(void) (txn);\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
(void) (txn);\
(void) (addr);\
(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
(void) (addr);\
(void) (len);\
} while (0)
#define PMEMOBJ_API_START() do {} while (0)
#define PMEMOBJ_API_END() do {} while (0)
#define PMEM_API_START() do {} while (0)
#define PMEM_API_END() do {} while (0)
#define PMEM2_API_START(func_name) do {\
(void) (func_name);\
} while (0)
#define PMEM2_API_END(func_name) do {\
(void) (func_name);\
} while (0)
#endif
#if VG_MEMCHECK_ENABLED
extern unsigned _On_memcheck;
#define On_memcheck __builtin_expect(_On_memcheck, 0)
#include "valgrind/memcheck.h"
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\
if (On_valgrind)\
VALGRIND_DISABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\
if (On_valgrind)\
VALGRIND_ENABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\
if (On_memcheck)\
VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\
} while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\
if (On_memcheck)\
VALGRIND_DESTROY_MEMPOOL(heap);\
} while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\
} while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_FREE(heap, addr);\
} while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\
if (On_memcheck)\
VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_DEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\
if (On_memcheck)\
VALGRIND_MAKE_MEM_NOACCESS(addr, len);\
} while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\
if (On_memcheck)\
VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\
} while (0)
#else
#define On_memcheck (0)
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\
do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap)\
do { (void) (heap); } while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\
do { (void) (heap); (void) (addr); (void) (size); } while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\
do { (void) (heap); (void) (addr); } while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\
do {\
(void) (heap); (void) (addrA); (void) (addrB); (void) (size);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\
do { (void) (addr); (void) (len); } while (0)
#endif
#endif
| 11,169 | 22.319415 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/alloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
#ifndef COMMON_ALLOC_H
#define COMMON_ALLOC_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef void *(*Malloc_func)(size_t size);
typedef void *(*Realloc_func)(void *ptr, size_t size);
extern Malloc_func fn_malloc;
extern Realloc_func fn_realloc;
#if FAULT_INJECTION
void *_flt_Malloc(size_t, const char *);
void *_flt_Realloc(void *, size_t, const char *);
#define Malloc(size) _flt_Malloc(size, __func__)
#define Realloc(ptr, size) _flt_Realloc(ptr, size, __func__)
#else
void *_Malloc(size_t);
void *_Realloc(void *, size_t);
#define Malloc(size) _Malloc(size)
#define Realloc(ptr, size) _Realloc(ptr, size)
#endif
void set_func_malloc(void *(*malloc_func)(size_t size));
void set_func_realloc(void *(*realloc_func)(void *ptr, size_t size));
/*
* overridable names for malloc & friends used by this library
*/
typedef void (*Free_func)(void *ptr);
typedef char *(*Strdup_func)(const char *s);
extern Free_func Free;
extern Strdup_func Strdup;
extern void *Zalloc(size_t sz);
#ifdef __cplusplus
}
#endif
#endif
| 1,131 | 21.64 | 69 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/os_thread.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_thread.h -- os thread abstraction layer
*/
#ifndef OS_THREAD_H
#define OS_THREAD_H 1
#include <stdint.h>
#include <time.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef union {
long long align;
char padding[44]; /* linux: 40 windows: 44 */
} os_mutex_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 13 */
} os_rwlock_t;
typedef union {
long long align;
char padding[48]; /* linux: 48 windows: 12 */
} os_cond_t;
typedef union {
long long align;
char padding[32]; /* linux: 8 windows: 32 */
} os_thread_t;
typedef union {
long long align; /* linux: long windows: 8 FreeBSD: 12 */
char padding[16]; /* 16 to be safe */
} os_once_t;
#define OS_ONCE_INIT { .padding = {0} }
typedef unsigned os_tls_key_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 8 */
} os_semaphore_t;
typedef union {
long long align;
char padding[56]; /* linux: 56 windows: 8 */
} os_thread_attr_t;
typedef union {
long long align;
char padding[512];
} os_cpu_set_t;
#ifdef __FreeBSD__
#define cpu_set_t cpuset_t
typedef uintptr_t os_spinlock_t;
#else
typedef volatile int os_spinlock_t; /* XXX: not implemented on windows */
#endif
void os_cpu_zero(os_cpu_set_t *set);
void os_cpu_set(size_t cpu, os_cpu_set_t *set);
#ifndef _WIN32
#define _When_(...)
#endif
int os_once(os_once_t *o, void (*func)(void));
int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *));
int os_tls_key_delete(os_tls_key_t key);
int os_tls_set(os_tls_key_t key, const void *value);
void *os_tls_get(os_tls_key_t key);
int os_mutex_init(os_mutex_t *__restrict mutex);
int os_mutex_destroy(os_mutex_t *__restrict mutex);
_When_(return == 0, _Acquires_lock_(mutex->lock))
int os_mutex_lock(os_mutex_t *__restrict mutex);
_When_(return == 0, _Acquires_lock_(mutex->lock))
int os_mutex_trylock(os_mutex_t *__restrict mutex);
int os_mutex_unlock(os_mutex_t *__restrict mutex);
/* XXX - non POSIX */
int os_mutex_timedlock(os_mutex_t *__restrict mutex,
const struct timespec *abstime);
int os_rwlock_init(os_rwlock_t *__restrict rwlock);
int os_rwlock_destroy(os_rwlock_t *__restrict rwlock);
int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock);
_When_(return == 0, _Acquires_exclusive_lock_(rwlock->lock))
int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock);
_When_(rwlock->is_write != 0, _Requires_exclusive_lock_held_(rwlock->lock))
_When_(rwlock->is_write == 0, _Requires_shared_lock_held_(rwlock->lock))
int os_rwlock_unlock(os_rwlock_t *__restrict rwlock);
int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime);
int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock,
const struct timespec *abstime);
int os_spin_init(os_spinlock_t *lock, int pshared);
int os_spin_destroy(os_spinlock_t *lock);
int os_spin_lock(os_spinlock_t *lock);
int os_spin_unlock(os_spinlock_t *lock);
int os_spin_trylock(os_spinlock_t *lock);
int os_cond_init(os_cond_t *__restrict cond);
int os_cond_destroy(os_cond_t *__restrict cond);
int os_cond_broadcast(os_cond_t *__restrict cond);
int os_cond_signal(os_cond_t *__restrict cond);
int os_cond_timedwait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex, const struct timespec *abstime);
int os_cond_wait(os_cond_t *__restrict cond,
os_mutex_t *__restrict mutex);
/* threading */
int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr,
void *(*start_routine)(void *), void *arg);
int os_thread_join(os_thread_t *thread, void **result);
void os_thread_self(os_thread_t *thread);
/* thread affinity */
int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size,
const os_cpu_set_t *set);
int os_thread_atfork(void (*prepare)(void), void (*parent)(void),
void (*child)(void));
int os_semaphore_init(os_semaphore_t *sem, unsigned value);
int os_semaphore_destroy(os_semaphore_t *sem);
int os_semaphore_wait(os_semaphore_t *sem);
int os_semaphore_trywait(os_semaphore_t *sem);
int os_semaphore_post(os_semaphore_t *sem);
#ifdef __cplusplus
}
#endif
#endif /* OS_THREAD_H */
| 5,876 | 31.291209 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/out.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* out.h -- definitions for "out" module
*/
#ifndef PMDK_OUT_H
#define PMDK_OUT_H 1
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Suppress errors which are after appropriate ASSERT* macro for nondebug
* builds.
*/
#if !defined(DEBUG) && (defined(__clang_analyzer__) || defined(__COVERITY__) ||\
defined(__KLOCWORK__))
#define OUT_FATAL_DISCARD_NORETURN __attribute__((noreturn))
#else
#define OUT_FATAL_DISCARD_NORETURN
#endif
#ifndef EVALUATE_DBG_EXPRESSIONS
#if defined(DEBUG) || defined(__clang_analyzer__) || defined(__COVERITY__) ||\
defined(__KLOCWORK__)
#define EVALUATE_DBG_EXPRESSIONS 1
#else
#define EVALUATE_DBG_EXPRESSIONS 0
#endif
#endif
#ifdef DEBUG
#define OUT_LOG out_log
#define OUT_NONL out_nonl
#define OUT_FATAL out_fatal
#define OUT_FATAL_ABORT out_fatal
#else
/*
 * out_log_discard -- non-debug replacement for out_log: consumes and
 * discards every argument so LOG() expands to a no-op while still
 * type-checking its operands.
 */
static __attribute__((always_inline)) inline void
out_log_discard(const char *file, int line, const char *func, int level,
	const char *fmt, ...)
{
	(void) file;
	(void) line;
	(void) func;
	(void) level;
	(void) fmt;
}
/*
 * out_nonl_discard -- non-debug replacement for out_nonl: discards its
 * arguments so LOG_NONL() compiles away to a no-op.
 */
static __attribute__((always_inline)) inline void
out_nonl_discard(int level, const char *fmt, ...)
{
	(void) level;
	(void) fmt;
}
/*
 * out_fatal_discard -- non-debug replacement for out_fatal used by the
 * ASSERT* macros: discards its arguments and does NOT abort.
 * OUT_FATAL_DISCARD_NORETURN marks it noreturn only for static analyzers
 * (clang analyzer / Coverity / Klocwork -- see its definition above), so
 * code following a failed assert is not flagged as reachable.
 */
static __attribute__((always_inline)) OUT_FATAL_DISCARD_NORETURN inline void
out_fatal_discard(const char *file, int line, const char *func,
	const char *fmt, ...)
{
	(void) file;
	(void) line;
	(void) func;
	(void) fmt;
}
/*
 * out_fatal_abort -- non-debug replacement for out_fatal used by FATAL():
 * discards the message arguments and terminates the process via abort().
 * (NORETURN comes from util.h -- not visible in this header.)
 */
static __attribute__((always_inline)) NORETURN inline void
out_fatal_abort(const char *file, int line, const char *func,
	const char *fmt, ...)
{
	(void) file;
	(void) line;
	(void) func;
	(void) fmt;
	abort();
}
#define OUT_LOG out_log_discard
#define OUT_NONL out_nonl_discard
#define OUT_FATAL out_fatal_discard
#define OUT_FATAL_ABORT out_fatal_abort
#endif
#if defined(__KLOCWORK__)
#define TEST_ALWAYS_TRUE_EXPR(cnd)
#define TEST_ALWAYS_EQ_EXPR(cnd)
#define TEST_ALWAYS_NE_EXPR(cnd)
#else
#define TEST_ALWAYS_TRUE_EXPR(cnd)\
if (__builtin_constant_p(cnd))\
ASSERT_COMPILE_ERROR_ON(cnd);
#define TEST_ALWAYS_EQ_EXPR(lhs, rhs)\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
ASSERT_COMPILE_ERROR_ON((lhs) == (rhs));
#define TEST_ALWAYS_NE_EXPR(lhs, rhs)\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
ASSERT_COMPILE_ERROR_ON((lhs) != (rhs));
#endif
/* produce debug/trace output */
#define LOG(level, ...) do { \
if (!EVALUATE_DBG_EXPRESSIONS) break;\
OUT_LOG(__FILE__, __LINE__, __func__, level, __VA_ARGS__);\
} while (0)
/* produce debug/trace output without prefix and new line */
#define LOG_NONL(level, ...) do { \
if (!EVALUATE_DBG_EXPRESSIONS) break; \
OUT_NONL(level, __VA_ARGS__); \
} while (0)
/* produce output and exit */
#define FATAL(...)\
OUT_FATAL_ABORT(__FILE__, __LINE__, __func__, __VA_ARGS__)
/* assert a condition is true at runtime */
#define ASSERT_rt(cnd) do { \
if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \
OUT_FATAL(__FILE__, __LINE__, __func__, "assertion failure: %s", #cnd);\
} while (0)
/* assertion with extra info printed if assertion fails at runtime */
#define ASSERTinfo_rt(cnd, info) do { \
if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \
OUT_FATAL(__FILE__, __LINE__, __func__, \
"assertion failure: %s (%s = %s)", #cnd, #info, info);\
} while (0)
/* assert two integer values are equal at runtime */
#define ASSERTeq_rt(lhs, rhs) do { \
if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) == (rhs))) break; \
OUT_FATAL(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \
} while (0)
/* assert two integer values are not equal at runtime */
#define ASSERTne_rt(lhs, rhs) do { \
if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) != (rhs))) break; \
OUT_FATAL(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \
} while (0)
/* assert a condition is true */
#define ASSERT(cnd)\
do {\
/*\
* Detect useless asserts on always true expression. Please use\
* COMPILE_ERROR_ON(!cnd) or ASSERT_rt(cnd) in such cases.\
*/\
TEST_ALWAYS_TRUE_EXPR(cnd);\
ASSERT_rt(cnd);\
} while (0)
/* assertion with extra info printed if assertion fails */
#define ASSERTinfo(cnd, info)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_TRUE_EXPR(cnd);\
ASSERTinfo_rt(cnd, info);\
} while (0)
/* assert two integer values are equal */
#define ASSERTeq(lhs, rhs)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_EQ_EXPR(lhs, rhs);\
ASSERTeq_rt(lhs, rhs);\
} while (0)
/* assert two integer values are not equal */
#define ASSERTne(lhs, rhs)\
do {\
/* See comment in ASSERT. */\
TEST_ALWAYS_NE_EXPR(lhs, rhs);\
ASSERTne_rt(lhs, rhs);\
} while (0)
#define ERR(...)\
out_err(__FILE__, __LINE__, __func__, __VA_ARGS__)
void out_init(const char *log_prefix, const char *log_level_var,
const char *log_file_var, int major_version,
int minor_version);
void out_fini(void);
void out(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void out_nonl(int level, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void out_log(const char *file, int line, const char *func, int level,
const char *fmt, ...) FORMAT_PRINTF(5, 6);
void out_err(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void NORETURN out_fatal(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void out_set_print_func(void (*print_func)(const char *s));
void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size,
const char *format, va_list ap));
#ifdef _WIN32
#ifndef PMDK_UTF8_API
#define out_get_errormsg out_get_errormsgW
#else
#define out_get_errormsg out_get_errormsgU
#endif
#endif
#ifndef _WIN32
const char *out_get_errormsg(void);
#else
const char *out_get_errormsgU(void);
const wchar_t *out_get_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 6,066 | 25.150862 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/memcheck.h |
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (memcheck.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(memcheck.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_UNDEFINED,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressability is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressability and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressability of an
lvalue to be checked. If suitable addressability and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Check that memory at _qzz_addr is unaddressable for _qzz_len bytes.
If any byte in this range is addressable, Valgrind returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_UNADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE,\
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is undefined for _qzz_len bytes. If any
byte in this range is defined or unaddressable, Valgrind returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
| 15,621 | 47.666667 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/pmemcheck.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2015, Intel Corporation */
#ifndef __PMEMCHECK_H
#define __PMEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__PMC_REGISTER_PMEM_MAPPING = VG_USERREQ_TOOL_BASE('P','C'),
VG_USERREQ__PMC_REGISTER_PMEM_FILE,
VG_USERREQ__PMC_REMOVE_PMEM_MAPPING,
VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING,
VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS,
VG_USERREQ__PMC_DO_FLUSH,
VG_USERREQ__PMC_DO_FENCE,
VG_USERREQ__PMC_RESERVED1, /* Do not use. */
VG_USERREQ__PMC_WRITE_STATS,
VG_USERREQ__PMC_RESERVED2, /* Do not use. */
VG_USERREQ__PMC_RESERVED3, /* Do not use. */
VG_USERREQ__PMC_RESERVED4, /* Do not use. */
VG_USERREQ__PMC_RESERVED5, /* Do not use. */
VG_USERREQ__PMC_RESERVED7, /* Do not use. */
VG_USERREQ__PMC_RESERVED8, /* Do not use. */
VG_USERREQ__PMC_RESERVED9, /* Do not use. */
VG_USERREQ__PMC_RESERVED10, /* Do not use. */
VG_USERREQ__PMC_SET_CLEAN,
/* transaction support */
VG_USERREQ__PMC_START_TX,
VG_USERREQ__PMC_START_TX_N,
VG_USERREQ__PMC_END_TX,
VG_USERREQ__PMC_END_TX_N,
VG_USERREQ__PMC_ADD_TO_TX,
VG_USERREQ__PMC_ADD_TO_TX_N,
VG_USERREQ__PMC_REMOVE_FROM_TX,
VG_USERREQ__PMC_REMOVE_FROM_TX_N,
VG_USERREQ__PMC_ADD_THREAD_TO_TX_N,
VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N,
VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,
VG_USERREQ__PMC_RESERVED6, /* Do not use. */
VG_USERREQ__PMC_EMIT_LOG,
} Vg_PMemCheckClientRequest;
/* Client-code macros to manipulate pmem mappings */
/** Register a persistent memory mapping region */
#define VALGRIND_PMC_REGISTER_PMEM_MAPPING(_qzz_addr, _qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REGISTER_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Register a persistent memory file */
#define VALGRIND_PMC_REGISTER_PMEM_FILE(_qzz_desc, _qzz_addr_base, \
_qzz_size, _qzz_offset) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REGISTER_PMEM_FILE, \
(_qzz_desc), (_qzz_addr_base), (_qzz_size), \
(_qzz_offset), 0)
/** Remove a persistent memory mapping region */
#define VALGRIND_PMC_REMOVE_PMEM_MAPPING(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Check if the given range is a registered persistent memory mapping */
#define VALGRIND_PMC_CHECK_IS_PMEM_MAPPING(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Print registered persistent memory mappings */
#define VALGRIND_PMC_PRINT_PMEM_MAPPINGS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, \
0, 0, 0, 0, 0)
/** Register a CLFLUSH-like operation */
#define VALGRIND_PMC_DO_FLUSH(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_DO_FLUSH, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Register an SFENCE */
#define VALGRIND_PMC_DO_FENCE \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_DO_FENCE, \
0, 0, 0, 0, 0)
/** Write tool stats */
#define VALGRIND_PMC_WRITE_STATS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_WRITE_STATS, \
0, 0, 0, 0, 0)
/** Emit user log */
#define VALGRIND_PMC_EMIT_LOG(_qzz_emit_log) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_EMIT_LOG, \
(_qzz_emit_log), 0, 0, 0, 0)
/** Set a region of persistent memory as clean */
#define VALGRIND_PMC_SET_CLEAN(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_SET_CLEAN, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Support for transactions */
/** Start an implicit persistent memory transaction */
#define VALGRIND_PMC_START_TX \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_START_TX, \
0, 0, 0, 0, 0)
/** Start an explicit persistent memory transaction */
#define VALGRIND_PMC_START_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_START_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** End an implicit persistent memory transaction */
#define VALGRIND_PMC_END_TX \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_END_TX, \
0, 0, 0, 0, 0)
/** End an explicit persistent memory transaction */
#define VALGRIND_PMC_END_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_END_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Add a persistent memory region to the implicit transaction */
#define VALGRIND_PMC_ADD_TO_TX(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_TO_TX, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Add a persistent memory region to an explicit transaction */
#define VALGRIND_PMC_ADD_TO_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_TO_TX_N, \
(_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0)
/** Remove a persistent memory region from the implicit transaction */
#define VALGRIND_PMC_REMOVE_FROM_TX(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_FROM_TX, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/** Remove a persistent memory region from an explicit transaction */
#define VALGRIND_PMC_REMOVE_FROM_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_FROM_TX_N, \
(_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0)
/** Add the current thread to an explicit persistent memory transaction */
#define VALGRIND_PMC_ADD_THREAD_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Remove the current thread from an explicit persistent memory transaction */
#define VALGRIND_PMC_REMOVE_THREAD_FROM_TX_N(_qzz_txn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, \
(_qzz_txn), 0, 0, 0, 0)
/** Add a persistent memory region to the global transaction-ignore list */
#define VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,\
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
| 9,085 | 47.588235 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* ctl.h -- internal declaration of statistics and control related structures
*/
#ifndef PMDK_CTL_H
#define PMDK_CTL_H 1
#include "queue.h"
#include "errno.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
struct ctl;
struct ctl_index {
const char *name;
long value;
PMDK_SLIST_ENTRY(ctl_index) entry;
};
PMDK_SLIST_HEAD(ctl_indexes, ctl_index);
enum ctl_query_source {
CTL_UNKNOWN_QUERY_SOURCE,
/* query executed directly from the program */
CTL_QUERY_PROGRAMMATIC,
/* query executed from the config file */
CTL_QUERY_CONFIG_INPUT,
MAX_CTL_QUERY_SOURCE
};
enum ctl_query_type {
CTL_QUERY_READ,
CTL_QUERY_WRITE,
CTL_QUERY_RUNNABLE,
MAX_CTL_QUERY_TYPE
};
typedef int (*node_callback)(void *ctx, enum ctl_query_source type,
void *arg, struct ctl_indexes *indexes);
enum ctl_node_type {
CTL_NODE_UNKNOWN,
CTL_NODE_NAMED,
CTL_NODE_LEAF,
CTL_NODE_INDEXED,
MAX_CTL_NODE
};
typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size);
struct ctl_argument_parser {
size_t dest_offset; /* offset of the field inside of the argument */
size_t dest_size; /* size of the field inside of the argument */
ctl_arg_parser parser;
};
struct ctl_argument {
size_t dest_size; /* sizeof the entire argument */
struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */
};
#define sizeof_member(t, m) sizeof(((t *)0)->m)
#define CTL_ARG_PARSER(t, p)\
{0, sizeof(t), p}
#define CTL_ARG_PARSER_STRUCT(t, m, p)\
{offsetof(t, m), sizeof_member(t, m), p}
#define CTL_ARG_PARSER_END {0, 0, NULL}
/*
* CTL Tree node structure, do not use directly. All the necessary functionality
* is provided by the included macros.
*/
struct ctl_node {
const char *name;
enum ctl_node_type type;
node_callback cb[MAX_CTL_QUERY_TYPE];
const struct ctl_argument *arg;
const struct ctl_node *children;
};
struct ctl *ctl_new(void);
void ctl_delete(struct ctl *stats);
int ctl_load_config_from_string(struct ctl *ctl, void *ctx,
const char *cfg_string);
int ctl_load_config_from_file(struct ctl *ctl, void *ctx,
const char *cfg_file);
/* Use through CTL_REGISTER_MODULE, never directly */
void ctl_register_module_node(struct ctl *c,
const char *name, struct ctl_node *n);
int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_BOOLEAN {sizeof(int),\
{{0, sizeof(int), ctl_arg_boolean},\
CTL_ARG_PARSER_END}};
int ctl_arg_integer(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_INT {sizeof(int),\
{{0, sizeof(int), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
#define CTL_ARG_LONG_LONG {sizeof(long long),\
{{0, sizeof(long long), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
int ctl_arg_string(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_STRING(len) {len,\
{{0, len, ctl_arg_string},\
CTL_ARG_PARSER_END}};
#define CTL_STR(name) #name
#define CTL_NODE_END {NULL, CTL_NODE_UNKNOWN, {NULL, NULL, NULL}, NULL, NULL}
#define CTL_NODE(name, ...)\
ctl_node_##__VA_ARGS__##_##name
int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source,
const char *name, enum ctl_query_type type, void *arg);
/* Declaration of a new child node */
#define CTL_CHILD(name, ...)\
{CTL_STR(name), CTL_NODE_NAMED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name, __VA_ARGS__)}
/* Declaration of a new indexed node */
#define CTL_INDEXED(name, ...)\
{CTL_STR(name), CTL_NODE_INDEXED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name, __VA_ARGS__)}
#define CTL_READ_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_read
#define CTL_WRITE_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_write
#define CTL_RUNNABLE_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_runnable
#define CTL_ARG(name)\
ctl_arg_##name
/*
* Declaration of a new read-only leaf. If used the corresponding read function
* must be declared by CTL_READ_HANDLER macro.
*/
#define CTL_LEAF_RO(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{CTL_READ_HANDLER(name, __VA_ARGS__), NULL, NULL}, NULL, NULL}
/*
* Declaration of a new write-only leaf. If used the corresponding write
* function must be declared by CTL_WRITE_HANDLER macro.
*/
#define CTL_LEAF_WO(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{NULL, CTL_WRITE_HANDLER(name, __VA_ARGS__), NULL},\
&CTL_ARG(name), NULL}
/*
* Declaration of a new runnable leaf. If used the corresponding run
* function must be declared by CTL_RUNNABLE_HANDLER macro.
*/
#define CTL_LEAF_RUNNABLE(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{NULL, NULL, CTL_RUNNABLE_HANDLER(name, __VA_ARGS__)},\
NULL, NULL}
/*
* Declaration of a new read-write leaf. If used both read and write function
* must be declared by CTL_READ_HANDLER and CTL_WRITE_HANDLER macros.
*/
#define CTL_LEAF_RW(name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(name), CTL_WRITE_HANDLER(name), NULL},\
&CTL_ARG(name), NULL}
#define CTL_REGISTER_MODULE(_ctl, name)\
ctl_register_module_node((_ctl), CTL_STR(name),\
(struct ctl_node *)CTL_NODE(name))
#ifdef __cplusplus
}
#endif
#endif
| 5,127 | 24.261084 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/file.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* file.h -- internal definitions for file module
*/
#ifndef PMDK_FILE_H
#define PMDK_FILE_H 1
#include <stddef.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <limits.h>
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
#define NAME_MAX _MAX_FNAME
#endif
struct file_info {
char filename[NAME_MAX + 1];
int is_dir;
};
struct dir_handle {
const char *path;
#ifdef _WIN32
HANDLE handle;
char *_file;
#else
DIR *dirp;
#endif
};
enum file_type {
OTHER_ERROR = -2,
NOT_EXISTS = -1,
TYPE_NORMAL = 1,
TYPE_DEVDAX = 2
};
int util_file_dir_open(struct dir_handle *a, const char *path);
int util_file_dir_next(struct dir_handle *a, struct file_info *info);
int util_file_dir_close(struct dir_handle *a);
int util_file_dir_remove(const char *path);
int util_file_exists(const char *path);
enum file_type util_stat_get_type(const os_stat_t *st);
enum file_type util_fd_get_type(int fd);
enum file_type util_file_get_type(const char *path);
int util_ddax_region_find(const char *path, unsigned *region_id);
ssize_t util_file_get_size(const char *path);
ssize_t util_fd_get_size(int fd);
size_t util_file_device_dax_alignment(const char *path);
void *util_file_map_whole(const char *path);
int util_file_zero(const char *path, os_off_t off, size_t len);
ssize_t util_file_pread(const char *path, void *buffer, size_t size,
os_off_t offset);
ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size,
os_off_t offset);
int util_tmpfile(const char *dir, const char *templ, int flags);
int util_is_absolute_path(const char *path);
int util_file_create(const char *path, size_t size, size_t minsize);
int util_file_open(const char *path, size_t *size, size_t minsize, int flags);
int util_unlink(const char *path);
int util_unlink_flock(const char *path);
int util_file_mkdir(const char *path, mode_t mode);
int util_write_all(int fd, const char *buf, size_t count);
#ifndef _WIN32
#define util_read read
#define util_write write
#else
/*
 * util_read -- read() replacement used in Windows builds
 * (non-Windows builds #define util_read directly to read, see above)
 */
static inline ssize_t
util_read(int fd, void *buf, size_t count)
{
	/*
	 * Simulate short read, because Windows' _read uses "unsigned" as
	 * a type of the last argument and "int" as a return type.
	 * We have to limit "count" to what _read can return as a success,
	 * not what it can accept.
	 */
	if (count > INT_MAX)
		count = INT_MAX;
	return _read(fd, buf, (unsigned)count);
}
/*
 * util_write -- write() replacement used in Windows builds
 * (non-Windows builds #define util_write directly to write, see above)
 */
static inline ssize_t
util_write(int fd, const void *buf, size_t count)
{
	/*
	 * Simulate short write, because Windows' _write uses "unsigned" as
	 * a type of the last argument and "int" as a return type.
	 * We have to limit "count" to what _write can return as a success,
	 * not what it can accept.
	 */
	if (count > INT_MAX)
		count = INT_MAX;
	return _write(fd, buf, (unsigned)count);
}
#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,013 | 24.982759 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/badblocks.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks.h -- bad blocks API based on the libpmem2 library
*/
#ifndef PMDK_BADBLOCKS_H
#define PMDK_BADBLOCKS_H 1
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define B2SEC(n) ((n) >> 9) /* convert bytes to sectors */
#define SEC2B(n) ((n) << 9) /* convert sectors to bytes */
#define NO_HEALTHY_REPLICA ((int)(-1))
#define BB_NOT_SUPP \
"checking bad blocks is not supported on this OS, please switch off the CHECK_BAD_BLOCKS compat feature using 'pmempool-feature'"
/*
* 'struct badblock' is already defined in ndctl/libndctl.h,
* so we cannot use this name.
*
* libndctl returns offset relative to the beginning of the region,
* but in this structure we save offset relative to the beginning of:
* - namespace (before badblocks_get())
* and
* - file (before sync_recalc_badblocks())
* and
* - pool (after sync_recalc_badblocks())
*/
struct bad_block {
/*
* offset in bytes relative to the beginning of
* - namespace (before badblocks_get())
* and
* - file (before sync_recalc_badblocks())
* and
* - pool (after sync_recalc_badblocks())
*/
size_t offset;
/* length in bytes */
size_t length;
/* number of healthy replica to fix this bad block */
int nhealthy;
};
struct badblocks {
unsigned bb_cnt; /* number of bad blocks */
struct bad_block *bbv; /* array of bad blocks */
};
struct badblocks *badblocks_new(void);
void badblocks_delete(struct badblocks *bbs);
long badblocks_count(const char *path);
int badblocks_get(const char *file, struct badblocks *bbs);
int badblocks_clear(const char *path, struct badblocks *bbs);
int badblocks_clear_all(const char *file);
int badblocks_check_file(const char *path);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_BADBLOCKS_H */
| 1,878 | 23.089744 | 130 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* ctl.c -- implementation of the interface for examination and modification of
* the library's internal state
*/
#include "ctl.h"
#include "os.h"
#include "alloc.h"
#define CTL_MAX_ENTRIES 100
#define MAX_CONFIG_FILE_LEN (1 << 20) /* 1 megabyte */
#define CTL_STRING_QUERY_SEPARATOR ";"
#define CTL_NAME_VALUE_SEPARATOR "="
#define CTL_QUERY_NODE_SEPARATOR "."
#define CTL_VALUE_ARG_SEPARATOR ","
static int ctl_global_first_free = 0;
static struct ctl_node CTL_NODE(global)[CTL_MAX_ENTRIES];
/*
* This is the top level node of the ctl tree structure. Each node can contain
* children and leaf nodes.
*
* Internal nodes simply create a new path in the tree whereas child nodes are
* the ones providing the read/write functionality by the means of callbacks.
*
* Each tree node must be NULL-terminated, CTL_NODE_END macro is provided for
* convenience.
*/
struct ctl {
struct ctl_node root[CTL_MAX_ENTRIES];
int first_free;
};
/*
 * ctl_find_node -- (internal) searches for a matching entry point in the
 *	provided nodes
 *
 * 'name' is a dot-separated path ("a.b.c"); each token is matched against
 * the node names on the current tree level, descending into children on a
 * match. A token that parses as a number is treated as an index for
 * CTL_NODE_INDEXED nodes and pushed onto 'indexes'.
 *
 * The caller is responsible for freeing all of the allocated indexes,
 * regardless of the return value.
 */
static const struct ctl_node *
ctl_find_node(const struct ctl_node *nodes, const char *name,
	struct ctl_indexes *indexes)
{
	LOG(3, "nodes %p name %s indexes %p", nodes, name, indexes);

	const struct ctl_node *n = NULL;
	char *sptr = NULL;
	/* mutable copy -- strtok_r tokenizes the string in place */
	char *parse_str = Strdup(name);
	if (parse_str == NULL)
		return NULL;

	char *node_name = strtok_r(parse_str, CTL_QUERY_NODE_SEPARATOR, &sptr);

	/*
	 * Go through the string and separate tokens that correspond to nodes
	 * in the main ctl tree.
	 */
	while (node_name != NULL) {
		char *endptr;
		/*
		 * Ignore errno from strtol: FreeBSD returns EINVAL if no
		 * conversion is performed. Linux does not, but endptr
		 * check is valid in both cases.
		 */
		int tmp_errno = errno;
		long index_value = strtol(node_name, &endptr, 0);
		errno = tmp_errno;

		struct ctl_index *index_entry = NULL;
		if (endptr != node_name) { /* a valid index */
			index_entry = Malloc(sizeof(*index_entry));
			if (index_entry == NULL)
				goto error;
			index_entry->value = index_value;
			PMDK_SLIST_INSERT_HEAD(indexes, index_entry, entry);
		}

		/* numeric tokens match the first indexed node on this level */
		for (n = &nodes[0]; n->name != NULL; ++n) {
			if (index_entry && n->type == CTL_NODE_INDEXED)
				break;
			else if (strcmp(n->name, node_name) == 0)
				break;
		}
		/* hit the NULL terminator without a match -- unknown token */
		if (n->name == NULL)
			goto error;

		if (index_entry)
			index_entry->name = n->name;

		nodes = n->children;
		node_name = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr);
	}

	Free(parse_str);
	return n;

error:
	Free(parse_str);
	return NULL;
}
/*
* ctl_delete_indexes --
* (internal) removes and frees all entries on the index list
*/
static void
ctl_delete_indexes(struct ctl_indexes *indexes)
{
while (!PMDK_SLIST_EMPTY(indexes)) {
struct ctl_index *index = PMDK_SLIST_FIRST(indexes);
PMDK_SLIST_REMOVE_HEAD(indexes, entry);
Free(index);
}
}
/*
 * ctl_parse_args -- (internal) parses a string argument based on the node
 *	structure
 *
 * 'arg' holds comma-separated values that are consumed one per entry of the
 * prototype's parser array (tokenized in place). Returns a newly allocated
 * buffer of arg_proto->dest_size bytes that the caller must Free, or NULL
 * on allocation or parsing failure.
 */
static void *
ctl_parse_args(const struct ctl_argument *arg_proto, char *arg)
{
	ASSERTne(arg, NULL);

	char *dest_arg = Malloc(arg_proto->dest_size);
	if (dest_arg == NULL) {
		ERR("!Malloc");
		return NULL;
	}

	char *sptr = NULL;
	char *arg_sep = strtok_r(arg, CTL_VALUE_ARG_SEPARATOR, &sptr);
	for (const struct ctl_argument_parser *p = arg_proto->parsers;
			p->parser != NULL; ++p) {
		/* each field must lie within the destination buffer */
		ASSERT(p->dest_offset + p->dest_size <= arg_proto->dest_size);

		/* fewer values supplied than the prototype requires */
		if (arg_sep == NULL) {
			ERR("!strtok_r");
			goto error_parsing;
		}

		if (p->parser(arg_sep, dest_arg + p->dest_offset,
				p->dest_size) != 0)
			goto error_parsing;

		arg_sep = strtok_r(NULL, CTL_VALUE_ARG_SEPARATOR, &sptr);
	}

	return dest_arg;

error_parsing:
	Free(dest_arg);
	return NULL;
}
/*
 * ctl_query_get_real_args -- (internal) returns a pointer with actual argument
 *	structure as required by the node callback
 *
 * Config-file input arrives as text and is parsed into a freshly allocated
 * structure; programmatic input is already typed and passed through as-is.
 */
static void *
ctl_query_get_real_args(const struct ctl_node *n, void *write_arg,
	enum ctl_query_source source)
{
	if (source == CTL_QUERY_CONFIG_INPUT)
		return ctl_parse_args(n->arg, write_arg);

	if (source == CTL_QUERY_PROGRAMMATIC)
		return write_arg;

	/* no other source is valid here */
	ASSERT(0);

	return NULL;
}
/*
 * ctl_query_cleanup_real_args -- (internal) cleanups relevant argument
 *	structures allocated as a result of the get_real_args call
 */
static void
ctl_query_cleanup_real_args(const struct ctl_node *n, void *real_arg,
	enum ctl_query_source source)
{
	if (source == CTL_QUERY_CONFIG_INPUT) {
		/* this buffer was allocated by ctl_parse_args */
		Free(real_arg);
	} else if (source == CTL_QUERY_PROGRAMMATIC) {
		/* argument is owned by the caller -- nothing to release */
	} else {
		ASSERT(0);
	}
}
/*
 * ctl_exec_query_read -- (internal) calls the read callback of a node
 *
 * The result is returned through 'arg', so a NULL argument is rejected
 * with EINVAL before the callback is invoked.
 */
static int
ctl_exec_query_read(void *ctx, const struct ctl_node *n,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	if (arg != NULL)
		return n->cb[CTL_QUERY_READ](ctx, source, arg, indexes);

	ERR("read queries require non-NULL argument");
	errno = EINVAL;

	return -1;
}
/*
 * ctl_exec_query_write -- (internal) calls the write callback of a node
 *
 * Translates the argument into the binary form expected by the callback
 * (a no-op for programmatic queries) and releases it afterwards.
 */
static int
ctl_exec_query_write(void *ctx, const struct ctl_node *n,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	if (arg == NULL) {
		ERR("write queries require non-NULL argument");
		errno = EINVAL;
		return -1;
	}

	void *parsed_arg = ctl_query_get_real_args(n, arg, source);
	if (parsed_arg == NULL) {
		LOG(1, "Invalid arguments");
		return -1;
	}

	int result = n->cb[CTL_QUERY_WRITE](ctx, source, parsed_arg, indexes);

	ctl_query_cleanup_real_args(n, parsed_arg, source);

	return result;
}
/*
 * ctl_exec_query_runnable -- (internal) calls the run callback of a node
 *
 * Unlike the read/write executors, a NULL argument is not rejected here --
 * it is forwarded to the callback as-is.
 */
static int
ctl_exec_query_runnable(void *ctx, const struct ctl_node *n,
	enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
	return n->cb[CTL_QUERY_RUNNABLE](ctx, source, arg, indexes);
}
/*
 * Dispatch table for query execution -- indexed by enum ctl_query_type,
 * so the order must follow CTL_QUERY_READ, CTL_QUERY_WRITE,
 * CTL_QUERY_RUNNABLE.
 */
static int (*ctl_exec_query[MAX_CTL_QUERY_TYPE])(void *ctx,
	const struct ctl_node *n, enum ctl_query_source source, void *arg,
	struct ctl_indexes *indexes) = {
	ctl_exec_query_read,
	ctl_exec_query_write,
	ctl_exec_query_runnable,
};
/*
 * ctl_query -- (internal) parses the name and calls the appropriate methods
 *	from the ctl tree
 *
 * The entry point is looked up first in the global tree, then (if not found
 * and a ctl instance was given) in the instance-local tree. Returns the
 * callback's result, or -1 with errno set to EINVAL for a NULL name or an
 * entry point that is missing, not a leaf, or lacks a handler for 'type'.
 */
int
ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source,
	const char *name, enum ctl_query_type type, void *arg)
{
	LOG(3, "ctl %p ctx %p source %d name %s type %d arg %p",
			ctl, ctx, source, name, type, arg);

	if (name == NULL) {
		ERR("invalid query");
		errno = EINVAL;
		return -1;
	}

	/*
	 * All of the indexes are put on this list so that the handlers can
	 * easily retrieve the index values. The list is cleared once the ctl
	 * query has been handled.
	 */
	struct ctl_indexes indexes;
	PMDK_SLIST_INIT(&indexes);

	int ret = -1;

	const struct ctl_node *n = ctl_find_node(CTL_NODE(global),
		name, &indexes);

	if (n == NULL && ctl) {
		/* a partial global match may have queued indexes -- drop them */
		ctl_delete_indexes(&indexes);
		n = ctl_find_node(ctl->root, name, &indexes);
	}

	if (n == NULL || n->type != CTL_NODE_LEAF || n->cb[type] == NULL) {
		ERR("invalid query entry point %s", name);
		errno = EINVAL;
		goto out;
	}

	ret = ctl_exec_query[type](ctx, n, source, arg, &indexes);

out:
	ctl_delete_indexes(&indexes);
	return ret;
}
/*
* ctl_register_module_node -- adds a new node to the CTL tree root.
*/
void
ctl_register_module_node(struct ctl *c, const char *name, struct ctl_node *n)
{
struct ctl_node *nnode = c == NULL ?
&CTL_NODE(global)[ctl_global_first_free++] :
&c->root[c->first_free++];
nnode->children = n;
nnode->type = CTL_NODE_NAMED;
nnode->name = name;
}
/*
 * ctl_parse_query -- (internal) splits an entire query string
 *	into name and value
 *
 * 'qbuf' is tokenized in place; on success *name and *value point into it.
 * A query with anything after a second '=' is rejected.
 */
static int
ctl_parse_query(char *qbuf, char **name, char **value)
{
	if (qbuf == NULL)
		return -1;

	char *save;

	*name = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &save);
	if (*name == NULL)
		return -1;

	*value = strtok_r(NULL, CTL_NAME_VALUE_SEPARATOR, &save);
	if (*value == NULL)
		return -1;

	/* the value itself mustn't include CTL_NAME_VALUE_SEPARATOR */
	char *tail = strtok_r(NULL, CTL_NAME_VALUE_SEPARATOR, &save);

	return tail == NULL ? 0 : -1;
}
/*
 * ctl_load_config -- executes the entire query collection from a provider
 *
 * 'buf' holds semicolon-separated "name=value" queries and is tokenized
 * (modified) in place.
 *
 * NOTE(review): a failed query is fatal only when ctx != NULL; with a NULL
 * ctx failures are ignored and processing continues -- presumably so shared
 * config entries not applicable to every consumer don't abort loading.
 * Confirm before relying on this. A malformed query is always fatal.
 */
static int
ctl_load_config(struct ctl *ctl, void *ctx, char *buf)
{
	int r = 0;
	char *sptr = NULL; /* for internal use of strtok */
	char *name;
	char *value;

	ASSERTne(buf, NULL);

	char *qbuf = strtok_r(buf, CTL_STRING_QUERY_SEPARATOR, &sptr);
	while (qbuf != NULL) {
		r = ctl_parse_query(qbuf, &name, &value);
		if (r != 0) {
			ERR("failed to parse query %s", qbuf);
			return -1;
		}

		r = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT,
			name, CTL_QUERY_WRITE, value);
		if (r < 0 && ctx != NULL)
			return -1;

		qbuf = strtok_r(NULL, CTL_STRING_QUERY_SEPARATOR, &sptr);
	}

	return 0;
}
/*
 * ctl_load_config_from_string -- loads obj configuration from string
 *
 * Works on a private copy of the input, since parsing tokenizes the
 * buffer in place.
 */
int
ctl_load_config_from_string(struct ctl *ctl, void *ctx, const char *cfg_string)
{
	LOG(3, "ctl %p ctx %p cfg_string \"%s\"", ctl, ctx, cfg_string);

	char *copy = Strdup(cfg_string);
	if (copy == NULL) {
		ERR("!Strdup");
		return -1;
	}

	int result = ctl_load_config(ctl, ctx, copy);

	Free(copy);

	return result;
}
/*
 * ctl_load_config_from_file -- loads obj configuration from file
 *
 * This function opens up the config file, allocates a buffer of size equal to
 * the size of the file, reads its content and sanitizes it for ctl_load_config.
 *
 * Sanitizing drops all whitespace and '#'-to-end-of-line comments. Returns
 * 0 on success; -1 on open/seek failure, an oversized file, allocation
 * failure, or a query failure reported by ctl_load_config.
 */
int
ctl_load_config_from_file(struct ctl *ctl, void *ctx, const char *cfg_file)
{
	LOG(3, "ctl %p ctx %p cfg_file \"%s\"", ctl, ctx, cfg_file);

	int ret = -1;

	FILE *fp = os_fopen(cfg_file, "r");
	if (fp == NULL)
		return ret;

	int err;
	/* determine the file size by seeking to its end */
	if ((err = fseek(fp, 0, SEEK_END)) != 0)
		goto error_file_parse;

	long fsize = ftell(fp);
	if (fsize == -1)
		goto error_file_parse;

	/* refuse pathologically large configs (limit is 1 MB) */
	if (fsize > MAX_CONFIG_FILE_LEN) {
		ERR("Config file too large");
		goto error_file_parse;
	}

	if ((err = fseek(fp, 0, SEEK_SET)) != 0)
		goto error_file_parse;

	char *buf = Zalloc((size_t)fsize + 1); /* +1 for NULL-termination */
	if (buf == NULL) {
		ERR("!Zalloc");
		goto error_file_parse;
	}

	size_t bufpos = 0;

	int c;
	int is_comment_section = 0;
	/* copy the file contents, stripping comments and whitespace */
	while ((c = fgetc(fp)) != EOF) {
		if (c == '#')
			is_comment_section = 1;
		else if (c == '\n')
			is_comment_section = 0;
		else if (!is_comment_section && !isspace(c))
			buf[bufpos++] = (char)c;
	}

	ret = ctl_load_config(ctl, ctx, buf);

	Free(buf);

error_file_parse:
	(void) fclose(fp);
	return ret;
}
/*
 * ctl_new -- allocates and initializes ctl data structures
 *
 * Returns NULL on allocation failure.
 */
struct ctl *
ctl_new(void)
{
	struct ctl *new_ctl = Zalloc(sizeof(*new_ctl));
	if (new_ctl != NULL) {
		new_ctl->first_free = 0;
		return new_ctl;
	}

	ERR("!Zalloc");
	return NULL;
}
/*
 * ctl_delete -- deletes ctl
 *
 * Accepts NULL (Free is a no-op on NULL, matching free(3) semantics).
 */
void
ctl_delete(struct ctl *c)
{
	Free(c);
}
/*
 * ctl_parse_ll -- (internal) parses and returns a long long signed integer
 *
 * LLONG_MIN doubles as the error sentinel, so that particular value cannot
 * be distinguished from a parse failure. errno is preserved on success.
 */
static long long
ctl_parse_ll(const char *str)
{
	int saved_errno = errno;
	errno = 0;

	char *end;
	long long parsed = strtoll(str, &end, 0);

	/* no digits consumed, or overflow/underflow reported via errno */
	if (end == str || errno != 0)
		return LLONG_MIN;

	errno = saved_errno;
	return parsed;
}
/*
 * ctl_arg_boolean -- checks whether the provided argument contains
 *	either a 1 or y or Y.
 *
 * Writes 1 or 0 into *dest and returns 0; returns -1 when the first
 * character of the argument is not recognized.
 */
int
ctl_arg_boolean(const void *arg, void *dest, size_t dest_size)
{
	/* only the first character is examined; size is fixed to int */
	(void) dest_size;

	int *intp = dest;
	char in = ((const char *)arg)[0];

	/* cast to unsigned char: tolower() on a negative char value is UB */
	if (tolower((unsigned char)in) == 'y' || in == '1') {
		*intp = 1;
		return 0;
	} else if (tolower((unsigned char)in) == 'n' || in == '0') {
		*intp = 0;
		return 0;
	}

	return -1;
}
/*
 * ctl_arg_integer -- parses signed integer argument
 *
 * Supports 1-byte (unsigned), int-sized and long long destinations;
 * range-checks the parsed value against the destination width.
 */
int
ctl_arg_integer(const void *arg, void *dest, size_t dest_size)
{
	long long parsed = ctl_parse_ll(arg);
	if (parsed == LLONG_MIN)
		return -1;

	switch (dest_size) {
	case sizeof(uint8_t):
		if (parsed < 0 || parsed > UINT8_MAX)
			return -1;
		*(uint8_t *)dest = (uint8_t)parsed;
		break;
	case sizeof(int):
		if (parsed < INT_MIN || parsed > INT_MAX)
			return -1;
		*(int *)dest = (int)parsed;
		break;
	case sizeof(long long):
		*(long long *)dest = parsed;
		break;
	default:
		ERR("invalid destination size %zu", dest_size);
		errno = EINVAL;
		return -1;
	}

	return 0;
}
/*
 * ctl_arg_string -- verifies length and copies a string argument into a zeroed
 *	buffer
 *
 * Rejects input that would not fit together with its terminator; strncpy's
 * zero-padding keeps the remainder of the destination buffer zeroed.
 */
int
ctl_arg_string(const void *arg, void *dest, size_t dest_size)
{
	const char *src = arg;

	size_t len = strnlen(src, dest_size);
	if (len == dest_size) /* too long to fit with the terminator */
		return -1;

	strncpy(dest, src, dest_size);

	return 0;
}
| 12,706 | 20.946459 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/pool_hdr.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pool_hdr.c -- pool header utilities
*/
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>
#include "out.h"
#include "pool_hdr.h"
/* Determine ISA for which PMDK is currently compiled */
#if defined(__x86_64) || defined(_M_X64)
/* x86 -- 64 bit */
#define PMDK_MACHINE PMDK_MACHINE_X86_64
#define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64
#elif defined(__aarch64__)
/* 64 bit ARM not supported yet */
#define PMDK_MACHINE PMDK_MACHINE_AARCH64
#define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64
#elif defined(__PPC64__)
/* 64 bit PowerPC */
#define PMDK_MACHINE PMDK_MACHINE_PPC64
#define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64
#else
/* add appropriate definitions here when porting PMDK to another ISA */
#error unable to recognize ISA at compile time
#endif
/*
 * arch_data -- (internal) determine endianness of the running machine
 *
 * Stores a known 16-bit pattern and reads back its lowest-addressed byte:
 * PMDK_DATA_LE on a little-endian machine, PMDK_DATA_BE on a big-endian one.
 * (The original comment misnamed this function "arch_machine".)
 */
static uint8_t
arch_data(void)
{
	uint16_t word = (PMDK_DATA_BE << 8) + PMDK_DATA_LE;
	return ((uint8_t *)&word)[0];
}
/*
 * util_get_arch_flags -- get architecture identification flags
 *
 * Fills *arch_flags with the compile-time ISA identification; the memset
 * keeps the reserved area (and any padding) well-defined zeros.
 */
void
util_get_arch_flags(struct arch_flags *arch_flags)
{
	memset(arch_flags, 0, sizeof(*arch_flags));

	arch_flags->machine_class = PMDK_MACHINE_CLASS;
	arch_flags->machine = PMDK_MACHINE;
	arch_flags->alignment_desc = alignment_desc();
	arch_flags->data = arch_data();
}
/*
 * util_convert2le_hdr -- convert pool_hdr into little-endian byte order
 *
 * Byte-swaps (in place) the multi-byte integer fields listed below;
 * counterpart of util_convert2h_hdr_nocheck().
 */
void
util_convert2le_hdr(struct pool_hdr *hdrp)
{
	hdrp->major = htole32(hdrp->major);
	hdrp->features.compat = htole32(hdrp->features.compat);
	hdrp->features.incompat = htole32(hdrp->features.incompat);
	hdrp->features.ro_compat = htole32(hdrp->features.ro_compat);
	hdrp->arch_flags.alignment_desc =
		htole64(hdrp->arch_flags.alignment_desc);
	hdrp->arch_flags.machine = htole16(hdrp->arch_flags.machine);
	hdrp->crtime = htole64(hdrp->crtime);
	hdrp->checksum = htole64(hdrp->checksum);
}
/*
 * util_convert2h_hdr_nocheck -- convert pool_hdr into host byte order
 *
 * Inverse of util_convert2le_hdr(); performs no validation of the header
 * contents ("nocheck").
 */
void
util_convert2h_hdr_nocheck(struct pool_hdr *hdrp)
{
	hdrp->major = le32toh(hdrp->major);
	hdrp->features.compat = le32toh(hdrp->features.compat);
	hdrp->features.incompat = le32toh(hdrp->features.incompat);
	hdrp->features.ro_compat = le32toh(hdrp->features.ro_compat);
	hdrp->crtime = le64toh(hdrp->crtime);
	hdrp->arch_flags.machine = le16toh(hdrp->arch_flags.machine);
	hdrp->arch_flags.alignment_desc =
		le64toh(hdrp->arch_flags.alignment_desc);
	hdrp->checksum = le64toh(hdrp->checksum);
}
/*
 * util_check_arch_flags -- validates arch_flags
 *
 * Compares every field of *arch_flags against the flags of the current
 * build; reports each mismatch individually and returns -1 if any field
 * differs (0 otherwise). (The original comment misnamed this function
 * "util_arch_flags_check".)
 */
int
util_check_arch_flags(const struct arch_flags *arch_flags)
{
	struct arch_flags cur_af;
	int ret = 0;
	util_get_arch_flags(&cur_af);
	if (!util_is_zeroed(&arch_flags->reserved,
			sizeof(arch_flags->reserved))) {
		ERR("invalid reserved values");
		ret = -1;
	}
	if (arch_flags->machine != cur_af.machine) {
		ERR("invalid machine value");
		ret = -1;
	}
	if (arch_flags->data != cur_af.data) {
		ERR("invalid data value");
		ret = -1;
	}
	if (arch_flags->machine_class != cur_af.machine_class) {
		ERR("invalid machine_class value");
		ret = -1;
	}
	if (arch_flags->alignment_desc != cur_af.alignment_desc) {
		ERR("invalid alignment_desc value");
		ret = -1;
	}
	return ret;
}
/*
 * util_get_unknown_features -- filter out unknown features flags
 *
 * Returns only those bits of 'features' that are not present in 'known'.
 */
features_t
util_get_unknown_features(features_t features, features_t known)
{
	features_t unknown = {
		.compat = util_get_not_masked_bits(features.compat,
			known.compat),
		.incompat = util_get_not_masked_bits(features.incompat,
			known.incompat),
		.ro_compat = util_get_not_masked_bits(features.ro_compat,
			known.ro_compat),
	};

	return unknown;
}
/*
 * util_feature_check -- check features masks
 *
 * Returns -1 (with errno = EINVAL) on unknown incompat features,
 * 0 when unknown ro_compat features force read-only mode, and
 * 1 when the header is fully compatible.
 */
int
util_feature_check(struct pool_hdr *hdrp, features_t known)
{
	LOG(3, "hdrp %p features {incompat %#x ro_compat %#x compat %#x}",
		hdrp,
		known.incompat, known.ro_compat, known.compat);

	features_t unknown = util_get_unknown_features(hdrp->features, known);

	/* unknown incompatible ("must support") features are fatal */
	if (unknown.incompat) {
		ERR("unsafe to continue due to unknown incompat "
			"features: %#x", unknown.incompat);
		errno = EINVAL;
		return -1;
	}

	/* unknown RO-compatible features only force read-only mode */
	if (unknown.ro_compat) {
		ERR("switching to read-only mode due to unknown ro_compat "
			"features: %#x", unknown.ro_compat);
		return 0;
	}

	/* unknown compatible ("may") features are simply ignored */
	if (unknown.compat)
		LOG(3, "ignoring unknown compat features: %#x",
			unknown.compat);

	return 1;
}
/*
 * util_feature_cmp -- compares features with reference
 *
 * returns 1 if features and reference match and 0 otherwise
 */
int
util_feature_cmp(features_t features, features_t ref)
{
	LOG(3, "features {incompat %#x ro_compat %#x compat %#x} "
		"ref {incompat %#x ro_compat %#x compat %#x}",
		features.incompat, features.ro_compat, features.compat,
		ref.incompat, ref.ro_compat, ref.compat);

	if (features.compat != ref.compat)
		return 0;
	if (features.incompat != ref.incompat)
		return 0;

	return features.ro_compat == ref.ro_compat;
}
/*
 * util_feature_is_zero -- check if features flags are zeroed
 *
 * returns 1 if features is zeroed and 0 otherwise
 */
int
util_feature_is_zero(features_t features)
{
	/* OR all three masks together: any set bit means "not zero" */
	return (features.compat | features.incompat |
		features.ro_compat) == 0;
}
/*
 * util_feature_is_set -- check if feature flag is set in features
 *
 * returns 1 if feature flag is set and 0 otherwise
 */
int
util_feature_is_set(features_t features, features_t flag)
{
	if (features.compat & flag.compat)
		return 1;
	if (features.incompat & flag.incompat)
		return 1;
	if (features.ro_compat & flag.ro_compat)
		return 1;

	return 0;
}
/*
 * util_feature_enable -- enable feature
 *
 * ORs every bit of new_feature into the corresponding mask of *features.
 */
void
util_feature_enable(features_t *features, features_t new_feature)
{
	features->compat |= new_feature.compat;
	features->incompat |= new_feature.incompat;
	features->ro_compat |= new_feature.ro_compat;
}
/*
 * util_feature_disable -- (internal) disable feature
 *
 * Clears every bit of old_feature from the corresponding mask of *features.
 */
void
util_feature_disable(features_t *features, features_t old_feature)
{
	features->compat &= ~old_feature.compat;
	features->incompat &= ~old_feature.incompat;
	features->ro_compat &= ~old_feature.ro_compat;
}
/*
 * Lookup tables indexed by the pmempool_feature enum value (noted in the
 * trailing comments). The two arrays must stay in sync entry-for-entry;
 * util_str2feature() enforces equal sizes at compile time.
 */
static const features_t feature_2_pmempool_feature_map[] = {
	FEAT_INCOMPAT(SINGLEHDR), /* PMEMPOOL_FEAT_SINGLEHDR */
	FEAT_INCOMPAT(CKSUM_2K), /* PMEMPOOL_FEAT_CKSUM_2K */
	FEAT_INCOMPAT(SDS), /* PMEMPOOL_FEAT_SHUTDOWN_STATE */
	FEAT_COMPAT(CHECK_BAD_BLOCKS), /* PMEMPOOL_FEAT_CHECK_BAD_BLOCKS */
};
#define FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE \
	ARRAY_SIZE(feature_2_pmempool_feature_map)
static const char *str_2_pmempool_feature_map[] = {
	"SINGLEHDR",
	"CKSUM_2K",
	"SHUTDOWN_STATE",
	"CHECK_BAD_BLOCKS",
};
#define PMEMPOOL_FEATURE_2_STR_MAP_SIZE ARRAY_SIZE(str_2_pmempool_feature_map)
/*
 * util_str2feature -- convert string to feat_flags value
 *
 * Returns features_zero when the name does not match any known feature.
 */
features_t
util_str2feature(const char *str)
{
	/* all features have to be named in incompat_features_str array */
	COMPILE_ERROR_ON(FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE !=
		PMEMPOOL_FEATURE_2_STR_MAP_SIZE);

	for (uint32_t i = 0; i < PMEMPOOL_FEATURE_2_STR_MAP_SIZE; ++i) {
		if (strcmp(str, str_2_pmempool_feature_map[i]) == 0)
			return feature_2_pmempool_feature_map[i];
	}

	return features_zero;
}
/*
 * util_feature2pmempool_feature -- convert feature to pmempool_feature
 *
 * Returns the index into the feature map (the pmempool_feature enum
 * value) or UINT32_MAX when no entry matches.
 */
uint32_t
util_feature2pmempool_feature(features_t feat)
{
	for (uint32_t pf = 0; pf < FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE; ++pf) {
		if (util_feature_cmp(feat,
				feature_2_pmempool_feature_map[pf]))
			return pf;
	}

	return UINT32_MAX;
}
/*
 * util_str2pmempool_feature -- convert string to uint32_t enum pmempool_feature
 * equivalent
 *
 * Returns UINT32_MAX when the string does not name a known feature.
 */
uint32_t
util_str2pmempool_feature(const char *str)
{
	features_t parsed = util_str2feature(str);
	return util_feature_is_zero(parsed) ?
		UINT32_MAX : util_feature2pmempool_feature(parsed);
}
/*
 * util_feature2str -- convert uint32_t feature to string
 *
 * Returns the name of the first mapped feature set in 'features' (or NULL
 * if none); when 'found' is non-NULL, the matched flags are copied there.
 */
const char *
util_feature2str(features_t features, features_t *found)
{
	for (uint32_t i = 0; i < FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE; ++i) {
		const features_t *rec = &feature_2_pmempool_feature_map[i];
		if (!util_feature_is_set(features, *rec))
			continue;

		if (found != NULL)
			*found = *rec; /* struct copy, same as memcpy */
		return str_2_pmempool_feature_map[i];
	}

	return NULL;
}
| 8,733 | 24.242775 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/shutdown_state.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* shutdown_state.h -- unsafe shudown detection
*/
#ifndef PMDK_SHUTDOWN_STATE_H
#define PMDK_SHUTDOWN_STATE_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct pool_replica;
/*
 * Record used to detect an unsafe shutdown of a pool; the fields sum to
 * exactly 64 bytes (8 + 8 + 1 + 39 + 8).
 */
struct shutdown_state {
	uint64_t usc; /* NOTE(review): presumably unsafe shutdown count -- confirm */
	uint64_t uuid; /* UID checksum */
	uint8_t dirty; /* see shutdown_state_set_dirty/clear_dirty below */
	uint8_t reserved[39]; /* pads the structure out to 64 bytes */
	uint64_t checksum;
};
/* initializes the shutdown-state record of a replica */
int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep);
/* registers one part file (referred to by fd) in the record */
int shutdown_state_add_part(struct shutdown_state *sds, int fd,
	struct pool_replica *rep);
/* sets the dirty flag */
void shutdown_state_set_dirty(struct shutdown_state *sds,
	struct pool_replica *rep);
/* clears the dirty flag */
void shutdown_state_clear_dirty(struct shutdown_state *sds,
	struct pool_replica *rep);
/* compares the current state against the one stored in the pool */
int shutdown_state_check(struct shutdown_state *curr_sds,
	struct shutdown_state *pool_sds, struct pool_replica *rep);
#ifdef __cplusplus
}
#endif
#endif /* shutdown_state.h */
| 950 | 21.642857 | 78 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.