Dataset column schema (name, dtype, min .. max or class count):

  repo              stringlengths    1 .. 152
  file              stringlengths    15 .. 205
  code              stringlengths    0 .. 41.6M
  file_length       int64            0 .. 41.6M
  avg_line_length   float64          0 .. 1.81M
  max_line_length   int64            0 .. 12.7M
  extension_type    stringclasses    90 values
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params86243.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H #define POS1 366 #define SL1 6 #define SL2 7 #define SR1 19 #define SR2 1 #define MSK1 0xfdbffbffU #define MSK2 0xbff7ff3fU #define MSK3 0xfd77efffU #define MSK4 0xbf9ff3ffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) #define ALTI_SL2_PERM64 \ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */
3,564
42.47561
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/thd.h
/* Abstraction layer for threading in tests */

#ifdef _WIN32
typedef HANDLE thd_t;
#else
typedef pthread_t thd_t;
#endif

void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
void thd_join(thd_t thd, void **ret);
223
21.4
62
h
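The thd.h row above only declares thd_create and thd_join. A minimal usage sketch follows, assuming the POSIX branch; the two wrappers are stubbed locally with plain pthread calls so the sketch compiles on its own (they are not jemalloc's actual test-harness implementations).

/* Minimal usage sketch for the thd.h abstraction above (POSIX branch assumed;
 * thd_create/thd_join stubbed locally so the example is self-contained). */
#include <pthread.h>
#include <stdio.h>

typedef pthread_t thd_t;

static void thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
{
	if (pthread_create(thd, NULL, proc, arg) != 0)
		fprintf(stderr, "pthread_create() failed\n");
}

static void thd_join(thd_t thd, void **ret)
{
	pthread_join(thd, ret);
}

static void *worker(void *arg)
{
	printf("hello from worker %d\n", *(int *)arg);
	return NULL;
}

int main(void)
{
	thd_t thd;
	int id = 1;

	thd_create(&thd, worker, &id);
	thd_join(thd, NULL);
	return 0;
}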
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params132049.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H #define POS1 110 #define SL1 19 #define SL2 1 #define SR1 21 #define SR2 1 #define MSK1 0xffffbb5fU #define MSK2 0xfb6ebf95U #define MSK3 0xfffefffaU #define MSK4 0xcff77fffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xcb520000U #define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */
3,564
42.47561
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/windows_extra.h
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H

#ifndef ENOENT
# define ENOENT ERROR_PATH_NOT_FOUND
#endif
#ifndef EINVAL
# define EINVAL ERROR_BAD_ARGUMENTS
#endif
#ifndef EAGAIN
# define EAGAIN ERROR_OUTOFMEMORY
#endif
#ifndef EPERM
# define EPERM ERROR_WRITE_FAULT
#endif
#ifndef EFAULT
# define EFAULT ERROR_INVALID_ADDRESS
#endif
#ifndef ENOMEM
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#endif
#ifndef ERANGE
# define ERANGE ERROR_INVALID_DATA
#endif

#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
529
18.62963
40
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/inttypes.h
// ISO C9x compliant inttypes.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_INTTYPES_H_ // [ #define _MSC_INTTYPES_H_ #if _MSC_VER > 1000 #pragma once #endif #include "stdint.h" // 7.8 Format conversion of integer types typedef struct { intmax_t quot; intmax_t rem; } imaxdiv_t; // 7.8.1 Macros for format specifiers #if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 #ifdef _WIN64 # define __PRI64_PREFIX "l" # define __PRIPTR_PREFIX "l" #else # define __PRI64_PREFIX "ll" # define __PRIPTR_PREFIX #endif // The fprintf macros for signed integers are: #define PRId8 "d" #define PRIi8 "i" #define PRIdLEAST8 "d" #define PRIiLEAST8 "i" #define PRIdFAST8 "d" #define PRIiFAST8 "i" #define PRId16 "hd" #define PRIi16 "hi" #define PRIdLEAST16 "hd" #define PRIiLEAST16 "hi" #define PRIdFAST16 "hd" #define PRIiFAST16 "hi" #define PRId32 "d" #define PRIi32 "i" #define PRIdLEAST32 "d" #define PRIiLEAST32 "i" #define PRIdFAST32 "d" #define PRIiFAST32 "i" #define PRId64 __PRI64_PREFIX "d" #define PRIi64 __PRI64_PREFIX "i" #define PRIdLEAST64 __PRI64_PREFIX "d" #define PRIiLEAST64 __PRI64_PREFIX "i" #define PRIdFAST64 __PRI64_PREFIX "d" #define PRIiFAST64 __PRI64_PREFIX "i" #define PRIdMAX __PRI64_PREFIX "d" #define PRIiMAX __PRI64_PREFIX "i" #define PRIdPTR __PRIPTR_PREFIX "d" #define PRIiPTR __PRIPTR_PREFIX "i" // The fprintf macros for unsigned integers are: #define PRIo8 "o" #define PRIu8 "u" #define PRIx8 "x" #define PRIX8 "X" #define PRIoLEAST8 "o" #define PRIuLEAST8 "u" #define PRIxLEAST8 "x" #define PRIXLEAST8 "X" #define PRIoFAST8 "o" #define PRIuFAST8 "u" #define PRIxFAST8 "x" #define PRIXFAST8 "X" #define PRIo16 "ho" #define PRIu16 "hu" #define PRIx16 "hx" #define PRIX16 "hX" #define PRIoLEAST16 "ho" #define PRIuLEAST16 "hu" #define PRIxLEAST16 "hx" #define PRIXLEAST16 "hX" #define PRIoFAST16 
"ho" #define PRIuFAST16 "hu" #define PRIxFAST16 "hx" #define PRIXFAST16 "hX" #define PRIo32 "o" #define PRIu32 "u" #define PRIx32 "x" #define PRIX32 "X" #define PRIoLEAST32 "o" #define PRIuLEAST32 "u" #define PRIxLEAST32 "x" #define PRIXLEAST32 "X" #define PRIoFAST32 "o" #define PRIuFAST32 "u" #define PRIxFAST32 "x" #define PRIXFAST32 "X" #define PRIo64 __PRI64_PREFIX "o" #define PRIu64 __PRI64_PREFIX "u" #define PRIx64 __PRI64_PREFIX "x" #define PRIX64 __PRI64_PREFIX "X" #define PRIoLEAST64 __PRI64_PREFIX "o" #define PRIuLEAST64 __PRI64_PREFIX "u" #define PRIxLEAST64 __PRI64_PREFIX "x" #define PRIXLEAST64 __PRI64_PREFIX "X" #define PRIoFAST64 __PRI64_PREFIX "o" #define PRIuFAST64 __PRI64_PREFIX "u" #define PRIxFAST64 __PRI64_PREFIX "x" #define PRIXFAST64 __PRI64_PREFIX "X" #define PRIoMAX __PRI64_PREFIX "o" #define PRIuMAX __PRI64_PREFIX "u" #define PRIxMAX __PRI64_PREFIX "x" #define PRIXMAX __PRI64_PREFIX "X" #define PRIoPTR __PRIPTR_PREFIX "o" #define PRIuPTR __PRIPTR_PREFIX "u" #define PRIxPTR __PRIPTR_PREFIX "x" #define PRIXPTR __PRIPTR_PREFIX "X" // The fscanf macros for signed integers are: #define SCNd8 "d" #define SCNi8 "i" #define SCNdLEAST8 "d" #define SCNiLEAST8 "i" #define SCNdFAST8 "d" #define SCNiFAST8 "i" #define SCNd16 "hd" #define SCNi16 "hi" #define SCNdLEAST16 "hd" #define SCNiLEAST16 "hi" #define SCNdFAST16 "hd" #define SCNiFAST16 "hi" #define SCNd32 "ld" #define SCNi32 "li" #define SCNdLEAST32 "ld" #define SCNiLEAST32 "li" #define SCNdFAST32 "ld" #define SCNiFAST32 "li" #define SCNd64 "I64d" #define SCNi64 "I64i" #define SCNdLEAST64 "I64d" #define SCNiLEAST64 "I64i" #define SCNdFAST64 "I64d" #define SCNiFAST64 "I64i" #define SCNdMAX "I64d" #define SCNiMAX "I64i" #ifdef _WIN64 // [ # define SCNdPTR "I64d" # define SCNiPTR "I64i" #else // _WIN64 ][ # define SCNdPTR "ld" # define SCNiPTR "li" #endif // _WIN64 ] // The fscanf macros for unsigned integers are: #define SCNo8 "o" #define SCNu8 "u" #define SCNx8 "x" #define SCNX8 "X" #define SCNoLEAST8 "o" #define SCNuLEAST8 "u" #define SCNxLEAST8 "x" #define SCNXLEAST8 "X" #define SCNoFAST8 "o" #define SCNuFAST8 "u" #define SCNxFAST8 "x" #define SCNXFAST8 "X" #define SCNo16 "ho" #define SCNu16 "hu" #define SCNx16 "hx" #define SCNX16 "hX" #define SCNoLEAST16 "ho" #define SCNuLEAST16 "hu" #define SCNxLEAST16 "hx" #define SCNXLEAST16 "hX" #define SCNoFAST16 "ho" #define SCNuFAST16 "hu" #define SCNxFAST16 "hx" #define SCNXFAST16 "hX" #define SCNo32 "lo" #define SCNu32 "lu" #define SCNx32 "lx" #define SCNX32 "lX" #define SCNoLEAST32 "lo" #define SCNuLEAST32 "lu" #define SCNxLEAST32 "lx" #define SCNXLEAST32 "lX" #define SCNoFAST32 "lo" #define SCNuFAST32 "lu" #define SCNxFAST32 "lx" #define SCNXFAST32 "lX" #define SCNo64 "I64o" #define SCNu64 "I64u" #define SCNx64 "I64x" #define SCNX64 "I64X" #define SCNoLEAST64 "I64o" #define SCNuLEAST64 "I64u" #define SCNxLEAST64 "I64x" #define SCNXLEAST64 "I64X" #define SCNoFAST64 "I64o" #define SCNuFAST64 "I64u" #define SCNxFAST64 "I64x" #define SCNXFAST64 "I64X" #define SCNoMAX "I64o" #define SCNuMAX "I64u" #define SCNxMAX "I64x" #define SCNXMAX "I64X" #ifdef _WIN64 // [ # define SCNoPTR "I64o" # define SCNuPTR "I64u" # define SCNxPTR "I64x" # define SCNXPTR "I64X" #else // _WIN64 ][ # define SCNoPTR "lo" # define SCNuPTR "lu" # define SCNxPTR "lx" # define SCNXPTR "lX" #endif // _WIN64 ] #endif // __STDC_FORMAT_MACROS ] // 7.8.2 Functions for greatest-width integer types // 7.8.2.1 The imaxabs function #define imaxabs _abs64 // 7.8.2.2 The imaxdiv function // This is modified version of 
div() function from Microsoft's div.c found // in %MSVC.NET%\crt\src\div.c #ifdef STATIC_IMAXDIV // [ static #else // STATIC_IMAXDIV ][ _inline #endif // STATIC_IMAXDIV ] imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) { imaxdiv_t result; result.quot = numer / denom; result.rem = numer % denom; if (numer < 0 && result.rem > 0) { // did division wrong; must fix up ++result.quot; result.rem -= denom; } return result; } // 7.8.2.3 The strtoimax and strtoumax functions #define strtoimax _strtoi64 #define strtoumax _strtoui64 // 7.8.2.4 The wcstoimax and wcstoumax functions #define wcstoimax _wcstoi64 #define wcstoumax _wcstoui64 #endif // _MSC_INTTYPES_H_ ]
8,491
26.044586
94
h
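The inttypes.h shim in the row above exists to give MSVC the C99 format macros and imaxdiv(). Below is a small self-contained demo of those facilities; it builds against any standard <inttypes.h>, not only the shim.

/* Demo of the C99 facilities the MSVC shim above emulates: 64-bit format
 * macros and imaxdiv(). */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t big = -7;
	imaxdiv_t d = imaxdiv(big, 2);

	/* C99 integer division truncates toward zero: -7 / 2 == -3, remainder -1. */
	printf("quot=%" PRIdMAX " rem=%" PRIdMAX "\n", d.quot, d.rem);
	/* PRId64/PRIX64 expand to the right length modifier for the target. */
	printf("big=%" PRId64 " mask=0x%" PRIX64 "\n", big, UINT64_C(0xFEEDFACE));
	return 0;
}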
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include <limits.h> // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include <wchar.h> #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. 
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ]
7,728
30.165323
122
h
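The stdint.h shim above supplies the exact-width typedefs plus the limit and constant macros that older MSVC versions lack. A quick self-contained check of those definitions follows; it also builds against a standard <stdint.h>.

/* Quick check of the exact-width types and limit/constant macros. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t big = UINT64_C(0xDEADBEEFCAFEF00D);

	printf("sizeof(int8_t)=%zu sizeof(int64_t)=%zu sizeof(intptr_t)=%zu\n",
	    sizeof(int8_t), sizeof(int64_t), sizeof(intptr_t));
	printf("INT32_MAX=%" PRId32 " INTPTR_MAX=%" PRIdPTR "\n",
	    INT32_MAX, (intptr_t)INTPTR_MAX);
	printf("big=0x%" PRIX64 "\n", big);
	return 0;
}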
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/strings.h
#ifndef strings_h
#define strings_h

/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
 * for both */
#ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
	unsigned long i;

	if (_BitScanForward(&i, x))
		return (i + 1);
	return (0);
}

static __forceinline int ffs(int x)
{
	return (ffsl(x));
}

# ifdef _M_X64
# pragma intrinsic(_BitScanForward64)
# endif

static __forceinline int ffsll(unsigned __int64 x)
{
	unsigned long i;
#ifdef _M_X64
	if (_BitScanForward64(&i, x))
		return (i + 1);
	return (0);
#else
	// Fallback for 32-bit build where 64-bit version not available
	// assuming little endian
	union {
		unsigned __int64 ll;
		unsigned long l[2];
	} s;
	s.ll = x;
	if (_BitScanForward(&i, s.l[0]))
		return (i + 1);
	else if(_BitScanForward(&i, s.l[1]))
		return (i + 33);
	return (0);
#endif
}

#else
# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif

#endif /* strings_h */
1,047
16.466667
72
h
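The strings.h shim above maps ffs/ffsl/ffsll either to _BitScanForward intrinsics (MSVC) or to compiler builtins. The sketch below exercises the shared semantics, namely the 1-based index of the least-significant set bit with 0 for a zero argument; it calls the GCC/Clang builtins directly, which is what the header's non-MSVC branch expands to.

/* ffs/ffsl/ffsll semantics: 1-based index of the lowest set bit, 0 if none. */
#include <stdio.h>

int main(void)
{
	printf("ffs(0)            = %d\n", __builtin_ffs(0));            /* 0 */
	printf("ffs(0x8)          = %d\n", __builtin_ffs(0x8));          /* 4 */
	printf("ffsl(0x100)       = %d\n", __builtin_ffsl(0x100L));      /* 9 */
	printf("ffsll(1ULL << 40) = %d\n", __builtin_ffsll(1ULL << 40)); /* 41 */
	return 0;
}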
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/inttypes.h
// ISO C9x compliant inttypes.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_INTTYPES_H_ // [ #define _MSC_INTTYPES_H_ #if _MSC_VER > 1000 #pragma once #endif #include "stdint.h" // 7.8 Format conversion of integer types typedef struct { intmax_t quot; intmax_t rem; } imaxdiv_t; // 7.8.1 Macros for format specifiers #if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 #ifdef _WIN64 # define __PRI64_PREFIX "l" # define __PRIPTR_PREFIX "l" #else # define __PRI64_PREFIX "ll" # define __PRIPTR_PREFIX #endif // The fprintf macros for signed integers are: #define PRId8 "d" #define PRIi8 "i" #define PRIdLEAST8 "d" #define PRIiLEAST8 "i" #define PRIdFAST8 "d" #define PRIiFAST8 "i" #define PRId16 "hd" #define PRIi16 "hi" #define PRIdLEAST16 "hd" #define PRIiLEAST16 "hi" #define PRIdFAST16 "hd" #define PRIiFAST16 "hi" #define PRId32 "d" #define PRIi32 "i" #define PRIdLEAST32 "d" #define PRIiLEAST32 "i" #define PRIdFAST32 "d" #define PRIiFAST32 "i" #define PRId64 __PRI64_PREFIX "d" #define PRIi64 __PRI64_PREFIX "i" #define PRIdLEAST64 __PRI64_PREFIX "d" #define PRIiLEAST64 __PRI64_PREFIX "i" #define PRIdFAST64 __PRI64_PREFIX "d" #define PRIiFAST64 __PRI64_PREFIX "i" #define PRIdMAX __PRI64_PREFIX "d" #define PRIiMAX __PRI64_PREFIX "i" #define PRIdPTR __PRIPTR_PREFIX "d" #define PRIiPTR __PRIPTR_PREFIX "i" // The fprintf macros for unsigned integers are: #define PRIo8 "o" #define PRIu8 "u" #define PRIx8 "x" #define PRIX8 "X" #define PRIoLEAST8 "o" #define PRIuLEAST8 "u" #define PRIxLEAST8 "x" #define PRIXLEAST8 "X" #define PRIoFAST8 "o" #define PRIuFAST8 "u" #define PRIxFAST8 "x" #define PRIXFAST8 "X" #define PRIo16 "ho" #define PRIu16 "hu" #define PRIx16 "hx" #define PRIX16 "hX" #define PRIoLEAST16 "ho" #define PRIuLEAST16 "hu" #define PRIxLEAST16 "hx" #define PRIXLEAST16 "hX" #define PRIoFAST16 
"ho" #define PRIuFAST16 "hu" #define PRIxFAST16 "hx" #define PRIXFAST16 "hX" #define PRIo32 "o" #define PRIu32 "u" #define PRIx32 "x" #define PRIX32 "X" #define PRIoLEAST32 "o" #define PRIuLEAST32 "u" #define PRIxLEAST32 "x" #define PRIXLEAST32 "X" #define PRIoFAST32 "o" #define PRIuFAST32 "u" #define PRIxFAST32 "x" #define PRIXFAST32 "X" #define PRIo64 __PRI64_PREFIX "o" #define PRIu64 __PRI64_PREFIX "u" #define PRIx64 __PRI64_PREFIX "x" #define PRIX64 __PRI64_PREFIX "X" #define PRIoLEAST64 __PRI64_PREFIX "o" #define PRIuLEAST64 __PRI64_PREFIX "u" #define PRIxLEAST64 __PRI64_PREFIX "x" #define PRIXLEAST64 __PRI64_PREFIX "X" #define PRIoFAST64 __PRI64_PREFIX "o" #define PRIuFAST64 __PRI64_PREFIX "u" #define PRIxFAST64 __PRI64_PREFIX "x" #define PRIXFAST64 __PRI64_PREFIX "X" #define PRIoMAX __PRI64_PREFIX "o" #define PRIuMAX __PRI64_PREFIX "u" #define PRIxMAX __PRI64_PREFIX "x" #define PRIXMAX __PRI64_PREFIX "X" #define PRIoPTR __PRIPTR_PREFIX "o" #define PRIuPTR __PRIPTR_PREFIX "u" #define PRIxPTR __PRIPTR_PREFIX "x" #define PRIXPTR __PRIPTR_PREFIX "X" // The fscanf macros for signed integers are: #define SCNd8 "d" #define SCNi8 "i" #define SCNdLEAST8 "d" #define SCNiLEAST8 "i" #define SCNdFAST8 "d" #define SCNiFAST8 "i" #define SCNd16 "hd" #define SCNi16 "hi" #define SCNdLEAST16 "hd" #define SCNiLEAST16 "hi" #define SCNdFAST16 "hd" #define SCNiFAST16 "hi" #define SCNd32 "ld" #define SCNi32 "li" #define SCNdLEAST32 "ld" #define SCNiLEAST32 "li" #define SCNdFAST32 "ld" #define SCNiFAST32 "li" #define SCNd64 "I64d" #define SCNi64 "I64i" #define SCNdLEAST64 "I64d" #define SCNiLEAST64 "I64i" #define SCNdFAST64 "I64d" #define SCNiFAST64 "I64i" #define SCNdMAX "I64d" #define SCNiMAX "I64i" #ifdef _WIN64 // [ # define SCNdPTR "I64d" # define SCNiPTR "I64i" #else // _WIN64 ][ # define SCNdPTR "ld" # define SCNiPTR "li" #endif // _WIN64 ] // The fscanf macros for unsigned integers are: #define SCNo8 "o" #define SCNu8 "u" #define SCNx8 "x" #define SCNX8 "X" #define SCNoLEAST8 "o" #define SCNuLEAST8 "u" #define SCNxLEAST8 "x" #define SCNXLEAST8 "X" #define SCNoFAST8 "o" #define SCNuFAST8 "u" #define SCNxFAST8 "x" #define SCNXFAST8 "X" #define SCNo16 "ho" #define SCNu16 "hu" #define SCNx16 "hx" #define SCNX16 "hX" #define SCNoLEAST16 "ho" #define SCNuLEAST16 "hu" #define SCNxLEAST16 "hx" #define SCNXLEAST16 "hX" #define SCNoFAST16 "ho" #define SCNuFAST16 "hu" #define SCNxFAST16 "hx" #define SCNXFAST16 "hX" #define SCNo32 "lo" #define SCNu32 "lu" #define SCNx32 "lx" #define SCNX32 "lX" #define SCNoLEAST32 "lo" #define SCNuLEAST32 "lu" #define SCNxLEAST32 "lx" #define SCNXLEAST32 "lX" #define SCNoFAST32 "lo" #define SCNuFAST32 "lu" #define SCNxFAST32 "lx" #define SCNXFAST32 "lX" #define SCNo64 "I64o" #define SCNu64 "I64u" #define SCNx64 "I64x" #define SCNX64 "I64X" #define SCNoLEAST64 "I64o" #define SCNuLEAST64 "I64u" #define SCNxLEAST64 "I64x" #define SCNXLEAST64 "I64X" #define SCNoFAST64 "I64o" #define SCNuFAST64 "I64u" #define SCNxFAST64 "I64x" #define SCNXFAST64 "I64X" #define SCNoMAX "I64o" #define SCNuMAX "I64u" #define SCNxMAX "I64x" #define SCNXMAX "I64X" #ifdef _WIN64 // [ # define SCNoPTR "I64o" # define SCNuPTR "I64u" # define SCNxPTR "I64x" # define SCNXPTR "I64X" #else // _WIN64 ][ # define SCNoPTR "lo" # define SCNuPTR "lu" # define SCNxPTR "lx" # define SCNXPTR "lX" #endif // _WIN64 ] #endif // __STDC_FORMAT_MACROS ] // 7.8.2 Functions for greatest-width integer types // 7.8.2.1 The imaxabs function #define imaxabs _abs64 // 7.8.2.2 The imaxdiv function // This is modified version of 
div() function from Microsoft's div.c found // in %MSVC.NET%\crt\src\div.c #ifdef STATIC_IMAXDIV // [ static #else // STATIC_IMAXDIV ][ _inline #endif // STATIC_IMAXDIV ] imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) { imaxdiv_t result; result.quot = numer / denom; result.rem = numer % denom; if (numer < 0 && result.rem > 0) { // did division wrong; must fix up ++result.quot; result.rem -= denom; } return result; } // 7.8.2.3 The strtoimax and strtoumax functions #define strtoimax _strtoi64 #define strtoumax _strtoui64 // 7.8.2.4 The wcstoimax and wcstoumax functions #define wcstoimax _wcstoi64 #define wcstoumax _wcstoui64 #endif // _MSC_INTTYPES_H_ ]
8,491
26.044586
94
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/stdbool.h
#ifndef stdbool_h
#define stdbool_h

#include <wtypes.h>

/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
 * a built-in type. */
#ifndef __clang__
typedef BOOL _Bool;
#endif

#define bool _Bool
#define true 1
#define false 0

#define __bool_true_false_are_defined 1

#endif /* stdbool_h */
449
20.428571
71
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include <limits.h> // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include <wchar.h> #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. 
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ]
7,728
30.165323
122
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc_rename.sh
#!/bin/sh

public_symbols_txt=$1

cat <<EOF
/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
  echo "# define je_${n} ${m}"
done

cat <<EOF
#endif
EOF
460
19.043478
79
sh
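jemalloc_rename.sh reads name:mangled pairs from the public symbols file and prints one #define per symbol. For a hypothetical input containing the lines `malloc:jemalloc_malloc` and `free:jemalloc_free` (invented here purely for illustration), the generated header would have this shape (comment banner omitted):

/* Hypothetical output of jemalloc_rename.sh for the two example symbols. */
#ifndef JEMALLOC_NO_RENAME
# define je_malloc jemalloc_malloc
# define je_free jemalloc_free
#endif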
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc.sh
#!/bin/sh

objroot=$1

cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF

for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
    jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
  cat "${objroot}include/jemalloc/${hdr}" \
    | grep -v 'Generated from .* by configure\.' \
    | sed -e 's/^#define /#define /g' \
    | sed -e 's/ $//g'
  echo
done

cat <<EOF
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
EOF
498
16.206897
71
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc_mangle.sh
#!/bin/sh

public_symbols_txt=$1
symbol_prefix=$2

cat <<EOF
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# define ${n} ${symbol_prefix}${n}"
done

cat <<EOF
#endif

/*
 * The ${symbol_prefix}* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# undef ${symbol_prefix}${n}"
done

cat <<EOF
#endif
EOF
1,258
26.369565
79
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/public_unnamespace.sh
#!/bin/sh

for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#undef je_${n}"
done
111
15
46
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/mutex.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct malloc_mutex_s malloc_mutex_t; #if (defined(_WIN32) || defined(JEMALLOC_OSSPIN)\ || defined(JEMALLOC_MUTEX_INIT_CB)\ || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS)) #define JEMALLOC_NO_RWLOCKS typedef malloc_mutex_t malloc_rwlock_t; #else typedef struct malloc_rwlock_s malloc_rwlock_t; #endif #if (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_INITIALIZER {0} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} #else # if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} # else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} # endif #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct malloc_mutex_s { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif }; #ifndef JEMALLOC_NO_RWLOCKS struct malloc_rwlock_s { pthread_rwlock_t lock; }; #endif #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. */ # define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex); void malloc_mutex_prefork(malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); void malloc_mutex_postfork_child(malloc_mutex_t *mutex); bool mutex_boot(void); #ifdef JEMALLOC_NO_RWLOCKS #undef malloc_rwlock_init #undef malloc_rwlock_destroy #define malloc_rwlock_init malloc_mutex_init #define malloc_rwlock_destroy malloc_mutex_destroy #endif void malloc_rwlock_prefork(malloc_rwlock_t *rwlock); void malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock); void malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex); void malloc_mutex_unlock(malloc_mutex_t *mutex); void malloc_mutex_destroy(malloc_mutex_t *mutex); #ifndef JEMALLOC_NO_RWLOCKS bool malloc_rwlock_init(malloc_rwlock_t *rwlock); void malloc_rwlock_destroy(malloc_rwlock_t *rwlock); #endif void malloc_rwlock_rdlock(malloc_rwlock_t *rwlock); void malloc_rwlock_wrlock(malloc_rwlock_t *rwlock); void malloc_rwlock_unlock(malloc_rwlock_t *rwlock); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex) { if (isthreaded) { #ifdef _WIN32 EnterCriticalSection(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mutex->lock); #else pthread_mutex_lock(&mutex->lock); #endif } } JEMALLOC_INLINE void malloc_mutex_unlock(malloc_mutex_t *mutex) { if (isthreaded) { #ifdef _WIN32 LeaveCriticalSection(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mutex->lock); #else pthread_mutex_unlock(&mutex->lock); #endif } } JEMALLOC_INLINE void malloc_mutex_destroy(malloc_mutex_t *mutex) { #if 
(!defined(_WIN32) && !defined(JEMALLOC_OSSPIN)\ && !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_JET)) pthread_mutex_destroy(&mutex->lock); #endif } JEMALLOC_INLINE void malloc_rwlock_rdlock(malloc_rwlock_t *rwlock) { if (isthreaded) { #ifdef _WIN32 EnterCriticalSection(&rwlock->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&rwlock->lock); #elif (defined(JEMALLOC_NO_RWLOCKS)) pthread_mutex_lock(&rwlock->lock); #else pthread_rwlock_rdlock(&rwlock->lock); #endif } } JEMALLOC_INLINE void malloc_rwlock_wrlock(malloc_rwlock_t *rwlock) { if (isthreaded) { #ifdef _WIN32 EnterCriticalSection(&rwlock->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&rwlock->lock); #elif (defined(JEMALLOC_NO_RWLOCKS)) pthread_mutex_lock(&rwlock->lock); #else pthread_rwlock_wrlock(&rwlock->lock); #endif } } JEMALLOC_INLINE void malloc_rwlock_unlock(malloc_rwlock_t *rwlock) { if (isthreaded) { #ifdef _WIN32 LeaveCriticalSection(&rwlock->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&rwlock->lock); #elif (defined(JEMALLOC_NO_RWLOCKS)) pthread_mutex_unlock(&rwlock->lock); #else pthread_rwlock_unlock(&rwlock->lock); #endif } } #ifndef JEMALLOC_NO_RWLOCKS JEMALLOC_INLINE bool malloc_rwlock_init(malloc_rwlock_t *rwlock) { if (isthreaded) { if (pthread_rwlock_init(&rwlock->lock, NULL) != 0) return (true); } return (false); } JEMALLOC_INLINE void malloc_rwlock_destroy(malloc_rwlock_t *rwlock) { if (isthreaded) { pthread_rwlock_destroy(&rwlock->lock); } } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
5,281
24.516908
80
h
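mutex.h above wraps the platform lock primitives (CRITICAL_SECTION, OSSpinLock, pthread mutexes) and skips locking entirely until isthreaded becomes true. Below is a simplified, self-contained analogue of that pattern, pthread branch only; it is an illustration written for this note, not jemalloc's actual code.

/* Simplified analogue of the malloc_mutex wrapper pattern: locking is a no-op
 * while the process is known to be single-threaded. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool isthreaded = false; /* jemalloc flips this lazily under JEMALLOC_LAZY_LOCK */

typedef struct {
	pthread_mutex_t lock;
} my_mutex_t;

static void my_mutex_lock(my_mutex_t *m)
{
	if (isthreaded)
		pthread_mutex_lock(&m->lock);
}

static void my_mutex_unlock(my_mutex_t *m)
{
	if (isthreaded)
		pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	my_mutex_t m = {PTHREAD_MUTEX_INITIALIZER};

	my_mutex_lock(&m);   /* no-op: still single-threaded */
	puts("critical section (unlocked fast path)");
	my_mutex_unlock(&m);

	isthreaded = true;   /* once threads exist, the real lock is taken */
	my_mutex_lock(&m);
	puts("critical section (pthread_mutex held)");
	my_mutex_unlock(&m);
	return 0;
}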
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ctl.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ctl_node_s ctl_node_t; typedef struct ctl_named_node_s ctl_named_node_t; typedef struct ctl_indexed_node_s ctl_indexed_node_t; typedef struct ctl_arena_stats_s ctl_arena_stats_t; typedef struct ctl_stats_s ctl_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct ctl_node_s { bool named; }; struct ctl_named_node_s { struct ctl_node_s node; const char *name; /* If (nchildren == 0), this is a terminal node. */ unsigned nchildren; const ctl_node_t *children; int (*ctl)(const size_t *, size_t, void *, size_t *, void *, size_t); }; struct ctl_indexed_node_s { struct ctl_node_s node; const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); }; struct ctl_arena_stats_s { bool initialized; unsigned nthreads; const char *dss; size_t pactive; size_t pdirty; arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; malloc_bin_stats_t bstats[NBINS]; malloc_large_stats_t *lstats; /* nlclasses elements. */ }; struct ctl_stats_s { struct { size_t current; /* stats_chunks.curchunks */ uint64_t total; /* stats_chunks.nchunks */ size_t high; /* stats_chunks.highchunks */ } chunks; unsigned narenas; ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(void); void ctl_postfork_parent(void); void ctl_postfork_child(void); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf("<jemalloc>: Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ "<jemalloc>: Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
3,172
27.845455
80
h
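The xmallctl macros in ctl.h above are abort-on-failure wrappers around jemalloc's public mallctl interface. A small example of that public interface follows, assuming an unprefixed jemalloc build linked with -ljemalloc.

/* Read-only mallctl query: newp == NULL, newlen == 0. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void)
{
	const char *version;
	size_t len = sizeof(version);

	if (mallctl("version", &version, &len, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"version\") failed\n");
		return 1;
	}
	printf("jemalloc version: %s\n", version);
	return 0;
}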
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/vector.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct vector_s vector_t;
typedef struct vec_list_s vec_list_t;

#define VECTOR_MIN_PART_SIZE 8

#define VECTOR_INITIALIZER JEMALLOC_ARG_CONCAT({.data = NULL, .size = 0})

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct vec_list_s {
	vec_list_t *next;
	int length;
	void *data[];
};

struct vector_s {
	vec_list_t *list;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *vec_get(vector_t *vector, int index);
void vec_set(vector_t *vector, int index, void *val);
void vec_delete(vector_t *vector);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,063
26.282051
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/arena.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized * as small as possible such that this setting is still honored, without * violating other constraints. The goal is to make runs as small as possible * without exceeding a per run external fragmentation threshold. * * We use binary fixed point math for overhead computations, where the binary * point is implicitly RUN_BFP bits to the left. * * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be * honored for some/all object sizes, since when heap profiling is enabled * there is one pointer of header overhead per object (plus a constant). This * constraint is relaxed (ignored) for runs that are so small that the * per-region overhead is greater than: * * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) */ #define RUN_BFP 12 /* \/ Implicit binary fixed point. */ #define RUN_MAX_OVRHD 0x0000003dU #define RUN_MAX_OVRHD_RELAX 0x00001800U /* Maximum number of regions in one run. */ #define LG_RUN_MAXREGS 11 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS) /* * Minimum redzone size. Redzones may be larger than this if necessary to * preserve region alignment. */ #define REDZONE_MINSIZE 16 /* * The minimum ratio of active:dirty pages per arena is computed as: * * (nactive >> opt_lg_dirty_mult) >= ndirty * * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times * as many active pages as dirty pages. */ #define LG_DIRTY_MULT_DEFAULT 3 typedef struct arena_chunk_map_s arena_chunk_map_t; typedef struct arena_chunk_s arena_chunk_t; typedef struct arena_run_s arena_run_t; typedef struct arena_bin_info_s arena_bin_info_t; typedef struct arena_bin_s arena_bin_t; typedef struct arena_s arena_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Each element of the chunk map corresponds to one page within the chunk. */ struct arena_chunk_map_s { #ifndef JEMALLOC_PROF /* * Overlay prof_ctx in order to allow it to be referenced by dead code. * Such antics aren't warranted for per arena data structures, but * chunk map overhead accounts for a percentage of memory, rather than * being just a fixed cost. */ union { #endif union { /* * Linkage for run trees. There are two disjoint uses: * * 1) arena_t's runs_avail tree. * 2) arena_run_t conceptually uses this linkage for in-use * non-full runs, rather than directly embedding linkage. */ rb_node(arena_chunk_map_t) rb_link; /* * List of runs currently in purgatory. arena_chunk_purge() * temporarily allocates runs that contain dirty pages while * purging, so that other threads cannot use the runs while the * purging thread is operating without the arena lock held. */ ql_elm(arena_chunk_map_t) ql_link; } u; /* Profile counters, used for large object runs. */ prof_ctx_t *prof_ctx; #ifndef JEMALLOC_PROF }; /* union { ... }; */ #endif /* * Run address (or size) and various flags are stored together. The bit * layout looks like (assuming 32-bit system): * * ???????? ???????? ????nnnn nnnndula * * ? : Unallocated: Run address for first/last pages, unset for internal * pages. * Small: Run page offset. * Large: Run size for first page, unset for trailing pages. * n : binind for small size class, BININD_INVALID for large size class. * d : dirty? * u : unzeroed? * l : large? * a : allocated? * * Following are example bit patterns for the three types of runs. 
* * p : run page offset * s : run size * n : binind for size class; large objects set these to BININD_INVALID * x : don't care * - : 0 * + : 1 * [DULA] : bit set * [dula] : bit unset * * Unallocated (clean): * ssssssss ssssssss ssss++++ ++++du-a * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx * ssssssss ssssssss ssss++++ ++++dU-a * * Unallocated (dirty): * ssssssss ssssssss ssss++++ ++++D--a * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * ssssssss ssssssss ssss++++ ++++D--a * * Small: * pppppppp pppppppp ppppnnnn nnnnd--A * pppppppp pppppppp ppppnnnn nnnn---A * pppppppp pppppppp ppppnnnn nnnnd--A * * Large: * ssssssss ssssssss ssss++++ ++++D-LA * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx * -------- -------- ----++++ ++++D-LA * * Large (sampled, size <= PAGE): * ssssssss ssssssss ssssnnnn nnnnD-LA * * Large (not sampled, size == PAGE): * ssssssss ssssssss ssss++++ ++++D-LA */ size_t bits; #define CHUNK_MAP_BININD_SHIFT 4 #define BININD_INVALID ((size_t)0xffU) /* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ #define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) #define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK #define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) #define CHUNK_MAP_DIRTY ((size_t)0x8U) #define CHUNK_MAP_UNZEROED ((size_t)0x4U) #define CHUNK_MAP_LARGE ((size_t)0x2U) #define CHUNK_MAP_ALLOCATED ((size_t)0x1U) #define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED }; typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; /* Arena chunk header. */ struct arena_chunk_s { /* Arena that owns the chunk. */ arena_t *arena; /* Linkage for tree of arena chunks that contain dirty runs. */ rb_node(arena_chunk_t) dirty_link; /* Number of dirty pages. */ size_t ndirty; /* Number of available runs. */ size_t nruns_avail; /* * Number of available run adjacencies that purging could coalesce. * Clean and dirty available runs are not coalesced, which causes * virtual memory fragmentation. The ratio of * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this * fragmentation. */ size_t nruns_adjac; /* * Map of pages within chunk that keeps track of free/large/small. The * first map_bias entries are omitted, since the chunk header does not * need to be tracked in the map. This omission saves a header page * for common chunk sizes (e.g. 4 MiB). */ arena_chunk_map_t map[1]; /* Dynamically sized. */ }; typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; struct arena_run_s { /* Bin this run is associated with. */ arena_bin_t *bin; /* Index of next region that has never been allocated, or nregs. */ uint32_t nextind; /* Number of free regions in run. */ unsigned nfree; }; /* * Read-only information associated with each element of arena_t's bins array * is stored separately, partly to reduce memory usage (only one copy, rather * than one per arena), but mainly to avoid false cacheline sharing. * * Each run has the following layout: * * /--------------------\ * | arena_run_t header | * | ... | * bitmap_offset | bitmap | * | ... | * |--------------------| * | redzone | * reg0_offset | region 0 | * | redzone | * |--------------------| \ * | redzone | | * | region 1 | > reg_interval * | redzone | / * |--------------------| * | ... | * | ... | * | ... | * |--------------------| * | redzone | * | region nregs-1 | * | redzone | * |--------------------| * | alignment pad? 
| * \--------------------/ * * reg_interval has at least the same minimum alignment as reg_size; this * preserves the alignment constraint that sa2u() depends on. Alignment pad is * either 0 or redzone_size; it is present only if needed to align reg0_offset. */ struct arena_bin_info_s { /* Size of regions in a run for this bin's size class. */ size_t reg_size; /* Redzone size. */ size_t redzone_size; /* Interval between regions (reg_size + (redzone_size << 1)). */ size_t reg_interval; /* Total size of a run for this bin's size class. */ size_t run_size; /* Total number of regions in a run for this bin's size class. */ uint32_t nregs; /* * Offset of first bitmap_t element in a run header for this bin's size * class. */ uint32_t bitmap_offset; /* * Metadata used to manipulate bitmaps for runs associated with this * bin. */ bitmap_info_t bitmap_info; /* Offset of first region in a run for this bin's size class. */ uint32_t reg0_offset; }; struct arena_bin_s { /* * All operations on runcur, runs, and stats require that lock be * locked. Run allocation/deallocation are protected by the arena lock, * which may be acquired while holding one or more bin locks, but not * vise versa. */ malloc_mutex_t lock; /* * Current run being used to service allocations of this bin's size * class. */ arena_run_t *runcur; /* * Tree of non-full runs. This tree is used when looking for an * existing run when runcur is no longer usable. We choose the * non-full run that is lowest in memory; this policy tends to keep * objects packed well, and it can also help reduce the number of * almost-empty chunks. */ arena_run_tree_t runs; /* Bin statistics. */ malloc_bin_stats_t stats; }; struct arena_s { /* This arena's index within the arenas array. */ unsigned ind; /* This arena's pool. */ pool_t *pool; /* * Number of threads currently assigned to this arena. This field is * protected by arenas_lock. */ unsigned nthreads; /* * There are three classes of arena operations from a locking * perspective: * 1) Thread asssignment (modifies nthreads) is protected by * arenas_lock. * 2) Bin-related operations are protected by bin locks. * 3) Chunk- and run-related operations are protected by this mutex. */ malloc_mutex_t lock; arena_stats_t stats; /* * List of tcaches for extant threads associated with this arena. * Stats from these are merged incrementally, and at exit. */ ql_head(tcache_t) tcache_ql; uint64_t prof_accumbytes; dss_prec_t dss_prec; /* Tree of dirty-page-containing chunks this arena manages. */ arena_chunk_tree_t chunks_dirty; /* * In order to avoid rapid chunk allocation/deallocation when an arena * oscillates right on the cusp of needing a new chunk, cache the most * recently freed chunk. The spare is left in the arena's chunk trees * until it is deleted. * * There is one spare chunk per arena, rather than one spare total, in * order to avoid interactions between multiple threads that could make * a single spare inadequate. */ arena_chunk_t *spare; /* Number of pages in active runs and huge regions. */ size_t nactive; /* * Current count of pages within unused runs that are potentially * dirty, and for which madvise(... MADV_DONTNEED) has not been called. * By tracking this, we can institute a limit on how much dirty unused * memory is mapped for each arena. */ size_t ndirty; /* * Approximate number of pages being purged. It is possible for * multiple threads to purge dirty pages concurrently, and they use * npurgatory to indicate the total number of pages all threads are * attempting to purge. 
*/ size_t npurgatory; /* * Size/address-ordered trees of this arena's available runs. The trees * are used for first-best-fit run allocation. */ arena_avail_tree_t runs_avail; /* * user-configureable chunk allocation and deallocation functions. */ chunk_alloc_t *chunk_alloc; chunk_dalloc_t *chunk_dalloc; /* bins is used to store trees of free regions. */ arena_bin_t bins[NBINS]; }; arena_chunk_map_t * arena_runs_avail_tree_iter(arena_t *arena, arena_chunk_map_t *(*cb) (arena_avail_tree_t *, arena_chunk_map_t *, void *), void *arg); #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern ssize_t opt_lg_dirty_mult; /* * small_size2bin_tab is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via small_size2bin(). */ extern uint8_t const small_size2bin_tab[]; /* * small_bin2size_tab duplicates information in arena_bin_info, but in a const * array, for which it is easier for the compiler to optimize repeated * dereferences. */ extern uint32_t const small_bin2size_tab[NBINS]; extern arena_bin_info_t arena_bin_info[NBINS]; /* Number of large size classes. */ #define nlclasses (chunk_npages - map_bias) void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero); void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size); void arena_purge_all(arena_t *arena); void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); #ifdef JEMALLOC_JET typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, uint8_t); extern arena_redzone_corruption_t *arena_redzone_corruption; typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; #else void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); #endif void arena_quarantine_junk_small(void *ptr, size_t usize); void *arena_malloc_small(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); void arena_prof_promoted(const void *ptr, size_t size); void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_t *mapelm); void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_t *mapelm); void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind); #ifdef JEMALLOC_JET typedef void (arena_dalloc_junk_large_t)(void *, size_t); extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; #endif void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); #ifdef JEMALLOC_JET typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; #endif bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc); dss_prec_t arena_dss_prec_get(arena_t *arena); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); void 
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats); bool arena_new(pool_t *pool, arena_t *arena, unsigned ind); bool arena_boot(arena_t *arena); void arena_params_boot(void); void arena_prefork(arena_t *arena); void arena_postfork_parent(arena_t *arena); void arena_postfork_child(arena_t *arena); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE size_t small_size2bin_compute(size_t size); size_t small_size2bin_lookup(size_t size); size_t small_size2bin(size_t size); size_t small_bin2size_compute(size_t binind); size_t small_bin2size_lookup(size_t binind); size_t small_bin2size(size_t binind); size_t small_s2u_compute(size_t size); size_t small_s2u_lookup(size_t size); size_t small_s2u(size_t size); size_t arena_mapelm_to_pageind(arena_chunk_map_t *mapelm); arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbitsp_read(size_t *mapbitsp); size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size); void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, size_t binind); void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, size_t binind, size_t flags); void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, size_t unzeroed); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); prof_ctx_t *arena_prof_ctx_get(const void *ptr); void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); size_t arena_salloc(const void *ptr, bool demote); void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) # ifdef JEMALLOC_ARENA_INLINE_A JEMALLOC_INLINE size_t small_size2bin_compute(size_t size) { #if (NTBINS != 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil(size)); return (lg_ceil < lg_tmin ? 
0 : lg_ceil - lg_tmin); } else #endif { size_t x = lg_floor((size<<1)-1); size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); size_t grp = shift << LG_SIZE_CLASS_GROUP; size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t mod = ((size - 1) >> lg_delta) & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t bin = NTBINS + grp + mod; return (bin); } } JEMALLOC_ALWAYS_INLINE size_t small_size2bin_lookup(size_t size) { assert(size <= LOOKUP_MAXCLASS); { size_t ret = ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN])); assert(ret == small_size2bin_compute(size)); return (ret); } } JEMALLOC_ALWAYS_INLINE size_t small_size2bin(size_t size) { assert(size > 0); if (size <= LOOKUP_MAXCLASS) return (small_size2bin_lookup(size)); else return (small_size2bin_compute(size)); } JEMALLOC_INLINE size_t small_bin2size_compute(size_t binind) { #if (NTBINS > 0) if (binind < NTBINS) return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + binind)); else #endif { size_t reduced_binind = binind - NTBINS; size_t grp = reduced_binind >> LG_SIZE_CLASS_GROUP; size_t mod = reduced_binind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); size_t grp_size_mask = ~((!!grp)-1); size_t grp_size = ((ZU(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; size_t lg_delta = shift + (LG_QUANTUM-1); size_t mod_size = (mod+1) << lg_delta; size_t usize = grp_size + mod_size; return (usize); } } JEMALLOC_ALWAYS_INLINE size_t small_bin2size_lookup(size_t binind) { assert(binind < NBINS); { size_t ret = ((size_t)(small_bin2size_tab[binind])); assert(ret == small_bin2size_compute(binind)); return (ret); } } JEMALLOC_ALWAYS_INLINE size_t small_bin2size(size_t binind) { return (small_bin2size_lookup(binind)); } JEMALLOC_ALWAYS_INLINE size_t small_s2u_compute(size_t size) { #if (NTBINS > 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil(size)); return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : (ZU(1) << lg_ceil)); } else #endif { size_t x = lg_floor((size<<1)-1); size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; return (usize); } } JEMALLOC_ALWAYS_INLINE size_t small_s2u_lookup(size_t size) { size_t ret = (small_bin2size(small_size2bin(size))); assert(ret == small_s2u_compute(size)); return (ret); } JEMALLOC_ALWAYS_INLINE size_t small_s2u(size_t size) { assert(size > 0); if (size <= LOOKUP_MAXCLASS) return (small_s2u_lookup(size)); else return (small_s2u_compute(size)); } # endif /* JEMALLOC_ARENA_INLINE_A */ # ifdef JEMALLOC_ARENA_INLINE_B JEMALLOC_ALWAYS_INLINE size_t arena_mapelm_to_pageind(arena_chunk_map_t *mapelm) { uintptr_t map_offset = CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map); return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias); } JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * arena_mapp_get(arena_chunk_t *chunk, size_t pageind) { assert(pageind >= map_bias); assert(pageind < chunk_npages); return (&chunk->map[pageind-map_bias]); } JEMALLOC_ALWAYS_INLINE size_t * arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) { return (&arena_mapp_get(chunk, pageind)->bits); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbitsp_read(size_t *mapbitsp) { return (*mapbitsp); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) { return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); return (mapbits & ~PAGE_MASK); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); return (mapbits & ~PAGE_MASK); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == CHUNK_MAP_ALLOCATED); return (mapbits >> LG_PAGE); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; size_t binind; mapbits = arena_mapbits_get(chunk, pageind); binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; assert(binind < NBINS || binind == BININD_INVALID); return (binind); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_DIRTY); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_UNZEROED); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_LARGE); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) { *mapbitsp = mapbits; } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t 
size, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert((size & PAGE_MASK) == 0); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); size_t unzeroed; assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_DIRTY) == flags); unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, size_t binind) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert(binind <= BININD_INVALID); assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | (binind << CHUNK_MAP_BININD_SHIFT)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, size_t binind, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); size_t unzeroed; assert(binind < BININD_INVALID); assert(pageind - runind >= map_bias); assert((flags & CHUNK_MAP_DIRTY) == flags); unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. 
*/ arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, size_t unzeroed) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | unzeroed); } JEMALLOC_INLINE bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); assert(prof_interval != 0); arena->prof_accumbytes += accumbytes; if (arena->prof_accumbytes >= prof_interval) { arena->prof_accumbytes -= prof_interval; return (true); } return (false); } JEMALLOC_INLINE bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (prof_interval == 0) return (false); return (arena_prof_accum_impl(arena, accumbytes)); } JEMALLOC_INLINE bool arena_prof_accum(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (prof_interval == 0) return (false); { bool ret; malloc_mutex_lock(&arena->lock); ret = arena_prof_accum_impl(arena, accumbytes); malloc_mutex_unlock(&arena->lock); return (ret); } } JEMALLOC_ALWAYS_INLINE size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits) { size_t binind; binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; if (config_debug) { arena_chunk_t *chunk; arena_t *arena; size_t pageind; size_t actual_mapbits; arena_run_t *run; arena_bin_t *bin; size_t actual_binind; arena_bin_info_t *bin_info; assert(binind != BININD_INVALID); assert(binind < NBINS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = chunk->arena; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; actual_mapbits = arena_mapbits_get(chunk, pageind); assert(mapbits == actual_mapbits); assert(arena_mapbits_large_get(chunk, pageind) == 0); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - (actual_mapbits >> LG_PAGE)) << LG_PAGE)); bin = run->bin; actual_binind = bin - arena->bins; assert(binind == actual_binind); bin_info = &arena_bin_info[actual_binind]; assert(((uintptr_t)ptr - ((uintptr_t)run + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval == 0); } return (binind); } # endif /* JEMALLOC_ARENA_INLINE_B */ # ifdef JEMALLOC_ARENA_INLINE_C JEMALLOC_INLINE size_t arena_bin_index(arena_t *arena, arena_bin_t *bin) { size_t binind = bin - arena->bins; assert(binind < NBINS); return (binind); } JEMALLOC_INLINE unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) { unsigned shift, diff, regind; size_t interval; /* * Freeing a pointer lower than region zero can cause assertion * failure. */ assert((uintptr_t)ptr >= (uintptr_t)run + (uintptr_t)bin_info->reg0_offset); /* * Avoid doing division with a variable divisor if possible. Using * actual division here can reduce allocator throughput by over 20%! */ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin_info->reg0_offset); /* Rescale (factor powers of 2 out of the numerator and denominator). */ interval = bin_info->reg_interval; shift = jemalloc_ffs((int)interval) - 1; diff >>= shift; interval >>= shift; if (interval == 1) { /* The divisor was a power of 2. */ regind = diff; } else { /* * To divide by a number D that is not a power of two we * multiply by (2^21 / D) and then right shift by 21 positions. 
* * X / D * * becomes * * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT * * We can omit the first three elements, because we never * divide by 0, and 1 and 2 are both powers of two, which are * handled above. */ #define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) static const unsigned interval_invs[] = { SIZE_INV(3), SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) }; if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + 2)) { regind = (diff * interval_invs[interval - 3]) >> SIZE_INV_SHIFT; } else regind = diff / (unsigned)interval; #undef SIZE_INV #undef SIZE_INV_SHIFT } assert(diff == regind * interval); assert(regind < bin_info->nregs); return (regind); } JEMALLOC_INLINE prof_ctx_t * arena_prof_ctx_get(const void *ptr) { prof_ctx_t *ret; arena_chunk_t *chunk; size_t pageind, mapbits; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); if ((mapbits & CHUNK_MAP_LARGE) == 0) ret = (prof_ctx_t *)(uintptr_t)1U; else ret = arena_mapp_get(chunk, pageind)->prof_ctx; return (ret); } JEMALLOC_INLINE void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) { arena_chunk_t *chunk; size_t pageind; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (arena_mapbits_large_get(chunk, pageind) != 0) arena_mapp_get(chunk, pageind)->prof_ctx = ctx; } JEMALLOC_ALWAYS_INLINE void * arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) { tcache_t *tcache; pool_t *pool = arena->pool; assert(size != 0); assert(size <= arena_maxclass); if (size <= SMALL_MAXCLASS) { if (try_tcache && (tcache = tcache_get(pool, true)) != NULL) return (tcache_alloc_small(tcache, size, zero)); else { return (arena_malloc_small(choose_arena(arena), size, zero)); } } else { /* * Initialize tcache after checking size in order to avoid * infinite recursion during tcache initialization. */ if (try_tcache && size <= tcache_maxclass && (tcache = tcache_get(pool, true)) != NULL) return (tcache_alloc_large(tcache, size, zero)); else { return (arena_malloc_large(choose_arena(arena), size, zero)); } } } /* Return the size of the allocation pointed to by ptr. */ JEMALLOC_ALWAYS_INLINE size_t arena_salloc(const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; size_t pageind, binind; assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); binind = arena_mapbits_binind_get(chunk, pageind); if (binind == BININD_INVALID || (config_prof && demote == false && arena_mapbits_large_get(chunk, pageind) != 0)) { /* * Large allocation. 
In the common case (demote == true), and * as this is an inline function, most callers will only end up * looking at binind to determine that ptr is a small * allocation. */ assert(((uintptr_t)ptr & PAGE_MASK) == 0); ret = arena_mapbits_large_size_get(chunk, pageind); assert(ret != 0); assert(pageind + (ret>>LG_PAGE) <= chunk_npages); assert(ret == PAGE || arena_mapbits_large_size_get(chunk, pageind+(ret>>LG_PAGE)-1) == 0); assert(binind == arena_mapbits_binind_get(chunk, pageind+(ret>>LG_PAGE)-1)); assert(arena_mapbits_dirty_get(chunk, pageind) == arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); } else { /* Small allocation (possibly promoted to a large object). */ assert(arena_mapbits_large_get(chunk, pageind) != 0 || arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) == binind); ret = small_bin2size(binind); } return (ret); } JEMALLOC_ALWAYS_INLINE void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache) { size_t pageind, mapbits; tcache_t *tcache; assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; mapbits = arena_mapbits_get(chunk, pageind); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if ((mapbits & CHUNK_MAP_LARGE) == 0) { /* Small allocation. */ if (try_tcache && (tcache = tcache_get(chunk->arena->pool, false)) != NULL) { size_t binind; binind = arena_ptr_small_binind_get(ptr, mapbits); tcache_dalloc_small(tcache, ptr, binind); } else arena_dalloc_small(chunk->arena, chunk, ptr, pageind); } else { size_t size = arena_mapbits_large_size_get(chunk, pageind); assert(((uintptr_t)ptr & PAGE_MASK) == 0); if (try_tcache && size <= tcache_maxclass && (tcache = tcache_get(chunk->arena->pool, false)) != NULL) { tcache_dalloc_large(tcache, ptr, size); } else arena_dalloc_large(chunk->arena, chunk, ptr); } } # endif /* JEMALLOC_ARENA_INLINE_C */ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
36,460
29.562448
80
h
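The arena_chunk_map_s comment in the header above packs a run's size (or page offset), the small-run binind, and the dirty/unzeroed/large/allocated flags into the single size_t field `bits`. The standalone sketch below is not jemalloc code; it only borrows the four low-order mask values shown above, assumes a 4 KiB page purely for illustration, and omits the binind field, to show the pack/unpack pattern for a dirty, large, allocated run.

/*
 * Standalone illustration (not jemalloc code) of the bit packing documented
 * for arena_chunk_map_s.bits above: a page-aligned run size shares one
 * size_t with the low-order d/u/l/a flag bits. Assumes a 4 KiB page and
 * omits binind for brevity.
 */
#include <assert.h>
#include <stdio.h>

#define EX_PAGE                ((size_t)4096)
#define EX_PAGE_MASK           (EX_PAGE - 1)
#define EX_CHUNK_MAP_DIRTY     ((size_t)0x8U)
#define EX_CHUNK_MAP_UNZEROED  ((size_t)0x4U)
#define EX_CHUNK_MAP_LARGE     ((size_t)0x2U)
#define EX_CHUNK_MAP_ALLOCATED ((size_t)0x1U)

int
main(void)
{
        size_t size = 8 * EX_PAGE;      /* run size, must be page-aligned */
        size_t bits;

        assert((size & EX_PAGE_MASK) == 0);
        /* Pack, as for a dirty, large, allocated run. */
        bits = size | EX_CHUNK_MAP_DIRTY | EX_CHUNK_MAP_LARGE |
            EX_CHUNK_MAP_ALLOCATED;
        /* Unpack. */
        printf("size=%zu dirty=%d large=%d allocated=%d\n",
            bits & ~EX_PAGE_MASK,
            (bits & EX_CHUNK_MAP_DIRTY) != 0,
            (bits & EX_CHUNK_MAP_LARGE) != 0,
            (bits & EX_CHUNK_MAP_ALLOCATED) != 0);
        return (0);
}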
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ql.h
/* * List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
2,373
27.261905
65
h
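ql.h layers a head-pointer list interface over the qr_* ring macros from qr.h, so iteration terminates with NULL even though the underlying linkage is circular. A minimal usage sketch follows; it assumes qr.h and ql.h have been copied onto the local include path (in this tree they are normally pulled in through jemalloc's internal umbrella header), and node_s and its fields are invented for the example.

/*
 * Minimal sketch of the ql_* list macros; assumes qr.h and ql.h are
 * available locally, node_s is invented for the example.
 */
#include <stdio.h>
#include "qr.h"
#include "ql.h"

typedef struct node_s node_t;
struct node_s {
        int value;
        ql_elm(node_t) link;    /* intrusive linkage */
};

int
main(void)
{
        node_t a = {1, {NULL, NULL}};
        node_t b = {2, {NULL, NULL}};
        node_t c = {3, {NULL, NULL}};
        node_t *it;
        ql_head(node_t) head;

        ql_new(&head);
        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_elm_new(&c, link);
        ql_tail_insert(&head, &a, link);
        ql_tail_insert(&head, &b, link);
        ql_tail_insert(&head, &c, link);
        ql_foreach(it, &head, link)
                printf("%d\n", it->value);      /* prints 1, 2, 3 */
        ql_remove(&head, &b, link);
        printf("last=%d\n", ql_last(&head, link)->value);       /* 3 */
        return (0);
}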
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/qr.h
/* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. */ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_after_insert(a_qrelm, a_qr, a_field) \ do \ { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ void *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) /* qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_field) #define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ = (a_qr)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL))
2,255
32.176471
78
h
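The qr_* macros above implement an intrusive circular doubly linked ring with no sentinel: a freshly initialized element is a ring of one, and the prev/next links never hold NULL. A minimal sketch, again assuming qr.h is available locally and with a made-up elem_s type:

/*
 * Minimal sketch of the qr_* ring macros on their own (assumes qr.h is on
 * the local include path). elem_s is invented for the example.
 */
#include <stdio.h>
#include "qr.h"

typedef struct elem_s elem_t;
struct elem_s {
        char name;
        qr(elem_t) link;
};

int
main(void)
{
        elem_t x = {'x', {NULL, NULL}};
        elem_t y = {'y', {NULL, NULL}};
        elem_t *it;

        qr_new(&x, link);               /* ring of one: x -> x */
        qr_new(&y, link);
        qr_after_insert(&x, &y, link);  /* ring is now x -> y -> x */
        qr_foreach(it, &x, link)
                printf("%c\n", it->name);       /* prints x, then y */
        printf("%c\n", qr_prev(&x, link)->name); /* wraps around: y */
        return (0);
}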
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/public_namespace.sh
#!/bin/sh

for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#define je_${n} JEMALLOC_N(${n})"
done
129
17.571429
46
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk_mmap.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool pages_purge(void *addr, size_t length, bool file_mapped);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dalloc_mmap(void *chunk, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
819
34.652174
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/private_unnamespace.sh
#!/bin/sh

for symbol in `cat $1` ; do
  echo "#undef ${symbol}"
done
70
10.833333
27
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Size and alignment of memory chunks that are allocated by the OS's virtual * memory system. */ #define LG_CHUNK_DEFAULT 22 /* Return the chunk address for allocation address a. */ #define CHUNK_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~chunksize_mask)) /* Return the chunk offset of address a. */ #define CHUNK_ADDR2OFFSET(a) \ ((size_t)((uintptr_t)(a) & chunksize_mask)) /* Return the smallest chunk multiple that is >= s. */ #define CHUNK_CEILING(s) \ (((s) + chunksize_mask) & ~chunksize_mask) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern size_t opt_lg_chunk; extern const char *opt_dss; extern size_t chunksize; extern size_t chunksize_mask; /* (chunksize - 1). */ extern size_t chunk_npages; extern size_t map_bias; /* Number of arena chunk header pages. */ extern size_t arena_maxclass; /* Max size class for arenas. */ void *chunk_alloc_base(pool_t *pool, size_t size); void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero); void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, unsigned arena_ind, pool_t *pool); void chunk_unmap(pool_t *pool, void *chunk, size_t size); bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool); void chunk_record(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk, size_t size, bool zeroed); bool chunk_global_boot(); bool chunk_boot(pool_t *pool); bool chunk_init(pool_t *pool); void chunk_prefork0(pool_t *pool); void chunk_prefork1(pool_t *pool); void chunk_postfork_parent0(pool_t *pool); void chunk_postfork_parent1(pool_t *pool); void chunk_postfork_child0(pool_t *pool); void chunk_postfork_child1(pool_t *pool); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ #include "jemalloc/internal/chunk_dss.h" #include "jemalloc/internal/chunk_mmap.h"
2,490
35.632353
86
h
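The CHUNK_ADDR2BASE, CHUNK_ADDR2OFFSET and CHUNK_CEILING macros above are plain mask arithmetic against chunksize_mask (chunksize - 1). The standalone sketch below mirrors that arithmetic with a local 4 MiB constant, matching LG_CHUNK_DEFAULT (22), instead of the runtime chunksize/chunksize_mask globals; all EX_* names are invented for the example.

/*
 * Standalone illustration (not jemalloc code) of the mask arithmetic behind
 * CHUNK_ADDR2BASE, CHUNK_ADDR2OFFSET and CHUNK_CEILING, with an assumed
 * 4 MiB chunk size.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_LG_CHUNK     22
#define EX_CHUNKSIZE    ((size_t)1 << EX_LG_CHUNK)
#define EX_CHUNK_MASK   (EX_CHUNKSIZE - 1)

#define EX_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~(uintptr_t)EX_CHUNK_MASK))
#define EX_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & EX_CHUNK_MASK))
#define EX_CEILING(s)     (((s) + EX_CHUNK_MASK) & ~EX_CHUNK_MASK)

int
main(void)
{
        uintptr_t a = ((uintptr_t)7 << EX_LG_CHUNK) + 0x1234; /* inside chunk 7 */

        printf("base   = %p\n", EX_ADDR2BASE(a));        /* 7 * 4 MiB */
        printf("offset = 0x%zx\n", EX_ADDR2OFFSET(a));   /* 0x1234 */
        printf("ceil   = %zu MiB\n",
            EX_CEILING((size_t)5000000) >> 20);          /* 8 */
        return (0);
}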
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ckh.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ckh_s ckh_t; typedef struct ckhc_s ckhc_t; /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); /* Maintain counters used to get an idea of performance. */ /* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Hash table cell. */ struct ckhc_s { const void *key; const void *data; }; struct ckh_s { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ #define CKH_A 1103515241 #define CKH_C 12347 uint32_t prng_state; /* Total number of items. */ size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ unsigned lg_minbuckets; unsigned lg_curbuckets; /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); void ckh_delete(ckh_t *ckh); size_t ckh_count(ckh_t *ckh); bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); bool ckh_insert(ckh_t *ckh, const void *key, const void *data); bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,646
28.741573
80
h
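ckh.h declares jemalloc's internal cuckoo hash, with hashing and key comparison supplied as function pointers (ckh_hash_t / ckh_keycomp_t); ckh_string_hash and ckh_string_keycomp are the helpers declared above for string keys. The sketch below only strings together the calls declared above. It is not a standalone program, since ckh_t and these functions exist solely inside jemalloc's internal build and ckh_new() allocates through the running allocator; the 16-item hint and the "answer"/"42" strings are arbitrary examples.

/*
 * Call-sequence sketch for the cuckoo-hash API declared above; compiles only
 * within jemalloc's internal build environment.
 */
static void
ckh_usage_sketch(void)
{
        ckh_t ckh;
        void *key, *data;

        if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
                return;         /* jemalloc convention: true means failure */
        if (ckh_insert(&ckh, "answer", "42") == false &&
            ckh_search(&ckh, "answer", &key, &data) == false) {
                /* key now points at "answer", data at "42". */
        }
        ckh_delete(&ckh);
}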
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/rb.h
/*- ******************************************************************************* * * cpp macro implementation of left-leaning 2-3 red-black trees. Parent * pointers are not used, and color bits are stored in the least significant * bit of right-child pointers (if RB_COMPACT is defined), thus making node * linkage as compact as is possible for red-black trees. * * Usage: * * #include <stdint.h> * #include <stdbool.h> * #define NDEBUG // (Optional, see assert(3).) * #include <assert.h> * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) * #include <rb.h> * ... * ******************************************************************************* */ #ifndef RB_H_ #define RB_H_ /* XXX Avoid super-slow compile with older versions of clang */ #define NOSANITIZE #if (__clang_major__ == 3 && __clang_minor__ < 9) #if __has_attribute(__no_sanitize__) #undef NOSANITIZE #define NOSANITIZE __attribute__((no_sanitize("undefined"))) #endif #endif #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ bool rbn_red; \ } #endif /* Root structure. */ #define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ a_type rbt_nil; \ } /* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) #endif /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) /* Tree initializer. 
*/ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ } while (0) /* Internal utility macros. */ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != &(a_rbt)->rbt_nil) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != &(a_rbt)->rbt_nil) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != \ &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). */ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_type *key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). * a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. 
* * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * * ex_nsearch(ex_t *tree, ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. 
*/ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != &rbtree->rbt_nil); \ ret = &rbtree->rbt_nil; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != &rbtree->rbt_nil); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != &rbtree->rbt_nil); \ ret = &rbtree->rbt_nil; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != &rbtree->rbt_nil); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ while (ret != &rbtree->rbt_nil \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = &rbtree->rbt_nil; \ while (tnode != &rbtree->rbt_nil) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = &rbtree->rbt_nil; \ while (tnode != &rbtree->rbt_nil) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ if (ret == &rbtree->rbt_nil) { \ ret = (NULL); \ } \ return (ret); \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. 
*/ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. */ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (rbtn_red_get(a_type, a_field, left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void NOSANITIZE \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != &rbtree->rbt_nil; \ pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. 
*/\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != &rbtree->rbt_nil) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(rbtn_red_get(a_type, a_field, node) == false); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = &rbtree->rbt_nil; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ &rbtree->rbt_nil); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = &rbtree->rbt_nil; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ == false); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rbtn_red_get(a_type, a_field, rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rbtn_red_get(a_type, a_field, rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != &rbtree->rbt_nil); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (rbtn_red_get(a_type, a_field, leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ } \ } \ } \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == &rbtree->rbt_nil) { \ return (&rbtree->rbt_nil); \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != &rbtree->rbt_nil \ || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } else if (cmp > 0) { \ return (a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == &rbtree->rbt_nil) { \ return (&rbtree->rbt_nil); \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != \ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else if (cmp < 0) { \ return (a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, 
node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtree->rbt_root, cb, arg); \ } else { \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ if (ret == &rbtree->rbt_nil) { \ ret = NULL; \ } \ return (ret); \ } #endif /* RB_H_ */
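
/*
 * Illustrative sketch (not part of the original header): one way the
 * generated insert/remove/iterate API above can be used.  It assumes the
 * declaration and generator macros from the earlier part of this header
 * (rb_node(), rb_tree() and rb_gen() in stock jemalloc); the ex_* names
 * are hypothetical.
 */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    rb_node(ex_node_t) link;    /* Intrusive red-black linkage. */
    int key;
};
typedef rb_tree(ex_node_t) ex_tree_t;

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{
    /* Duplicate keys are not allowed (insert asserts cmp != 0). */
    return ((a->key > b->key) - (a->key < b->key));
}

/* Emits static ex_new(), ex_insert(), ex_remove(), ex_iter(), ... */
rb_gen(static, ex_, ex_tree_t, ex_node_t, link, ex_cmp)

/* Iteration callback; returning non-NULL stops the in-order walk early. */
static ex_node_t *
ex_visit(ex_tree_t *tree, ex_node_t *node, void *arg)
{
    (void)tree; (void)arg;
    /* ... use node->key ... */
    return (NULL);  /* Keep walking. */
}

static void
ex_walk_all(ex_tree_t *tree)
{
    /* A NULL start iterates from the smallest key upward. */
    ex_iter(tree, NULL, ex_visit, NULL);
}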
37,224
37.023493
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/rtree.h
/* * This radix tree implementation is tailored to the singular purpose of * tracking which chunks are currently owned by jemalloc. This functionality * is mandatory for OS X, where jemalloc must be able to respond to object * ownership queries. * ******************************************************************************* */ #ifdef JEMALLOC_H_TYPES typedef struct rtree_s rtree_t; /* * Size of each radix tree node (must be a power of 2). This impacts tree * depth. */ #define RTREE_NODESIZE (1U << 16) typedef void *(rtree_alloc_t)(pool_t *, size_t); typedef void (rtree_dalloc_t)(pool_t *, void *); #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct rtree_s { rtree_alloc_t *alloc; rtree_dalloc_t *dalloc; pool_t *pool; malloc_mutex_t mutex; void **root; unsigned height; unsigned level2bits[1]; /* Dynamically sized. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc, pool_t *pool); void rtree_delete(rtree_t *rtree); void rtree_prefork(rtree_t *rtree); void rtree_postfork_parent(rtree_t *rtree); void rtree_postfork_child(rtree_t *rtree); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE #ifdef JEMALLOC_DEBUG uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key); #endif uint8_t rtree_get(rtree_t *rtree, uintptr_t key); bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) #define RTREE_GET_GENERATE(f) \ /* The least significant bits of the key are ignored. */ \ JEMALLOC_INLINE uint8_t \ f(rtree_t *rtree, uintptr_t key) \ { \ uint8_t ret; \ uintptr_t subkey; \ unsigned i, lshift, height, bits; \ void **node, **child; \ \ RTREE_LOCK(&rtree->mutex); \ for (i = lshift = 0, height = rtree->height, node = rtree->root;\ i < height - 1; \ i++, lshift += bits, node = child) { \ bits = rtree->level2bits[i]; \ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ 3)) - bits); \ child = (void**)node[subkey]; \ if (child == NULL) { \ RTREE_UNLOCK(&rtree->mutex); \ return (0); \ } \ } \ \ /* \ * node is a leaf, so it contains values rather than node \ * pointers. \ */ \ bits = rtree->level2bits[i]; \ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ bits); \ { \ uint8_t *leaf = (uint8_t *)node; \ ret = leaf[subkey]; \ } \ RTREE_UNLOCK(&rtree->mutex); \ \ RTREE_GET_VALIDATE \ return (ret); \ } #ifdef JEMALLOC_DEBUG # define RTREE_LOCK(l) malloc_mutex_lock(l) # define RTREE_UNLOCK(l) malloc_mutex_unlock(l) # define RTREE_GET_VALIDATE RTREE_GET_GENERATE(rtree_get_locked) # undef RTREE_LOCK # undef RTREE_UNLOCK # undef RTREE_GET_VALIDATE #endif #define RTREE_LOCK(l) #define RTREE_UNLOCK(l) #ifdef JEMALLOC_DEBUG /* * Suppose that it were possible for a jemalloc-allocated chunk to be * munmap()ped, followed by a different allocator in another thread re-using * overlapping virtual memory, all without invalidating the cached rtree * value. The result would be a false positive (the rtree would claim that * jemalloc owns memory that it had actually discarded). This scenario * seems impossible, but the following assertion is a prudent sanity check. 
*/ # define RTREE_GET_VALIDATE \ assert(rtree_get_locked(rtree, key) == ret); #else # define RTREE_GET_VALIDATE #endif RTREE_GET_GENERATE(rtree_get) #undef RTREE_LOCK #undef RTREE_UNLOCK #undef RTREE_GET_VALIDATE JEMALLOC_INLINE bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val) { uintptr_t subkey; unsigned i, lshift, height, bits; void **node, **child; malloc_mutex_lock(&rtree->mutex); for (i = lshift = 0, height = rtree->height, node = rtree->root; i < height - 1; i++, lshift += bits, node = child) { bits = rtree->level2bits[i]; subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); child = (void**)node[subkey]; if (child == NULL) { size_t size = ((i + 1 < height - 1) ? sizeof(void *) : (sizeof(uint8_t))) << rtree->level2bits[i+1]; child = (void**)rtree->alloc(rtree->pool, size); if (child == NULL) { malloc_mutex_unlock(&rtree->mutex); return (true); } memset(child, 0, size); node[subkey] = child; } } /* node is a leaf, so it contains values rather than node pointers. */ bits = rtree->level2bits[i]; subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); { uint8_t *leaf = (uint8_t *)node; leaf[subkey] = val; } malloc_mutex_unlock(&rtree->mutex); return (false); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
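
/*
 * Illustrative sketch (not part of the original header): recording and
 * querying chunk ownership with the API above.  The tree would be created
 * once with rtree_new(), keyed on the high (pointer-size minus lg-chunk)
 * bits of an address; the value 1 for "owned" and the helper names are
 * assumptions for illustration.
 */
static bool
example_register_chunk(rtree_t *chunks_rtree, void *chunk)
{
    /* Returns true on allocation failure inside rtree_set(). */
    return (rtree_set(chunks_rtree, (uintptr_t)chunk, 1));
}

static bool
example_owns(rtree_t *chunks_rtree, const void *ptr)
{
    /* Low bits of the key are ignored, so any address in the chunk works. */
    return (rtree_get(chunks_rtree, (uintptr_t)ptr) != 0);
}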
5,206
28.754286
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/size_classes.sh
#!/bin/sh # The following limits are chosen such that they cover all supported platforms. # Pointer sizes. lg_zarr="2 3" # Quanta. lg_qarr="3 4" # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. lg_tmin=3 # Maximum lookup size. lg_kmax=12 # Page sizes. lg_parr="12 13 16" # Size class group size (number of size classes for each size doubling). lg_g=2 pow2() { e=$1 pow2_result=1 while [ ${e} -gt 0 ] ; do pow2_result=$((${pow2_result} + ${pow2_result})) e=$((${e} - 1)) done } lg() { x=$1 lg_result=0 while [ ${x} -gt 1 ] ; do lg_result=$((${lg_result} + 1)) x=$((${x} / 2)) done } size_class() { index=$1 lg_grp=$2 lg_delta=$3 ndelta=$4 lg_p=$5 lg_kmax=$6 lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" else rem="no" fi lg_size=${lg_grp} if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then lg_size=$((${lg_grp} + 1)) else lg_size=${lg_grp} rem="yes" fi if [ ${lg_size} -lt ${lg_p} ] ; then bin="yes" else bin="no" fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then lg_delta_lookup=${lg_delta} else lg_delta_lookup="no" fi printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup} # Defined upon return: # - lg_delta_lookup (${lg_delta} or "no") # - bin ("yes" or "no") } sep_line() { echo " \\" } size_classes() { lg_z=$1 lg_q=$2 lg_t=$3 lg_p=$4 lg_g=$5 pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} echo "#define SIZE_CLASSES \\" echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 # Tiny size classes. ndelta=0 index=0 lg_grp=${lg_t} lg_delta=${lg_grp} while [ ${lg_grp} -lt ${lg_q} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi ntbins=$((${ntbins} + 1)) lg_tiny_maxclass=${lg_grp} # Final written value is correct. index=$((${index} + 1)) lg_delta=${lg_grp} lg_grp=$((${lg_grp} + 1)) done # First non-tiny group. if [ ${ntbins} -gt 0 ] ; then sep_line # The first size class has an unusual encoding, because the size has to be # split between grp and delta*ndelta. lg_grp=$((${lg_grp} - 1)) ndelta=1 size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done # All remaining groups. 
lg_grp=$((${lg_grp} + ${lg_g})) while [ ${lg_grp} -lt ${ptr_bits} ] ; do sep_line ndelta=1 if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then ndelta_limit=$((${g} - 1)) else ndelta_limit=${g} fi while [ ${ndelta} -le ${ndelta_limit} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) done echo # Defined upon completion: # - ntbins # - nlbins # - nbins # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass } cat <<EOF /* This file was automatically generated by size_classes.sh. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to * be defined prior to inclusion, and it in turn defines: * * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. * SIZE_CLASSES: Complete table of * SC(index, lg_delta, size, bin, lg_delta_lookup) tuples. * index: Size class index. * lg_grp: Lg group base size (no deltas added). * lg_delta: Lg delta to previous size class. * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta * bin: 'yes' if a small bin size class, 'no' otherwise. * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' * otherwise. * NTBINS: Number of tiny bins. * NLBINS: Number of bins supported by the lookup table. * NBINS: Number of small size class bins. * LG_TINY_MAXCLASS: Lg of maximum tiny size class. * LOOKUP_MAXCLASS: Maximum size class included in lookup table. * SMALL_MAXCLASS: Maximum small size class. */ #define LG_SIZE_CLASS_GROUP ${lg_g} EOF for lg_z in ${lg_zarr} ; do for lg_q in ${lg_qarr} ; do lg_t=${lg_tmin} while [ ${lg_t} -le ${lg_q} ] ; do # Iterate through page sizes and compute how many bins there are. for lg_p in ${lg_parr} ; do echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})" size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g} echo "#define SIZE_CLASSES_DEFINED" echo "#define NTBINS ${ntbins}" echo "#define NLBINS ${nlbins}" echo "#define NBINS ${nbins}" echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}" echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}" echo "#define SMALL_MAXCLASS ${small_maxclass}" echo "#endif" echo done lg_t=$((${lg_t} + 1)) done done done cat <<EOF #ifndef SIZE_CLASSES_DEFINED # error "No size class definitions match configuration" #endif #undef SIZE_CLASSES_DEFINED /* * The small_size2bin lookup table uses uint8_t to encode each bin index, so we * cannot support more than 256 small size classes. Further constrain NBINS to * 255 since all small size classes, plus a "not small" size class must be * stored in 8 bits of arena_chunk_map_t's bits field. 
*/ #if (NBINS > 255) # error "Too many small size classes" #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ EOF
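
/*
 * Illustrative sketch (not part of the script): how the generated
 * SIZE_CLASSES table can be consumed from C.  Each SC() entry carries
 * (index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) and the class
 * size is (1 << lg_grp) + (ndelta << lg_delta), so an X-macro expansion
 * can build a size table at compile time.  The generated header name and
 * the requirement to define LG_SIZEOF_PTR/LG_TINY_MIN/LG_QUANTUM/LG_PAGE
 * before inclusion follow a normal jemalloc build; treat both as
 * assumptions here.
 */
#include <stddef.h>
#include "size_classes.h"   /* Output of this script. */

static const size_t example_class_size[] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)       \
    (((size_t)1 << (lg_grp)) + ((size_t)(ndelta) << (lg_delta))),
    SIZE_CLASSES
#undef SC
};
/* The first NBINS entries are the small bin sizes. */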
7,216
26.029963
119
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/private_namespace.sh
#!/bin/sh

for symbol in `cat $1` ; do
  echo "#define	${symbol} JEMALLOC_N(${symbol})"
done

93
14.666667
48
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/stats.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_stats_s tcache_bin_stats_t; typedef struct malloc_bin_stats_s malloc_bin_stats_t; typedef struct malloc_large_stats_s malloc_large_stats_t; typedef struct arena_stats_s arena_stats_t; typedef struct chunk_stats_s chunk_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct tcache_bin_stats_s { /* * Number of allocation requests that corresponded to the size of this * bin. */ uint64_t nrequests; }; struct malloc_bin_stats_s { /* * Current number of bytes allocated, including objects currently * cached by tcache. */ size_t allocated; /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; /* Total number of runs created for this bin's size class. */ uint64_t nruns; /* * Total number of runs reused by extracting them from the runs tree for * this bin's size class. */ uint64_t reruns; /* Current number of runs in this bin. */ size_t curruns; }; struct malloc_large_stats_s { /* * Total number of allocation/deallocation requests served directly by * the arena. Note that tcache may allocate an object, then recycle it * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* Current number of runs of this size class. */ size_t curruns; }; struct arena_stats_s { /* Number of bytes currently mapped. */ size_t mapped; /* * Total number of purge sweeps, total number of madvise calls made, * and total pages purged in order to keep dirty unused memory under * control. */ uint64_t npurge; uint64_t nmadvise; uint64_t purged; /* Per-size-category statistics. */ size_t allocated_large; uint64_t nmalloc_large; uint64_t ndalloc_large; uint64_t nrequests_large; size_t allocated_huge; uint64_t nmalloc_huge; uint64_t ndalloc_huge; uint64_t nrequests_huge; /* * One element for each possible size class, including sizes that * overlap with bin size classes. This is necessary because ipalloc() * sometimes has to use such large objects in order to assure proper * alignment. */ malloc_large_stats_t *lstats; }; struct chunk_stats_s { /* Number of chunks that were allocated. */ uint64_t nchunks; /* High-water mark for number of chunks allocated. */ size_t highchunks; /* * Current number of chunks allocated. This value isn't maintained for * any other purpose, so keep track of it in order to be able to set * highchunks. 
*/ size_t curchunks; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_stats_print; void stats_print(pool_t *pool, void (*write)(void *, const char *), void *cbopaque, const char *opts); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE size_t stats_cactive_get(pool_t *pool); void stats_cactive_add(pool_t *pool, size_t size); void stats_cactive_sub(pool_t *pool, size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) JEMALLOC_INLINE size_t stats_cactive_get(pool_t *pool) { return (atomic_read_z(&(pool->stats_cactive))); } JEMALLOC_INLINE void stats_cactive_add(pool_t *pool, size_t size) { atomic_add_z(&(pool->stats_cactive), size); } JEMALLOC_INLINE void stats_cactive_sub(pool_t *pool, size_t size) { atomic_sub_z(&(pool->stats_cactive), size); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
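
/*
 * Illustrative sketch (not part of the original header): the cactive
 * counter is kept up to date with the atomic helpers above as active
 * pages are committed to or retired from a pool.  The wrapper below is
 * hypothetical, not jemalloc code.
 */
static void
example_account_active(pool_t *pool, size_t bytes, bool commit)
{
    if (commit)
        stats_cactive_add(pool, bytes);
    else
        stats_cactive_sub(pool, bytes);
    /* Snapshot of the pool's current active byte count. */
    (void)stats_cactive_get(pool);
}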
4,604
25.016949
83
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/util.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif #ifndef likely #ifdef __GNUC__ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else #define likely(x) !!(x) #define unlikely(x) !!(x) #endif #endif /* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. */ #ifndef assert #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (config_debug && !(e)) \ not_implemented(); \ } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #define cassert(c) do { \ if ((c) == false) \ not_reached(); \ } while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); int malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); void malloc_printf(const char *format, ...) 
JEMALLOC_ATTR(format(printf, 1, 2)); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE int jemalloc_ffsl(long bitmap); int jemalloc_ffs(int bitmap); size_t pow2_ceil(size_t x); size_t lg_floor(size_t x); void set_errno(int errnum); int get_errno(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) /* Sanity check: */ #if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS) # error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure #endif JEMALLOC_ALWAYS_INLINE int jemalloc_ffsl(long bitmap) { return (JEMALLOC_INTERNAL_FFSL(bitmap)); } JEMALLOC_ALWAYS_INLINE int jemalloc_ffs(int bitmap) { return (JEMALLOC_INTERNAL_FFS(bitmap)); } /* Compute the smallest power of 2 that is >= x. */ JEMALLOC_INLINE size_t pow2_ceil(size_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; #if (LG_SIZEOF_PTR == 3) x |= x >> 32; #endif x++; return (x); } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE size_t lg_floor(size_t x) { size_t ret; asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. ); return (ret); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE size_t lg_floor(size_t x) { unsigned long ret; #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type size for lg_floor()" #endif return ((unsigned)ret); } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) JEMALLOC_INLINE size_t lg_floor(size_t x) { #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); #else # error "Unsupported type sizes for lg_floor()" #endif } #else JEMALLOC_INLINE size_t lg_floor(size_t x) { x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG) x |= (x >> 32); if (x == KZU(0xffffffffffffffff)) return (63); x++; return (jemalloc_ffsl(x) - 2); #elif (LG_SIZEOF_PTR == 2) if (x == KZU(0xffffffff)) return (31); x++; return (jemalloc_ffs(x) - 2); #else # error "Unsupported type sizes for lg_floor()" #endif } #endif /* Sets error code */ JEMALLOC_INLINE void set_errno(int errnum) { #ifdef _WIN32 int err = errnum; errno = err; SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code */ JEMALLOC_INLINE int get_errno(void) { #ifdef _WIN32 return (GetLastError()); #else return (errno); #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
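
/*
 * Illustrative sketch (not part of the original header): pow2_ceil()
 * works by smearing the highest set bit of (x - 1) into every lower
 * position, so the final increment lands exactly on the next power of
 * two.  A standalone copy of the same logic, assuming a 64-bit size_t:
 */
#include <assert.h>
#include <stddef.h>

static size_t
example_pow2_ceil(size_t x)
{
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;   /* Only valid when size_t is 64 bits (LG_SIZEOF_PTR == 3). */
    x++;
    return (x);
}

int
main(void)
{
    assert(example_pow2_ceil(1) == 1);
    assert(example_pow2_ceil(1200) == 2048);    /* lg_floor(1200) would be 10. */
    assert(example_pow2_ceil(4096) == 4096);
    return (0);
}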
6,088
21.977358
99
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/tcache.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_info_s tcache_bin_info_t; typedef struct tcache_bin_s tcache_bin_t; typedef struct tcache_s tcache_t; typedef struct tsd_tcache_s tsd_tcache_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per run for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) #define TSD_TCACHE_INITIALIZER JEMALLOC_ARG_CONCAT({.npools = 0, .seqno = NULL, .tcaches = NULL}) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef enum { tcache_enabled_false = 0, /* Enable cast to/from bool. */ tcache_enabled_true = 1, tcache_enabled_default = 2 } tcache_enabled_t; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ struct tcache_bin_info_s { unsigned ncached_max; /* Upper limit on ncached. */ }; struct tcache_bin_s { tcache_bin_stats_t tstats; int low_water; /* Min # cached since last GC. */ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ unsigned ncached; /* # of cached objects. */ void **avail; /* Stack of available objects. */ }; struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ arena_t *arena; /* This thread's arena. */ unsigned ev_cnt; /* Event count since incremental GC. */ unsigned next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. */ /* * The pointer stacks associated with tbins follow as a contiguous * array. During tcache initialization, the avail pointer in each * element of tbins is initialized to point to the proper offset within * this array. */ }; struct tsd_tcache_s { size_t npools; unsigned *seqno; /* Sequence number of pool */ tcache_t **tcaches; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern tcache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern size_t nhbins; /* Maximum cached size class. 
*/ extern size_t tcache_maxclass; size_t tcache_salloc(const void *ptr); void tcache_event_hard(tcache_t *tcache); void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind); void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache); void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_associate(tcache_t *tcache, arena_t *arena); void tcache_arena_dissociate(tcache_t *tcache); tcache_t *tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create); tcache_t *tcache_create(arena_t *arena); void tcache_destroy(tcache_t *tcache); bool tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len); void tcache_thread_cleanup(void *arg); void tcache_stats_merge(tcache_t *tcache, arena_t *arena); bool tcache_boot0(void); bool tcache_boot1(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tsd_tcache_t) malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) void tcache_event(tcache_t *tcache); void tcache_flush(pool_t *pool); bool tcache_enabled_get(void); tcache_t *tcache_get(pool_t *pool, bool create); void tcache_enabled_set(bool enabled); void *tcache_alloc_easy(tcache_bin_t *tbin); void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind); void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) /* Map of thread-specific caches. */ malloc_tsd_externs(tcache, tsd_tcache_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tsd_tcache_t, { 0 }, tcache_thread_cleanup) /* Per thread flag that allows thread caches to be disabled. 
*/ malloc_tsd_externs(tcache_enabled, tcache_enabled_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t, tcache_enabled_default, malloc_tsd_no_cleanup) JEMALLOC_INLINE void tcache_flush(pool_t *pool) { tsd_tcache_t *tsd = tcache_tsd_get(); tcache_t *tcache = tsd->tcaches[pool->pool_id]; if (tsd->seqno[pool->pool_id] == pool->seqno) { cassert(config_tcache); if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) return; tcache_destroy(tcache); } tsd->tcaches[pool->pool_id] = NULL; } JEMALLOC_INLINE bool tcache_enabled_get(void) { tcache_enabled_t tcache_enabled; cassert(config_tcache); tcache_enabled = *tcache_enabled_tsd_get(); if (tcache_enabled == tcache_enabled_default) { tcache_enabled = (tcache_enabled_t)opt_tcache; tcache_enabled_tsd_set(&tcache_enabled); } return ((bool)tcache_enabled); } JEMALLOC_INLINE void tcache_enabled_set(bool enabled) { tcache_enabled_t tcache_enabled; tsd_tcache_t *tsd; tcache_t *tcache; int i; cassert(config_tcache); tcache_enabled = (tcache_enabled_t)enabled; tcache_enabled_tsd_set(&tcache_enabled); tsd = tcache_tsd_get(); malloc_mutex_lock(&pools_lock); for (i = 0; i < tsd->npools; i++) { tcache = tsd->tcaches[i]; if (tcache != NULL) { if (enabled) { if (tcache == TCACHE_STATE_DISABLED) { tsd->tcaches[i] = NULL; } } else /* disabled */ { if (tcache > TCACHE_STATE_MAX) { if (pools[i] != NULL && tsd->seqno[i] == pools[i]->seqno) tcache_destroy(tcache); tcache = NULL; } if (tcache == NULL) { tsd->tcaches[i] = TCACHE_STATE_DISABLED; } } } } malloc_mutex_unlock(&pools_lock); } JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get(pool_t *pool, bool create) { tcache_t *tcache; tsd_tcache_t *tsd; if (config_tcache == false) return (NULL); if (config_lazy_lock && isthreaded == false) return (NULL); tsd = tcache_tsd_get(); /* expand tcaches array if necessary */ if ((tsd->npools <= pool->pool_id) && tcache_tsd_extend(tsd, pool->pool_id)) { return (NULL); } /* * All subsequent pools with the same id have to cleanup tcache before * calling tcache_get_hard. 
*/ if (tsd->seqno[pool->pool_id] != pool->seqno) { tsd->tcaches[pool->pool_id] = NULL; } tcache = tsd->tcaches[pool->pool_id]; if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { if (tcache == TCACHE_STATE_DISABLED) return (NULL); tcache = tcache_get_hard(tcache, pool, create); } return (tcache); } JEMALLOC_ALWAYS_INLINE void tcache_event(tcache_t *tcache) { if (TCACHE_GC_INCR == 0) return; tcache->ev_cnt++; assert(tcache->ev_cnt <= TCACHE_GC_INCR); if (tcache->ev_cnt == TCACHE_GC_INCR) tcache_event_hard(tcache); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_easy(tcache_bin_t *tbin) { void *ret; if (tbin->ncached == 0) { tbin->low_water = -1; return (NULL); } tbin->ncached--; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; ret = tbin->avail[tbin->ncached]; return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) { void *ret; size_t binind; tcache_bin_t *tbin; binind = small_size2bin(size); assert(binind < NBINS); tbin = &tcache->tbins[binind]; size = small_bin2size(binind); ret = tcache_alloc_easy(tbin); if (ret == NULL) { ret = tcache_alloc_small_hard(tcache, tbin, binind); if (ret == NULL) return (NULL); } assert(tcache_salloc(ret) == size); if (zero == false) { if (config_fill) { if (opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (opt_zero) memset(ret, 0, size); } } else { if (config_fill && opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } memset(ret, 0, size); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += size; tcache_event(tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) { void *ret; size_t binind; tcache_bin_t *tbin; size = PAGE_CEILING(size); assert(size <= tcache_maxclass); binind = NBINS + (size >> LG_PAGE) - 1; assert(binind < nhbins); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin); if (ret == NULL) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ ret = arena_malloc_large(tcache->arena, size, zero); if (ret == NULL) return (NULL); } else { if (config_prof && size == PAGE) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID); } if (zero == false) { if (config_fill) { if (opt_junk) memset(ret, 0xa5, size); else if (opt_zero) memset(ret, 0, size); } } else memset(ret, 0, size); if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += size; } tcache_event(tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); if (config_fill && opt_junk) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (tbin->ncached == tbin_info->ncached_max) { tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) { size_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); assert(tcache_salloc(ptr) > SMALL_MAXCLASS); assert(tcache_salloc(ptr) <= tcache_maxclass); binind = NBINS + (size >> LG_PAGE) - 1; if (config_fill && opt_junk) memset(ptr, 0x5a, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (tbin->ncached == tbin_info->ncached_max) { tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
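
/*
 * Illustrative sketch (not part of the original header): the mapping from
 * a page-aligned large request to a tbins[] slot used by
 * tcache_alloc_large()/tcache_dalloc_large() above.  The NBINS and
 * LG_PAGE values below are example assumptions (a 4 KiB page
 * configuration), not jemalloc's definitions.
 */
#include <assert.h>
#include <stddef.h>

#define EX_LG_PAGE 12   /* Assumed 4 KiB page. */
#define EX_NBINS   32   /* Assumed number of small bins. */

static size_t
example_large_binind(size_t size)
{
    /* size has already been rounded up by PAGE_CEILING(). */
    return (EX_NBINS + (size >> EX_LG_PAGE) - 1);
}

int
main(void)
{
    assert(example_large_binind(1 << EX_LG_PAGE) == EX_NBINS);      /* First large bin. */
    assert(example_large_binind(2 << EX_LG_PAGE) == EX_NBINS + 1);
    return (0);
}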
12,206
26.187082
97
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/base.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *base_alloc(pool_t *pool, size_t size);
void *base_calloc(pool_t *pool, size_t number, size_t size);
extent_node_t *base_node_alloc(pool_t *pool);
void base_node_dalloc(pool_t *pool, extent_node_t *node);
size_t base_node_prealloc(pool_t *pool, size_t number);
bool base_boot(pool_t *pool);
bool base_init(pool_t *pool);
void base_prefork(pool_t *pool);
void base_postfork_parent(pool_t *pool);
void base_postfork_child(pool_t *pool);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,078
36.206897
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/bitmap.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #define LG_BITMAP_MAXBITS LG_RUN_MAXREGS typedef struct bitmap_level_s bitmap_level_t; typedef struct bitmap_info_s bitmap_info_t; typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* Maximum number of levels possible. */ #define BITMAP_MAX_LEVELS \ (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; }; struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); size_t bitmap_info_ngroups(const bitmap_info_t *binfo); size_t bitmap_size(size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) JEMALLOC_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); } JEMALLOC_INLINE bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); } JEMALLOC_INLINE void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit) == false); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); /* Propagate group state transitions up the tree. 
*/ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; if (bitmap != NULL) gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) break; } } } /* sfu: set first unset. */ JEMALLOC_INLINE size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(bitmap_full(bitmap, binfo) == false); i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = jemalloc_ffsl(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1); } bitmap_set(bitmap, binfo, bit); return (bit); } JEMALLOC_INLINE void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit) == false); /* Propagate group state transitions up the tree. */ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (propagate == false) break; } } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
5,240
27.177419
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/prng.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Simple linear congruential pseudo-random number generator: * * prng(y) = (a*x + c) % m * * where the following constants ensure maximal period: * * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. * c == Odd number (relatively prime to 2^n). * m == 2^32 * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example. the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. * * Macro parameters: * uint32_t r : Result. * unsigned lg_range : (0..32], number of least significant bits to return. * uint32_t state : Seed value. * const uint32_t a, c : See above discussion. */ #define prng32(r, lg_range, state, a, c) do { \ assert(lg_range > 0); \ assert(lg_range <= 32); \ \ r = (state * (a)) + (c); \ state = r; \ r >>= (32 - lg_range); \ } while (false) /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ #define prng64(r, lg_range, state, a, c) do { \ assert(lg_range > 0); \ assert(lg_range <= 64); \ \ r = (state * (a)) + (c); \ state = r; \ r >>= (64 - lg_range); \ } while (false) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,017
32.081967
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/huge.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *huge_malloc(arena_t *arena, size_t size, bool zero); void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); bool huge_ralloc_no_move(pool_t *pool, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc); #ifdef JEMALLOC_JET typedef void (huge_dalloc_junk_t)(void *, size_t); extern huge_dalloc_junk_t *huge_dalloc_junk; #endif void huge_dalloc(pool_t *pool, void *ptr); size_t huge_salloc(const void *ptr); size_t huge_pool_salloc(pool_t *pool, const void *ptr); prof_ctx_t *huge_prof_ctx_get(const void *ptr); void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); bool huge_boot(pool_t *pool); bool huge_init(pool_t *pool); void huge_prefork(pool_t *pool); void huge_postfork_parent(pool_t *pool); void huge_postfork_child(pool_t *pool); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,568
39.230769
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/atomic.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #define atomic_read_uint64(p) atomic_add_uint64(p, 0) #define atomic_read_uint32(p) atomic_add_uint32(p, 0) #define atomic_read_z(p) atomic_add_z(p, 0) #define atomic_read_u(p) atomic_add_u(p, 0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); size_t atomic_add_z(size_t *p, size_t x); size_t atomic_sub_z(size_t *p, size_t x); unsigned atomic_add_u(unsigned *p, unsigned x); unsigned atomic_sub_u(unsigned *p, unsigned x); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) /******************************************************************************/ /* 64-bit operations. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, -((int64_t)x))); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); } # elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { asm volatile ( "lock; xaddq %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { x = (uint64_t)(-(int64_t)x); asm volatile ( "lock; xaddq %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } # elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { /* * atomic_fetchadd_64() doesn't exist, but we only ever use this * function on LP64 systems, so atomic_fetchadd_long() will do. 
*/ assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); } # elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } # else # error "Missing implementation for 64-bit atomic operations" # endif #endif /******************************************************************************/ /* 32-bit operations. */ #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, -((int32_t)x))); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); } #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { asm volatile ( "lock; xaddl %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { x = (uint32_t)(-(int32_t)x); asm volatile ( "lock; xaddl %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } #elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); } #elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } #else # error "Missing implementation for 32-bit atomic operations" #endif /******************************************************************************/ /* size_t operations. */ JEMALLOC_INLINE size_t atomic_add_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE size_t atomic_sub_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } /******************************************************************************/ /* unsigned operations. 
*/ JEMALLOC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } /******************************************************************************/ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,206
22.629508
80
h
null
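A minimal, self-contained sketch of the pattern the atomic.h record above implements: fetch-add style updates through the GCC/Clang __sync builtins (one of the backends the header selects), with "read" expressed as add-zero exactly as the atomic_read_* macros do. The counter_* names are illustrative, not part of jemalloc.

#include <stdint.h>
#include <stdio.h>

static uint64_t counter;

static uint64_t
counter_add(uint64_t x)
{
    /* Returns the new value, like atomic_add_uint64(). */
    return (__sync_add_and_fetch(&counter, x));
}

static uint64_t
counter_read(void)
{
    /* Mirrors atomic_read_uint64(p), which is atomic_add_uint64(p, 0). */
    return (counter_add(0));
}

int
main(void)
{
    counter_add(41);
    counter_add(1);
    printf("%llu\n", (unsigned long long)counter_read()); /* prints 42 */
    return (0);
}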
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
#ifndef JEMALLOC_INTERNAL_DECLS_H #define JEMALLOC_INTERNAL_DECLS_H #include <math.h> #ifdef _WIN32 # include <windows.h> # include "msvc_compat/windows_extra.h" #else # include <sys/param.h> # include <sys/mman.h> # if !defined(__pnacl__) && !defined(__native_client__) # include <sys/syscall.h> # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # include <sys/uio.h> # endif # include <pthread.h> # include <errno.h> #endif #include <sys/types.h> #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <stddef.h> #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include <inttypes.h> #include <string.h> #include <strings.h> #include <ctype.h> #ifdef _MSC_VER # include <io.h> typedef intptr_t ssize_t; # define STDERR_FILENO 2 # define __func__ __FUNCTION__ /* Disable warnings about deprecated system functions */ # pragma warning(disable: 4996) #else # include <unistd.h> #endif #include <fcntl.h> # define JE_PATH_MAX 1024 #endif /* JEMALLOC_INTERNAL_H */
1,205
21.754717
68
h
null
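One reason jemalloc_internal_decls.h above makes sure SYS_write is defined is that the allocator writes diagnostics with a raw syscall rather than stdio, which could allocate and re-enter the allocator. A small, Linux-only sketch of that technique (glibc defaults assumed; write_stderr is a made-up helper):

#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static void
write_stderr(const char *s)
{
    /* Bypass stdio so no memory allocation can happen on this path. */
    (void)syscall(SYS_write, STDERR_FILENO, s, strlen(s));
}

int
main(void)
{
    write_stderr("allocator diagnostic\n");
    return (0);
}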
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/mb.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void mb_write(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) #ifdef __i386__ /* * According to the Intel Architecture Software Developer's Manual, current * processors execute instructions in order from the perspective of other * processors in a multiprocessor system, but 1) Intel reserves the right to * change that, and 2) the compiler's optimizer could re-order instructions if * there weren't some form of barrier. Therefore, even if running on an * architecture that does not need memory barriers (everything through at least * i686), an "optimizer barrier" is necessary. */ JEMALLOC_INLINE void mb_write(void) { # if 0 /* This is a true memory barrier. */ asm volatile ("pusha;" "xor %%eax,%%eax;" "cpuid;" "popa;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); #else /* * This is hopefully enough to keep the compiler from reordering * instructions around this one. */ asm volatile ("nop;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); #endif } #elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE void mb_write(void) { asm volatile ("sfence" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__powerpc__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("eieio" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__sparc64__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("membar #StoreStore" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__tile__) JEMALLOC_INLINE void mb_write(void) { __sync_synchronize(); } #else /* * This is much slower than a simple memory barrier, but the semantics of mutex * unlock make this work. */ JEMALLOC_INLINE void mb_write(void) { malloc_mutex_t mtx; malloc_mutex_init(&mtx); malloc_mutex_lock(&mtx); malloc_mutex_unlock(&mtx); } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,687
22.172414
80
h
null
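A short sketch of the publish pattern a store barrier like mb_write() exists for: store the data, fence, then set the flag readers test. It mirrors the header's x86-64 branch (sfence) and assumes GCC/Clang on x86-64; payload, ready and publish are illustrative names.

#include <stdbool.h>

static int payload;
static volatile bool ready;

static inline void
store_fence(void)
{
    /* Same instruction mb_write() uses on x86-64. */
    asm volatile ("sfence" : : : "memory");
}

void
publish(int value)
{
    payload = value;   /* 1) store the data */
    store_fence();     /* 2) keep the data store ahead of the flag store */
    ready = true;      /* 3) a reader that observes the flag can read payload */
}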
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/quarantine.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct quarantine_obj_s quarantine_obj_t; typedef struct quarantine_s quarantine_t; /* Default per thread quarantine size if valgrind is enabled. */ #define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct quarantine_obj_s { void *ptr; size_t usize; }; struct quarantine_s { size_t curbytes; size_t curobjs; size_t first; #define LG_MAXOBJS_INIT 10 size_t lg_maxobjs; quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS quarantine_t *quarantine_init(size_t lg_maxobjs); void quarantine(void *ptr); void quarantine_cleanup(void *arg); bool quarantine_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *) void quarantine_alloc_hook(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) malloc_tsd_externs(quarantine, quarantine_t *) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL, quarantine_cleanup) JEMALLOC_ALWAYS_INLINE void quarantine_alloc_hook(void) { quarantine_t *quarantine; assert(config_fill && opt_quarantine); quarantine = *quarantine_tsd_get(); if (quarantine == NULL) quarantine_init(LG_MAXOBJS_INIT); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,849
26.205882
80
h
null
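The quarantine_s comment above describes a dynamically sized ring buffer addressed by first, curobjs and lg_maxobjs. Below is a self-contained sketch of that power-of-two ring indexing; ring_t and its helpers are illustrative, and the real logic lives in quarantine.c, not in this header.

#include <assert.h>
#include <stddef.h>

#define LG_MAXOBJS 10  /* matches LG_MAXOBJS_INIT above */

typedef struct {
    size_t first;      /* index of the oldest element */
    size_t curobjs;    /* number of live elements */
    void *objs[(size_t)1 << LG_MAXOBJS];
} ring_t;

static void
ring_push(ring_t *r, void *ptr)
{
    size_t mask = ((size_t)1 << LG_MAXOBJS) - 1;

    assert(r->curobjs <= mask);  /* caller guarantees the ring is not full */
    r->objs[(r->first + r->curobjs) & mask] = ptr;
    r->curobjs++;
}

static void *
ring_pop(ring_t *r)
{
    size_t mask = ((size_t)1 << LG_MAXOBJS) - 1;
    void *ret = r->objs[r->first];

    assert(r->curobjs > 0);
    r->first = (r->first + 1) & mask;
    r->curobjs--;
    return (ret);
}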
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/valgrind.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef JEMALLOC_VALGRIND #include <valgrind/valgrind.h> /* * The size that is reported to Valgrind must be consistent through a chain of * malloc..realloc..realloc calls. Request size isn't recorded anywhere in * jemalloc, so it is critical that all callers of these macros provide usize * rather than request size. As a result, buffer overflow detection is * technically weakened for the standard API, though it is generally accepted * practice to consider any extra bytes reported by malloc_usable_size() as * usable space. */ #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ if (in_valgrind) \ valgrind_make_mem_noaccess(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ if (in_valgrind) \ valgrind_make_mem_undefined(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ if (in_valgrind) \ valgrind_make_mem_defined(ptr, usize); \ } while (0) /* * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. */ #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ if (in_valgrind && cond) \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ } while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do { \ if (in_valgrind) { \ if (!maybe_moved || ptr == old_ptr) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, p2rz(ptr)); \ if (zero && old_usize < usize) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ } \ } else { \ if (!old_ptr_maybe_null || old_ptr != NULL) { \ valgrind_freelike_block(old_ptr, \ old_rzsize); \ } \ if (!ptr_maybe_null || ptr != NULL) { \ size_t copy_size = (old_usize < usize) \ ? 
old_usize : usize; \ size_t tail_size = usize - copy_size; \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ p2rz(ptr), false); \ if (copy_size > 0) { \ valgrind_make_mem_defined(ptr, \ copy_size); \ } \ if (zero && tail_size > 0) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ copy_size), tail_size); \ } \ } \ } \ } \ } while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ if (in_valgrind) \ valgrind_freelike_block(ptr, rzsize); \ } while (0) #else #define RUNNING_ON_VALGRIND ((unsigned)0) #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_VALGRIND void valgrind_make_mem_noaccess(void *ptr, size_t usize); void valgrind_make_mem_undefined(void *ptr, size_t usize); void valgrind_make_mem_defined(void *ptr, size_t usize); void valgrind_freelike_block(void *ptr, size_t usize); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
4,149
36.727273
80
h
null
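The JEMALLOC_VALGRIND_* macros above ultimately expand to Valgrind client requests. A minimal sketch of that underlying mechanism for an allocator that carves objects out of memory it manages itself (requires the Valgrind headers; pool_alloc and pool_free are made-up names, and the real code additionally gates every request on in_valgrind):

#include <stddef.h>
#include <valgrind/valgrind.h>

static char pool[4096];   /* stands in for a chunk the allocator manages */
static size_t pool_used;

static void *
pool_alloc(size_t usize)
{
    void *p = &pool[pool_used];

    pool_used += usize;
    /* Report a malloc-like block of usize bytes (no redzone, not zeroed). */
    VALGRIND_MALLOCLIKE_BLOCK(p, usize, 0, 0);
    return (p);
}

static void
pool_free(void *p)
{
    /* Memcheck now flags further use of p as a use-after-free. */
    VALGRIND_FREELIKE_BLOCK(p, 0);
}

When the program is not run under Valgrind, these client requests compile to cheap no-ops, which is why the header can afford to wrap every allocation path with them.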
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/extent.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct extent_node_s extent_node_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Tree of extents. */ struct extent_node_s { /* Linkage for the size/address-ordered tree. */ rb_node(extent_node_t) link_szad; /* Linkage for the address-ordered tree. */ rb_node(extent_node_t) link_ad; /* Profile counters, used for huge objects. */ prof_ctx_t *prof_ctx; /* Pointer to the extent that this tree node is responsible for. */ void *addr; /* Total region size. */ size_t size; /* Arena from which this extent came, if any */ arena_t *arena; /* True if zero-filled; used by chunk recycling code. */ bool zeroed; }; typedef rb_tree(extent_node_t) extent_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,408
27.18
80
h
null
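extent_node_s above carries two tree linkages because the same extents are kept in an address-ordered tree and a size/address-ordered tree. A self-contained sketch of what a "szad" ordering means, size first with address as the tie-breaker (node_t and the comparator are illustrative; the real comparators live in extent.c, not in this header):

#include <stddef.h>
#include <stdint.h>

typedef struct {
    void *addr;
    size_t size;
} node_t;

static int
node_szad_cmp(const node_t *a, const node_t *b)
{
    /* Order by size so best-fit searches are cheap... */
    int ret = (a->size > b->size) - (a->size < b->size);

    if (ret == 0) {
        /* ...and by address so equal-sized extents have a stable order. */
        uintptr_t aa = (uintptr_t)a->addr;
        uintptr_t ab = (uintptr_t)b->addr;
        ret = (aa > ab) - (aa < ab);
    }
    return (ret);
}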
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk_dss.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef enum { dss_prec_disabled = 0, dss_prec_primary = 1, dss_prec_secondary = 2, dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS extern const char *dss_prec_names[]; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS dss_prec_t chunk_dss_prec_get(void); bool chunk_dss_prec_set(dss_prec_t dss_prec); void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero); bool chunk_in_dss(void *chunk); bool chunk_dss_boot(void); void chunk_dss_prefork(void); void chunk_dss_postfork_parent(void); void chunk_dss_postfork_child(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,196
29.692308
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/pool.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #define POOLS_MIN 16 #define POOLS_MAX 32768 /* * We want to expose pool_t to the library user * as a result typedef for pool_s is located in "jemalloc.h" */ typedef struct tsd_pool_s tsd_pool_t; /* * Dummy arena is used to pass pool structure to choose_arena function * through various alloc/free variants */ #define ARENA_DUMMY_IND (~0) #define DUMMY_ARENA_INITIALIZE(name, p) \ do { \ (name).ind = ARENA_DUMMY_IND; \ (name).pool = (p); \ } while (0) #define TSD_POOL_INITIALIZER JEMALLOC_ARG_CONCAT({.npools = 0, .arenas = NULL, .seqno = NULL }) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef struct pool_memory_range_node_s { uintptr_t addr; uintptr_t addr_end; uintptr_t usable_addr; uintptr_t usable_addr_end; struct pool_memory_range_node_s *next; } pool_memory_range_node_t; struct pool_s { /* This pool's index within the pools array. */ unsigned pool_id; /* * Unique pool number. A pool_id can be reused, seqno helping to check * that data in Thread Storage Data are still valid. */ unsigned seqno; /* Protects arenas initialization (arenas, arenas_total). */ malloc_rwlock_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. * * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. */ arena_t **arenas; unsigned narenas_total; unsigned narenas_auto; /* Tree of chunks that are stand-alone huge allocations. */ extent_tree_t huge; /* Protects chunk-related data structures. */ malloc_mutex_t huge_mtx; malloc_mutex_t chunks_mtx; chunk_stats_t stats_chunks; /* * Trees of chunks that were previously allocated (trees differ only in node * ordering). These are used when allocating chunks, in an attempt to re-use * address space. Depending on function, different tree orderings are needed, * which is why there are two trees with the same contents. */ extent_tree_t chunks_szad_mmap; extent_tree_t chunks_ad_mmap; extent_tree_t chunks_szad_dss; extent_tree_t chunks_ad_dss; rtree_t *chunks_rtree; /* Protects base-related data structures. */ malloc_mutex_t base_mtx; malloc_mutex_t base_node_mtx; /* * Current pages that are being used for internal memory allocations. These * pages are carved up in cacheline-size quanta, so that there is no chance of * false cache line sharing. */ void *base_next_addr; void *base_past_addr; /* Addr immediately past base_pages. */ extent_node_t *base_nodes; /* * Per pool statistics variables */ bool ctl_initialized; ctl_stats_t ctl_stats; size_t ctl_stats_allocated; size_t ctl_stats_active; size_t ctl_stats_mapped; size_t stats_cactive; /* Protects list of memory ranges. */ malloc_mutex_t memory_range_mtx; /* List of memory ranges inside pool, useful for pool_check(). */ pool_memory_range_node_t *memory_range_list; }; struct tsd_pool_s { size_t npools; /* size of the arrays */ unsigned *seqno; /* Sequence number of pool */ arena_t **arenas; /* array of arenas indexed by pool id */ }; /* * Minimal size of pool, includes header alignment to cache line size, * initial space for base allocator, and size of at least one chunk * of memory with address alignment to multiple of chunksize. 
*/ #define POOL_MINIMAL_SIZE (3*chunksize) #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool pool_boot(pool_t *pool, unsigned pool_id); bool pool_runtime_init(pool_t *pool, unsigned pool_id); bool pool_new(pool_t *pool, unsigned pool_id); void pool_destroy(pool_t *pool); extern malloc_mutex_t pools_lock; extern malloc_mutex_t pool_base_lock; void pool_prefork(); void pool_postfork_parent(); void pool_postfork_child(); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool pool_is_file_mapped(pool_t *pool); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined (JEMALLOC_POOL_C_)) JEMALLOC_INLINE bool pool_is_file_mapped(pool_t *pool) { return pool->pool_id != 0; } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
4,687
28.484277
96
h
null
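The tsd_pool_s comment above explains that a pool_id can be reused, so a per-thread cached arena is only trusted while the cached sequence number still matches the pool's. A self-contained sketch of that validity check (tsd_cache_t, pool_info_t and cached_arena_get are illustrative names):

#include <stddef.h>

typedef struct {
    unsigned pool_id;
    unsigned seqno;      /* bumped each time a pool id is reused */
} pool_info_t;

typedef struct {
    size_t npools;       /* size of the arrays below */
    unsigned *seqno;     /* cached seqno, indexed by pool id */
    void **arenas;       /* cached arena pointer, indexed by pool id */
} tsd_cache_t;

static void *
cached_arena_get(const tsd_cache_t *tsd, const pool_info_t *pool)
{
    if (pool->pool_id >= tsd->npools)
        return (NULL);                   /* cache too small: miss */
    if (tsd->seqno[pool->pool_id] != pool->seqno)
        return (NULL);                   /* stale entry: the id was reused */
    return (tsd->arenas[pool->pool_id]); /* still valid: hit */
}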
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
/* * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for * functions that are static inline functions if inlining is enabled, and * single-definition library-private functions if inlining is disabled. * * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in * which case the denoted functions are always static, regardless of whether * inlining is enabled. */ #if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) /* Disable inlining to make debugging/profiling easier. */ # define JEMALLOC_ALWAYS_INLINE # define JEMALLOC_ALWAYS_INLINE_C static # define JEMALLOC_INLINE # define JEMALLOC_INLINE_C static # define inline #else # define JEMALLOC_ENABLE_INLINE # ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ALWAYS_INLINE \ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) # define JEMALLOC_ALWAYS_INLINE_C \ static inline JEMALLOC_ATTR(always_inline) # else # define JEMALLOC_ALWAYS_INLINE static inline # define JEMALLOC_ALWAYS_INLINE_C static inline # endif # define JEMALLOC_INLINE static inline # define JEMALLOC_INLINE_C static inline #endif #ifdef JEMALLOC_CC_SILENCE # define UNUSED JEMALLOC_ATTR(unused) #else # define UNUSED #endif #define ZU(z) ((size_t)(z)) #define ZI(z) ((ssize_t)(z)) #define QU(q) ((uint64_t)(q)) #define QI(q) ((int64_t)(q)) #define KZU(z) ZU(z##ULL) #define KZI(z) ZI(z##LL) #define KQU(q) QU(q##ULL) #define KQI(q) QI(q##LL) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #ifndef JEMALLOC_HAS_RESTRICT # define restrict #endif
1,623
28.527273
78
h
null
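The comment at the top of jemalloc_internal_macros.h describes the convention those macros support: a header declares a prototype for non-inline builds and wraps the body so it is emitted either as static inline or exactly once from the matching .c file. A sketch of that idiom (foo_double and JEMALLOC_FOO_C_ are made-up names; compare the JEMALLOC_POOL_C_ and JEMALLOC_PROF_C_ guards elsewhere in this tree):

/* In foo.h: */
#ifndef JEMALLOC_ENABLE_INLINE
int foo_double(int x);  /* prototype only; the definition comes from foo.c */
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_FOO_C_))
JEMALLOC_INLINE int
foo_double(int x)
{
    return (x + x);
}
#endif

/*
 * In foo.c: #define JEMALLOC_FOO_C_ before the includes, so that debug and
 * code-coverage builds (which disable inlining) still get one real definition.
 */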
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/prof.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct prof_bt_s prof_bt_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_thr_cnt_s prof_thr_cnt_t; typedef struct prof_ctx_s prof_ctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Maximum number of backtraces to store in each per thread LRU cache. */ #define PROF_TCMAX 1024 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all ctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. */ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct prof_bt_s { /* Backtrace, stored as len program counters. */ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* * Profiling counters. An allocation/deallocation pair can operate on * different prof_thr_cnt_t objects that are linked into the same * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go * negative. In principle it is possible for the *bytes counters to * overflow/underflow, but a general solution would require something * like 128-bit counters; this implementation doesn't bother to solve * that problem. */ int64_t curobjs; int64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; struct prof_thr_cnt_s { /* Linkage into prof_ctx_t's cnts_ql. */ ql_elm(prof_thr_cnt_t) cnts_link; /* Linkage into thread's LRU. */ ql_elm(prof_thr_cnt_t) lru_link; /* * Associated context. If a thread frees an object that it did not * allocate, it is possible that the context is not cached in the * thread's hash table, in which case it must be able to look up the * context, insert a new prof_thr_cnt_t into the thread's hash table, * and link it into the prof_ctx_t's cnts_ql. */ prof_ctx_t *ctx; /* * Threads use memory barriers to update the counters. Since there is * only ever one writer, the only challenge is for the reader to get a * consistent read of the counters. * * The writer uses this series of operations: * * 1) Increment epoch to an odd number. * 2) Update counters. * 3) Increment epoch to an even number. 
* * The reader must assure 1) that the epoch is even while it reads the * counters, and 2) that the epoch doesn't change between the time it * starts and finishes reading the counters. */ unsigned epoch; /* Profiling counters. */ prof_cnt_t cnts; }; struct prof_ctx_s { /* Associated backtrace. */ prof_bt_t *bt; /* Protects nlimbo, cnt_merged, and cnts_ql. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this ctx to be in a state of * limbo due to one of: * - Initializing per thread counters associated with this ctx. * - Preparing to destroy this ctx. * - Dumping a heap profile that includes this ctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * ctx. */ unsigned nlimbo; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* When threads exit, they merge their stats into cnt_merged. */ prof_cnt_t cnt_merged; /* * List of profile counters, one for each thread that has allocated in * this context. */ ql_head(prof_thr_cnt_t) cnts_ql; /* Linkage for list of contexts to be dumped. */ ql_elm(prof_ctx_t) dump_link; }; typedef ql_head(prof_ctx_t) prof_ctx_list_t; struct prof_tdata_s { /* * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a * cache of backtraces, with associated thread-specific prof_thr_cnt_t * objects. Other threads may read the prof_thr_cnt_t contents, but no * others will ever write them. * * Upon thread exit, the thread must merge all the prof_thr_cnt_t * counter data into the associated prof_ctx_t objects, and unlink/free * the prof_thr_cnt_t objects. */ ckh_t bt2cnt; /* LRU for contents of bt2cnt. */ ql_head(prof_thr_cnt_t) lru_ql; /* Backtrace vector, used for calls to prof_backtrace(). */ void **vec; /* Sampling state. */ uint64_t prng_state; uint64_t bytes_until_sample; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_prof; /* * Even if opt_prof is true, sampling can be temporarily disabled by setting * opt_prof_active to false. No locking is used when updating opt_prof_active, * so there are no guarantees regarding how long it will take for all threads * to notice state changes. */ extern bool opt_prof_active; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF JE_PATH_MAX + #endif 1]; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). 
*/ extern uint64_t prof_interval; void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_bt_count(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; #endif void prof_idump(void); bool prof_mdump(const char *filename); void prof_gdump(void); prof_tdata_t *prof_tdata_init(void); void prof_tdata_cleanup(void *arg); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(void); void prof_prefork(void); void prof_postfork_parent(void); void prof_postfork_child(void); void prof_sample_threshold_update(prof_tdata_t *prof_tdata); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #define PROF_ALLOC_PREP(size, ret) do { \ prof_tdata_t *prof_tdata; \ prof_bt_t bt; \ \ assert(size == s2u(size)); \ \ if (!opt_prof_active || \ prof_sample_accum_update(size, false, &prof_tdata)) { \ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ } else { \ bt_init(&bt, prof_tdata->vec); \ prof_backtrace(&bt); \ ret = prof_lookup(&bt); \ } \ } while (0) #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) prof_tdata_t *prof_tdata_get(bool create); bool prof_sample_accum_update(size_t size, bool commit, prof_tdata_t **prof_tdata_out); prof_ctx_t *prof_ctx_get(const void *ptr); void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); void prof_malloc_record_object(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, size_t old_usize, prof_ctx_t *old_ctx); void prof_free(const void *ptr, size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) /* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ malloc_tsd_externs(prof_tdata, prof_tdata_t *) malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, prof_tdata_cleanup) JEMALLOC_INLINE prof_tdata_t * prof_tdata_get(bool create) { prof_tdata_t *prof_tdata; cassert(config_prof); prof_tdata = *prof_tdata_tsd_get(); if (create && prof_tdata == NULL) prof_tdata = prof_tdata_init(); return (prof_tdata); } JEMALLOC_INLINE prof_ctx_t * prof_ctx_get(const void *ptr) { prof_ctx_t *ret; arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) { /* Region. */ ret = arena_prof_ctx_get(ptr); } else ret = huge_prof_ctx_get(ptr); return (ret); } JEMALLOC_INLINE void prof_ctx_set(const void *ptr, prof_ctx_t *ctx) { arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) { /* Region. */ arena_prof_ctx_set(ptr, ctx); } else huge_prof_ctx_set(ptr, ctx); } JEMALLOC_INLINE bool prof_sample_accum_update(size_t size, bool commit, prof_tdata_t **prof_tdata_out) { prof_tdata_t *prof_tdata; cassert(config_prof); prof_tdata = prof_tdata_get(true); if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) prof_tdata = NULL; if (prof_tdata_out != NULL) *prof_tdata_out = prof_tdata; if (prof_tdata == NULL) return (true); if (prof_tdata->bytes_until_sample >= size) { if (commit) prof_tdata->bytes_until_sample -= size; return (true); } else { /* Compute new sample threshold. 
*/ if (commit) prof_sample_threshold_update(prof_tdata); return (false); } } JEMALLOC_INLINE void prof_malloc_record_object(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) { prof_ctx_set(ptr, cnt->ctx); cnt->epoch++; /*********/ mb_write(); /*********/ cnt->cnts.curobjs++; cnt->cnts.curbytes += usize; if (opt_prof_accum) { cnt->cnts.accumobjs++; cnt->cnts.accumbytes += usize; } /*********/ mb_write(); /*********/ cnt->epoch++; /*********/ mb_write(); /*********/ } JEMALLOC_INLINE void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(ptr, true)); if (prof_sample_accum_update(usize, true, NULL)) { /* * Don't sample. For malloc()-like allocation, it is * always possible to tell in advance how large an * object's usable size will be, so there should never * be a difference between the usize passed to * PROF_ALLOC_PREP() and prof_malloc(). */ assert((uintptr_t)cnt == (uintptr_t)1U); } if ((uintptr_t)cnt > (uintptr_t)1U) prof_malloc_record_object(ptr, usize, cnt); else prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); } JEMALLOC_INLINE void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, size_t old_usize, prof_ctx_t *old_ctx) { prof_thr_cnt_t *told_cnt; cassert(config_prof); assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); if (ptr != NULL) { assert(usize == isalloc(ptr, true)); if (prof_sample_accum_update(usize, true, NULL)) { /* * Don't sample. The usize passed to * PROF_ALLOC_PREP() was larger than what * actually got allocated, so a backtrace was * captured for this allocation, even though * its actual usize was insufficient to cross * the sample threshold. */ cnt = (prof_thr_cnt_t *)(uintptr_t)1U; } } if ((uintptr_t)old_ctx > (uintptr_t)1U) { told_cnt = prof_lookup(old_ctx->bt); if (told_cnt == NULL) { /* * It's too late to propagate OOM for this realloc(), * so operate directly on old_cnt->ctx->cnt_merged. */ malloc_mutex_lock(old_ctx->lock); old_ctx->cnt_merged.curobjs--; old_ctx->cnt_merged.curbytes -= old_usize; malloc_mutex_unlock(old_ctx->lock); told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; } } else told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; if ((uintptr_t)told_cnt > (uintptr_t)1U) told_cnt->epoch++; if ((uintptr_t)cnt > (uintptr_t)1U) { prof_ctx_set(ptr, cnt->ctx); cnt->epoch++; } else if (ptr != NULL) prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); /*********/ mb_write(); /*********/ if ((uintptr_t)told_cnt > (uintptr_t)1U) { told_cnt->cnts.curobjs--; told_cnt->cnts.curbytes -= old_usize; } if ((uintptr_t)cnt > (uintptr_t)1U) { cnt->cnts.curobjs++; cnt->cnts.curbytes += usize; if (opt_prof_accum) { cnt->cnts.accumobjs++; cnt->cnts.accumbytes += usize; } } /*********/ mb_write(); /*********/ if ((uintptr_t)told_cnt > (uintptr_t)1U) told_cnt->epoch++; if ((uintptr_t)cnt > (uintptr_t)1U) cnt->epoch++; /*********/ mb_write(); /* Not strictly necessary. */ } JEMALLOC_INLINE void prof_free(const void *ptr, size_t size) { prof_ctx_t *ctx = prof_ctx_get(ptr); cassert(config_prof); if ((uintptr_t)ctx > (uintptr_t)1) { prof_thr_cnt_t *tcnt; assert(size == isalloc(ptr, true)); tcnt = prof_lookup(ctx->bt); if (tcnt != NULL) { tcnt->epoch++; /*********/ mb_write(); /*********/ tcnt->cnts.curobjs--; tcnt->cnts.curbytes -= size; /*********/ mb_write(); /*********/ tcnt->epoch++; /*********/ mb_write(); /*********/ } else { /* * OOM during free() cannot be propagated, so operate * directly on cnt->ctx->cnt_merged. 
*/ malloc_mutex_lock(ctx->lock); ctx->cnt_merged.curobjs--; ctx->cnt_merged.curbytes -= size; malloc_mutex_unlock(ctx->lock); } } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
14,522
26.610266
80
h
null
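prof_thr_cnt_s above documents a seqlock-style protocol: the writer makes the epoch odd, updates the counters, then makes it even again (see prof_malloc_record_object()). The reader side is not shown in the header; here is a self-contained sketch of it, with simplified local types (cnt_t, thr_cnt_t) and __sync_synchronize() standing in for the memory barriers:

#include <stdint.h>

typedef struct {
    int64_t curobjs;
    int64_t curbytes;
} cnt_t;

typedef struct {
    volatile unsigned epoch;
    cnt_t cnts;
} thr_cnt_t;

static cnt_t
cnt_read_consistent(const thr_cnt_t *c)
{
    cnt_t snap;
    unsigned e0, e1;

    do {
        e0 = c->epoch;      /* must be even: no update in progress */
        __sync_synchronize();
        snap = c->cnts;     /* copy both counters */
        __sync_synchronize();
        e1 = c->epoch;      /* must equal e0: nothing changed meanwhile */
    } while ((e0 & 1) != 0 || e0 != e1);
    return (snap);
}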
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/hash.h
/* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for * details. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed); void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]); void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]); void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. */ JEMALLOC_INLINE uint32_t hash_rotl_32(uint32_t x, int8_t r) { return (x << r) | (x >> (32 - r)); } JEMALLOC_INLINE uint64_t hash_rotl_64(uint64_t x, int8_t r) { return (x << r) | (x >> (64 - r)); } JEMALLOC_INLINE uint32_t hash_get_block_32(const uint32_t *p, int i) { return (p[i]); } JEMALLOC_INLINE uint64_t hash_get_block_64(const uint64_t *p, int i) { return (p[i]); } JEMALLOC_INLINE uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return (h); } JEMALLOC_INLINE uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return (k); } JEMALLOC_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return (h1); } UNUSED JEMALLOC_INLINE void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; 
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } UNUSED JEMALLOC_INLINE void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; case 14: k2 ^= ((uint64_t)(tail[13])) << 40; case 13: k2 ^= ((uint64_t)(tail[12])) << 32; case 12: k2 ^= ((uint64_t)(tail[11])) << 24; case 11: k2 ^= ((uint64_t)(tail[10])) << 16; case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. 
*/ JEMALLOC_INLINE void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else uint64_t hashes[2]; hash_x86_128(key, len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,940
22.633929
80
h
null
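A short usage sketch for the hash() wrapper declared above (this assumes the jemalloc-internal headers are in scope; the seed value and the hash_example name are arbitrary):

static void
hash_example(void)
{
    const char key[] = "example key";
    size_t h[2];

    hash(key, sizeof(key) - 1, 1234, h);
    /*
     * Two independent hash values come back; a cuckoo-style table (such as
     * jemalloc's ckh) can use one per candidate bucket.
     */
}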
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/tsd.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum number of malloc_tsd users with cleanup functions. */ #define MALLOC_TSD_CLEANUPS_MAX 8 typedef bool (*malloc_tsd_cleanup_t)(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) typedef struct tsd_init_block_s tsd_init_block_t; typedef struct tsd_init_head_s tsd_init_head_t; #endif /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are four macros that support (at least) three use cases: file-private, * library-private, and library-private inlined. Following is an example * library-private tsd variable: * * In example.h: * typedef struct { * int x; * int y; * } example_t; * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) * malloc_tsd_protos(, example, example_t *) * malloc_tsd_externs(example, example_t *) * In example.c: * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} * example_t **example_tsd_get() {...} * void example_tsd_set(example_t **val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast *and* * dereference the function argument, e.g.: * * void * example_tsd_cleanup(void *arg) * { * example_t *example = *(example_t **)arg; * * [...] * if ([want the cleanup function to be called again]) { * example_tsd_set(&example); * } * } * * If example_tsd_set() is called within example_tsd_cleanup(), it will be * called again. This is similar to how pthreads TSD destruction works, except * that pthreads only calls the cleanup function again if the value was set to * non-NULL. */ /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##_tsd_boot(void); \ a_attr a_type * \ a_name##_tsd_get(void); \ a_attr void \ a_name##_tsd_set(a_type *val); /* malloc_tsd_externs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##_tls; \ extern __thread bool a_name##_initialized; \ extern bool a_name##_booted; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##_tls; \ extern pthread_key_t a_name##_tsd; \ extern bool a_name##_booted; #elif (defined(_WIN32)) #define malloc_tsd_externs(a_name, a_type) \ extern DWORD a_name##_tsd; \ extern bool a_name##_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##_tsd; \ extern tsd_init_head_t a_name##_tsd_init_head; \ extern bool a_name##_booted; #endif /* malloc_tsd_data(). 
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##_tls = a_initializer; \ a_attr __thread bool JEMALLOC_TLS_MODEL \ a_name##_initialized = false; \ a_attr bool a_name##_booted = false; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##_tls = a_initializer; \ a_attr pthread_key_t a_name##_tsd; \ a_attr bool a_name##_booted = false; #elif (defined(_WIN32)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr DWORD a_name##_tsd; \ a_attr bool a_name##_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##_tsd; \ a_attr tsd_init_head_t a_name##_tsd_init_head = { \ ql_head_initializer(blocks), \ MALLOC_MUTEX_INITIALIZER \ }; \ a_attr bool a_name##_booted = false; #endif /* malloc_tsd_funcs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##_tsd_cleanup_wrapper(void) \ { \ \ if (a_name##_initialized) { \ a_name##_initialized = false; \ a_cleanup(&a_name##_tls); \ } \ return (a_name##_initialized); \ } \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##_tsd_cleanup_wrapper); \ } \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ \ assert(a_name##_booted); \ return (&a_name##_tls); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ \ assert(a_name##_booted); \ a_name##_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ a_name##_initialized = true; \ } #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ return (true); \ } \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ \ assert(a_name##_booted); \ return (&a_name##_tls); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ \ assert(a_name##_booted); \ a_name##_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_setspecific(a_name##_tsd, \ (void *)(&a_name##_tls))) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ } \ } #elif (defined(_WIN32)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Data structure. */ \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##_tsd_wrapper_t; \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##_tsd_cleanup_wrapper(void) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ if (wrapper == NULL) \ return (false); \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ a_type val = wrapper->val; \ a_type tsd_static_data = a_initializer; \ wrapper->initialized = false; \ wrapper->val = tsd_static_data; \ a_cleanup(&val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. 
*/ \ return (true); \ } \ } \ malloc_tsd_dalloc(wrapper); \ return (false); \ } \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ a_name##_tsd = TlsAlloc(); \ if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ return (true); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##_tsd_cleanup_wrapper); \ } \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. */ \ a_attr a_name##_tsd_wrapper_t * \ a_name##_tsd_get_wrapper(void) \ { \ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ TlsGetValue(a_name##_tsd); \ \ if (wrapper == NULL) { \ wrapper = (a_name##_tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ static a_type tsd_static_data = a_initializer; \ wrapper->initialized = false; \ wrapper->val = tsd_static_data; \ } \ if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ return (wrapper); \ } \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ return (&wrapper->val); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Data structure. */ \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##_tsd_wrapper_t; \ /* Initialization/cleanup. */ \ a_attr void \ a_name##_tsd_cleanup_wrapper(void *arg) \ { \ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ if (pthread_setspecific(a_name##_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ return; \ } \ } \ malloc_tsd_dalloc(wrapper); \ } \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (pthread_key_create(&a_name##_tsd, \ a_name##_tsd_cleanup_wrapper) != 0) \ return (true); \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. 
*/ \ a_attr a_name##_tsd_wrapper_t * \ a_name##_tsd_get_wrapper(void) \ { \ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ pthread_getspecific(a_name##_tsd); \ \ if (wrapper == NULL) { \ tsd_init_block_t block; \ wrapper = tsd_init_check_recursion( \ &a_name##_tsd_init_head, &block); \ if (wrapper) \ return (wrapper); \ wrapper = (a_name##_tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ block.data = wrapper; \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ static a_type tsd_static_data = a_initializer; \ wrapper->initialized = false; \ wrapper->val = tsd_static_data; \ } \ if (pthread_setspecific(a_name##_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ tsd_init_finish(&a_name##_tsd_init_head, &block); \ } \ return (wrapper); \ } \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ return (&wrapper->val); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif /* * Vector data container implemented as TSD/TLS macros. * These functions behave exactly like the regular version, * except for the fact that they take an index argument in accessor functions. */ /* malloc_tsd_vector_protos(). */ #define malloc_tsd_vector_protos(a_attr, a_name) \ malloc_tsd_protos(a_attr, a_name, vector_t) #define malloc_tsd_vector_externs(a_name) \ malloc_tsd_externs(a_name, vector_t) #define malloc_tsd_vector_data(a_attr, a_name) \ malloc_tsd_data(a_attr, a_name, vector_t, VECTOR_INITIALIZER) #define malloc_tsd_vector_funcs(a_attr, a_name, a_type, a_cleanup) \ malloc_tsd_funcs(a_attr, a_name, vector_t, VECTOR_INITIALIZER, \ a_cleanup) \ \ a_attr a_type * \ a_name##_vec_tsd_get(uint32_t index) \ { \ vector_t *v = a_name##_tsd_get(); \ return (a_type *)vec_get(v, index); \ } \ \ a_attr void \ a_name##_vec_tsd_set(uint32_t index, a_type *val) \ { \ vector_t *v = a_name##_tsd_get(); \ vec_set(v, index, (void *)val); \ } \ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; struct tsd_init_head_s { ql_head(tsd_init_block_t) blocks; malloc_mutex_t lock; }; #endif #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *); void malloc_tsd_cleanup_register(bool (*f)(void)); void malloc_tsd_boot(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ 
/******************************************************************************/
14,493
29.772824
80
h
null
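The comment at the top of tsd.h already walks through an example; for completeness, here is a compact instantiation plus a call site, using malloc_tsd_no_cleanup since no destructor is needed. counter_t, COUNTER_INITIALIZER and counter_bump are made-up names, the initializer wrapper is spelled JEMALLOC_ARG_CONCAT as in pool.h above, and counter_tsd_boot() must run during startup before any get/set.

typedef struct {
    int x;
    int y;
} counter_t;
#define COUNTER_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})

malloc_tsd_protos(, counter, counter_t)
malloc_tsd_externs(counter, counter_t)
/* The two lines below belong in exactly one .c file: */
malloc_tsd_data(, counter, counter_t, COUNTER_INITIALIZER)
malloc_tsd_funcs(, counter, counter_t, COUNTER_INITIALIZER,
    malloc_tsd_no_cleanup)

static void
counter_bump(void)
{
    counter_t *c = counter_tsd_get();  /* this thread's instance */

    c->x++;
    counter_tsd_set(c);  /* copies *c back, keeping the documented idiom */
}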
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemblk.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemblk.h -- definitions of libpmemblk entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemblk provides support for arrays of atomically-writable blocks. * * See libpmemblk(3) for details. */ #ifndef LIBPMEMBLK_H #define LIBPMEMBLK_H 1 #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemblk_open pmemblk_openW #define pmemblk_create pmemblk_createW #define pmemblk_check pmemblk_checkW #define pmemblk_check_version pmemblk_check_versionW #define pmemblk_errormsg pmemblk_errormsgW #define pmemblk_ctl_get pmemblk_ctl_getW #define pmemblk_ctl_set pmemblk_ctl_setW #define pmemblk_ctl_exec pmemblk_ctl_execW #else #define pmemblk_open pmemblk_openU #define pmemblk_create pmemblk_createU #define pmemblk_check pmemblk_checkU #define pmemblk_check_version pmemblk_check_versionU #define pmemblk_errormsg pmemblk_errormsgU #define pmemblk_ctl_get pmemblk_ctl_getU #define pmemblk_ctl_set pmemblk_ctl_setU #define pmemblk_ctl_exec pmemblk_ctl_execU #endif #endif #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> /* * opaque type, internal to libpmemblk */ typedef struct pmemblk PMEMblkpool; /* * PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version * of the libpmemblk API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemblk_check_version(). 
*/ #define PMEMBLK_MAJOR_VERSION 1 #define PMEMBLK_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemblk_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemblk_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemblk_check_versionW(unsigned major_required, unsigned minor_required); #endif /* XXX - unify minimum pool size for both OS-es */ #ifndef _WIN32 /* minimum pool size: 16MiB + 4KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8)) #else /* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64)) #endif /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #define PMEMBLK_MIN_BLK ((size_t)512) #ifndef _WIN32 PMEMblkpool *pmemblk_open(const char *path, size_t bsize); #else PMEMblkpool *pmemblk_openU(const char *path, size_t bsize); PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize); #endif #ifndef _WIN32 PMEMblkpool *pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode); #else PMEMblkpool *pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode); PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemblk_check(const char *path, size_t bsize); #else int pmemblk_checkU(const char *path, size_t bsize); int pmemblk_checkW(const wchar_t *path, size_t bsize); #endif void pmemblk_close(PMEMblkpool *pbp); size_t pmemblk_bsize(PMEMblkpool *pbp); size_t pmemblk_nblock(PMEMblkpool *pbp); int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno); int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno); int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno); int pmemblk_set_error(PMEMblkpool *pbp, long long blockno); /* * Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemblk. */ void pmemblk_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemblk_errormsg(void); #else const char *pmemblk_errormsgU(void); const wchar_t *pmemblk_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg); #else int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemblk.h */
6,395
33.203209
79
h
null
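A minimal usage sketch of the block API declared in libpmemblk.h above: create a pool of atomically-writable blocks, write one block and read it back. The pool path, the 1 KiB element size and the block number are illustrative assumptions.

#include <libpmemblk.h>
#include <stdio.h>
#include <string.h>

#define ELEMENT_SIZE 1024	/* one block = 1 KiB (illustrative) */

int
main(void)
{
	PMEMblkpool *pbp = pmemblk_create("/pmem/blk.pool", ELEMENT_SIZE,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	printf("pool holds %zu blocks of %zu bytes\n",
			pmemblk_nblock(pbp), pmemblk_bsize(pbp));

	char buf[ELEMENT_SIZE] = "written atomically";
	if (pmemblk_write(pbp, buf, 5) < 0)	/* block number 5 */
		perror("pmemblk_write");

	char out[ELEMENT_SIZE];
	if (pmemblk_read(pbp, out, 5) == 0)
		printf("block 5: %s\n", out);

	pmemblk_close(pbp);
	return 0;
}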
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmempool.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmempool.h -- definitions of libpmempool entry points * * See libpmempool(3) for details. */ #ifndef LIBPMEMPOOL_H #define LIBPMEMPOOL_H 1 #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmempool_check_status pmempool_check_statusW #define pmempool_check_args pmempool_check_argsW #define pmempool_check_init pmempool_check_initW #define pmempool_check pmempool_checkW #define pmempool_sync pmempool_syncW #define pmempool_transform pmempool_transformW #define pmempool_rm pmempool_rmW #define pmempool_check_version pmempool_check_versionW #define pmempool_errormsg pmempool_errormsgW #define pmempool_feature_enable pmempool_feature_enableW #define pmempool_feature_disable pmempool_feature_disableW #define pmempool_feature_query pmempool_feature_queryW #else #define pmempool_check_status pmempool_check_statusU #define pmempool_check_args pmempool_check_argsU #define pmempool_check_init pmempool_check_initU #define pmempool_check pmempool_checkU #define pmempool_sync pmempool_syncU #define pmempool_transform pmempool_transformU #define pmempool_rm pmempool_rmU #define pmempool_check_version pmempool_check_versionU #define pmempool_errormsg pmempool_errormsgU #define pmempool_feature_enable pmempool_feature_enableU #define pmempool_feature_disable pmempool_feature_disableU #define pmempool_feature_query pmempool_feature_queryU #endif #endif #ifdef __cplusplus extern "C" { #endif #include <stdint.h> #include <stddef.h> #include <limits.h> /* PMEMPOOL CHECK */ /* * pool types */ enum pmempool_pool_type { PMEMPOOL_POOL_TYPE_DETECT, PMEMPOOL_POOL_TYPE_LOG, PMEMPOOL_POOL_TYPE_BLK, PMEMPOOL_POOL_TYPE_OBJ, PMEMPOOL_POOL_TYPE_BTT, PMEMPOOL_POOL_TYPE_CTO, }; /* * perform repairs */ #define PMEMPOOL_CHECK_REPAIR (1U << 0) /* * emulate repairs */ #define PMEMPOOL_CHECK_DRY_RUN (1U << 1) /* * perform hazardous repairs */ #define PMEMPOOL_CHECK_ADVANCED (1U << 2) /* * do not ask before repairs */ #define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3) /* * generate info statuses */ #define 
PMEMPOOL_CHECK_VERBOSE (1U << 4) /* * generate string format statuses */ #define PMEMPOOL_CHECK_FORMAT_STR (1U << 5) /* * types of check statuses */ enum pmempool_check_msg_type { PMEMPOOL_CHECK_MSG_TYPE_INFO, PMEMPOOL_CHECK_MSG_TYPE_ERROR, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, }; /* * check result types */ enum pmempool_check_result { PMEMPOOL_CHECK_RESULT_CONSISTENT, PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT, PMEMPOOL_CHECK_RESULT_REPAIRED, PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR, PMEMPOOL_CHECK_RESULT_ERROR, PMEMPOOL_CHECK_RESULT_SYNC_REQ, }; /* * check context */ typedef struct pmempool_check_ctx PMEMpoolcheck; /* * finalize the check and get the result */ enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc); /* PMEMPOOL RM */ #define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */ #define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */ #define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */ /* * LIBPMEMPOOL SYNC */ /* * fix bad blocks - it requires creating or reading special recovery files */ #define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0) /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_SYNC_DRY_RUN (1U << 1) /* * LIBPMEMPOOL TRANSFORM */ /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1) /* * PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current version * of the libpmempool API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmempool_check_version(). */ #define PMEMPOOL_MAJOR_VERSION 1 #define PMEMPOOL_MINOR_VERSION 3 /* * check status */ struct pmempool_check_statusU { enum pmempool_check_msg_type type; struct { const char *msg; const char *answer; } str; }; #ifndef _WIN32 #define pmempool_check_status pmempool_check_statusU #else struct pmempool_check_statusW { enum pmempool_check_msg_type type; struct { const wchar_t *msg; const wchar_t *answer; } str; }; #endif /* * check context arguments */ struct pmempool_check_argsU { const char *path; const char *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #ifndef _WIN32 #define pmempool_check_args pmempool_check_argsU #else struct pmempool_check_argsW { const wchar_t *path; const wchar_t *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #endif /* * initialize a check context */ #ifndef _WIN32 PMEMpoolcheck * pmempool_check_init(struct pmempool_check_args *args, size_t args_size); #else PMEMpoolcheck * pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size); PMEMpoolcheck * pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size); #endif /* * start / resume the check */ #ifndef _WIN32 struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc); #else struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc); struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc); #endif /* * LIBPMEMPOOL SYNC & TRANSFORM */ /* * Synchronize data between replicas within a poolset. * * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_sync(const char *poolset_file, unsigned flags); #else int pmempool_syncU(const char *poolset_file, unsigned flags); int pmempool_syncW(const wchar_t *poolset_file, unsigned flags); #endif /* * Modify internal structure of a poolset. 
* * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_transform(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); #else int pmempool_transformU(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); int pmempool_transformW(const wchar_t *poolset_file_src, const wchar_t *poolset_file_dst, unsigned flags); #endif /* PMEMPOOL feature enable, disable, query */ /* * feature types */ enum pmempool_feature { PMEMPOOL_FEAT_SINGLEHDR, PMEMPOOL_FEAT_CKSUM_2K, PMEMPOOL_FEAT_SHUTDOWN_STATE, PMEMPOOL_FEAT_CHECK_BAD_BLOCKS, }; /* PMEMPOOL FEATURE ENABLE */ #ifndef _WIN32 int pmempool_feature_enable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_enableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE DISABLE */ #ifndef _WIN32 int pmempool_feature_disable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_disableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE QUERY */ #ifndef _WIN32 int pmempool_feature_query(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_queryU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL RM */ #ifndef _WIN32 int pmempool_rm(const char *path, unsigned flags); #else int pmempool_rmU(const char *path, unsigned flags); int pmempool_rmW(const wchar_t *path, unsigned flags); #endif #ifndef _WIN32 const char *pmempool_check_version(unsigned major_required, unsigned minor_required); #else const char *pmempool_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmempool_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmempool_errormsg(void); #else const char *pmempool_errormsgU(void); const wchar_t *pmempool_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmempool.h */
9,501
24.820652
80
h
null
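A sketch of the check flow built from the libpmempool entry points above (init, iterate, end). The pool path is an illustrative assumption, and the dry-run/always-yes flags are chosen only to keep the example non-interactive.

#include <libpmempool.h>
#include <stdio.h>

int
main(void)
{
	struct pmempool_check_args args = {
		.path = "/pmem/blk.pool",	/* illustrative path */
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		.flags = PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_DRY_RUN |
				PMEMPOOL_CHECK_ALWAYS_YES |
				PMEMPOOL_CHECK_FORMAT_STR,
	};

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL) {
		perror("pmempool_check_init");
		return 1;
	}

	/* drain all statuses produced by the check */
	struct pmempool_check_status *status;
	while ((status = pmempool_check(ppc)) != NULL) {
		if (status->type == PMEMPOOL_CHECK_MSG_TYPE_ERROR ||
		    status->type == PMEMPOOL_CHECK_MSG_TYPE_INFO)
			printf("%s\n", status->str.msg);
	}

	enum pmempool_check_result result = pmempool_check_end(ppc);
	return (result == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
	    result == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : 1;
}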
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemcto.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemcto -- definitions of libpmemcto entry points * * This library exposes memory-mapped files as persistent memory heap * with malloc-like interfaces. * * See libpmemcto(3) for details. */ #ifndef LIBPMEMCTO_H #define LIBPMEMCTO_H 1 #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemcto_open pmemcto_openW #define pmemcto_create pmemcto_createW #define pmemcto_check pmemcto_checkW #define pmemcto_check_version pmemcto_check_versionW #define pmemcto_errormsg pmemcto_errormsgW #else #define pmemcto_open pmemcto_openU #define pmemcto_create pmemcto_createU #define pmemcto_check pmemcto_checkU #define pmemcto_check_version pmemcto_check_versionU #define pmemcto_errormsg pmemcto_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> #include <wchar.h> /* * opaque type, internal to libpmemcto */ typedef struct pmemcto PMEMctopool; /* * PMEMCTO_MAJOR_VERSION and PMEMCTO_MINOR_VERSION provide the current version * of the libpmemcto API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemcto_check_version(). */ #define PMEMCTO_MAJOR_VERSION 1 #define PMEMCTO_MINOR_VERSION 0 #ifndef _WIN32 const char *pmemcto_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemcto_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemcto_check_versionW(unsigned major_required, unsigned minor_required); #endif /* minimum pool size: 16MB */ #define PMEMCTO_MIN_POOL ((size_t)(1024 * 1024 * 16)) /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. 
*/ #define PMEMCTO_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* maximum layout size */ #define PMEMCTO_MAX_LAYOUT ((size_t)1024) #ifndef _WIN32 PMEMctopool *pmemcto_open(const char *path, const char *layout); #else PMEMctopool *pmemcto_openU(const char *path, const char *layout); PMEMctopool *pmemcto_openW(const wchar_t *path, const wchar_t *layout); #endif #ifndef _WIN32 PMEMctopool *pmemcto_create(const char *path, const char *layout, size_t poolsize, mode_t mode); #else PMEMctopool *pmemcto_createU(const char *path, const char *layout, size_t poolsize, mode_t mode); PMEMctopool *pmemcto_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemcto_check(const char *path, const char *layout); #else int pmemcto_checkU(const char *path, const char *layout); int pmemcto_checkW(const wchar_t *path, const wchar_t *layout); #endif void pmemcto_close(PMEMctopool *pcp); void pmemcto_stats_print(PMEMctopool *pcp, const char *opts); /* * support for malloc and friends... */ void *pmemcto_malloc(PMEMctopool *pcp, size_t size); void pmemcto_free(PMEMctopool *pcp, void *ptr); void *pmemcto_calloc(PMEMctopool *pcp, size_t nmemb, size_t size); void *pmemcto_realloc(PMEMctopool *pcp, void *ptr, size_t size); void *pmemcto_aligned_alloc(PMEMctopool *pcp, size_t alignment, size_t size); char *pmemcto_strdup(PMEMctopool *pcp, const char *s); wchar_t *pmemcto_wcsdup(PMEMctopool *pcp, const wchar_t *s); size_t pmemcto_malloc_usable_size(PMEMctopool *pcp, void *ptr); /* * close-to-open persistence... */ void pmemcto_set_root_pointer(PMEMctopool *pcp, void *ptr); void *pmemcto_get_root_pointer(PMEMctopool *pcp); /* * Passing NULL to pmemcto_set_funcs() tells libpmemcto to continue to use * the default for that function. The replacement functions must * not make calls back into libpmemcto. * * The print_func is called by libpmemcto based on the environment * variable PMEMCTO_LOG_LEVEL: * 0 or unset: print_func is only called for pmemcto_stats_print() * 1: additional details are logged when errors are returned * 2: basic operations (allocations/frees) are logged * 3: produce very verbose tracing of function calls in libpmemcto * 4: also log obscure stuff used to debug the library itself * * The default print_func prints to stderr. Applications can override this * by setting the environment variable PMEMCTO_LOG_FILE, or by supplying a * replacement print function. */ void pmemcto_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s), void (*print_func)(const char *s)); #ifndef _WIN32 const char *pmemcto_errormsg(void); #else const char *pmemcto_errormsgU(void); const wchar_t *pmemcto_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemcto.h */
6,315
32.956989
79
h
null
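A minimal sketch of the close-to-open persistence model described in libpmemcto.h above: on the first run the pool is created and a root object is planted, on later runs the same object is found again through the root pointer. The pool path and layout name are illustrative assumptions.

#include <libpmemcto.h>
#include <string.h>
#include <stdio.h>

struct root {
	char msg[64];
};

int
main(void)
{
	/* illustrative pool path and layout name */
	PMEMctopool *pcp = pmemcto_open("/pmem/cto.pool", "example");
	if (pcp == NULL) {
		/* first run: create the pool and seed the root object */
		pcp = pmemcto_create("/pmem/cto.pool", "example",
				PMEMCTO_MIN_POOL, 0666);
		if (pcp == NULL) {
			perror("pmemcto_create");
			return 1;
		}
		struct root *r = pmemcto_malloc(pcp, sizeof(*r));
		if (r == NULL)
			return 1;
		strcpy(r->msg, "kept across close/open");
		pmemcto_set_root_pointer(pcp, r);
	}

	struct root *r = pmemcto_get_root_pointer(pcp);
	if (r != NULL)
		printf("root says: %s\n", r->msg);

	pmemcto_close(pcp);
	return 0;
}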
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/librpmem.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL) * * This library provides low-level support for remote access to persistent * memory utilizing RDMA-capable RNICs. * * See librpmem(3) for details. */ #ifndef LIBRPMEM_H #define LIBRPMEM_H 1 #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> #include <stdint.h> typedef struct rpmem_pool RPMEMpool; #define RPMEM_POOL_HDR_SIG_LEN 8 #define RPMEM_POOL_HDR_UUID_LEN 16 /* uuid byte length */ #define RPMEM_POOL_USER_FLAGS_LEN 16 struct rpmem_pool_attr { char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ uint32_t compat_features; /* mask: compatible "may" features */ uint32_t incompat_features; /* mask: "must support" features */ uint32_t ro_compat_features; /* mask: force RO if unsupported */ unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */ unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */ unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */ }; RPMEMpool *rpmem_create(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, const struct rpmem_pool_attr *create_attr); RPMEMpool *rpmem_open(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, struct rpmem_pool_attr *open_attr); int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr); int rpmem_close(RPMEMpool *rpp); #define RPMEM_PERSIST_RELAXED (1U << 0) int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane); int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane); #define RPMEM_REMOVE_FORCE 0x1 #define RPMEM_REMOVE_POOL_SET 0x2 int rpmem_remove(const char *target, 
const char *pool_set, int flags); /* * RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of * the librpmem API as provided by this header file. Applications can verify * that the version available at run-time is compatible with the version used * at compile-time by passing these defines to rpmem_check_version(). */ #define RPMEM_MAJOR_VERSION 1 #define RPMEM_MINOR_VERSION 2 const char *rpmem_check_version(unsigned major_required, unsigned minor_required); const char *rpmem_errormsg(void); /* minimum size of a pool */ #define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifdef __cplusplus } #endif #endif /* librpmem.h */
4,513
35.699187
77
h
null
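A sketch of replicating a locally mapped pool to a remote node with the librpmem calls declared above. The target name "remote-host", the poolset name "pool.set", the pool size and the persisted offset are all illustrative assumptions; a running rpmemd on the target and a matching poolset file are assumed as well.

#include <librpmem.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define POOL_SIZE (32 * 1024 * 1024)	/* illustrative local pool size */

int
main(void)
{
	/* page-aligned local copy of the pool */
	void *pool = aligned_alloc(4096, POOL_SIZE);
	if (pool == NULL)
		return 1;
	memset(pool, 0, POOL_SIZE);

	struct rpmem_pool_attr attr;
	memset(&attr, 0, sizeof(attr));
	memcpy(attr.signature, "EXAMPLE", sizeof("EXAMPLE"));

	unsigned nlanes = 4;	/* desired on input, actual on output */

	RPMEMpool *rpp = rpmem_create("remote-host", "pool.set",
			pool, POOL_SIZE, &nlanes, &attr);
	if (rpp == NULL) {
		fprintf(stderr, "rpmem_create: %s\n", rpmem_errormsg());
		free(pool);
		return 1;
	}

	/* modify a region of the local pool, then replicate it remotely */
	memset((char *)pool + 8192, 0xab, 4096);
	if (rpmem_persist(rpp, 8192, 4096, 0 /* lane */, 0 /* flags */))
		fprintf(stderr, "rpmem_persist: %s\n", rpmem_errormsg());

	rpmem_close(rpp);
	free(pool);
	return 0;
}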
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libvmmalloc.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libvmmalloc.h -- definitions of libvmmalloc entry points * * This library exposes memory-mapped files as volatile memory (a la malloc) * * See libvmmalloc(3) for details. */ #ifndef LIBVMMALLOC_H #define LIBVMMALLOC_H 1 #ifdef __cplusplus extern "C" { #endif #define VMMALLOC_MAJOR_VERSION 1 #define VMMALLOC_MINOR_VERSION 1 #include <sys/types.h> #define VMMALLOC_MIN_POOL ((size_t)(1024 * 1024 * 14)) /* min pool size: 14MB */ /* * check compiler support for various function attributes */ #if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) #define GCC_VER (__GNUC__ * 100 + __GNUC_MINOR__) #if GCC_VER >= 296 #define __ATTR_MALLOC__ __attribute__((malloc)) #else #define __ATTR_MALLOC__ #endif #if GCC_VER >= 303 #define __ATTR_NONNULL__(x) __attribute__((nonnull(x))) #else #define __ATTR_NONNULL__(x) #endif #if GCC_VER >= 403 #define __ATTR_ALLOC_SIZE__(...) __attribute__((alloc_size(__VA_ARGS__))) #else #define __ATTR_ALLOC_SIZE__(...) #endif #if GCC_VER >= 409 #define __ATTR_ALLOC_ALIGN__(x) __attribute__((alloc_align(x))) #else #define __ATTR_ALLOC_ALIGN__(x) #endif #else /* clang, icc and other compilers */ #ifndef __has_attribute #define __has_attribute(x) 0 #endif #if __has_attribute(malloc) #define __ATTR_MALLOC__ __attribute__((malloc)) #else #define __ATTR_MALLOC__ #endif #if __has_attribute(nonnull) #define __ATTR_NONNULL__(x) __attribute__((nonnull(x))) #else #define __ATTR_NONNULL__(x) #endif #if __has_attribute(alloc_size) #define __ATTR_ALLOC_SIZE__(...) __attribute__((alloc_size(__VA_ARGS__))) #else #define __ATTR_ALLOC_SIZE__(...) 
#endif #if __has_attribute(alloc_align) #define __ATTR_ALLOC_ALIGN__(x) __attribute__((alloc_align(x))) #else #define __ATTR_ALLOC_ALIGN__(x) #endif #endif /* __GNUC__ */ extern void *malloc(size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_SIZE__(1); extern void *calloc(size_t nmemb, size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_SIZE__(1, 2); extern void *realloc(void *ptr, size_t size) __ATTR_ALLOC_SIZE__(2); extern void free(void *ptr); extern void cfree(void *ptr); extern int posix_memalign(void **memptr, size_t alignment, size_t size) __ATTR_NONNULL__(1); extern void *memalign(size_t boundary, size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_ALIGN__(1) __ATTR_ALLOC_SIZE__(2); extern void *aligned_alloc(size_t alignment, size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_ALIGN__(1) __ATTR_ALLOC_SIZE__(2); extern void *valloc(size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_SIZE__(1); extern void *pvalloc(size_t size) __ATTR_MALLOC__ __ATTR_ALLOC_SIZE__(1); extern size_t malloc_usable_size(void *ptr); #ifdef __cplusplus } #endif #endif /* libvmmalloc.h */
4,280
27.925676
80
h
null
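libvmmalloc replaces the standard allocator transparently, so the program itself stays ordinary C; a sketch is shown below. The preload command, the VMMALLOC_POOL_DIR/VMMALLOC_POOL_SIZE environment variables and the chosen values reflect the documented usage pattern and are given here as illustrative assumptions.

/*
 * The allocator is swapped in at run time, e.g.:
 *
 *   $ VMMALLOC_POOL_DIR=/pmem VMMALLOC_POOL_SIZE=$((32*1024*1024)) \
 *         LD_PRELOAD=libvmmalloc.so.1 ./a.out
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	char *p = malloc(64);	/* served from the memory-mapped pool */
	if (p == NULL)
		return 1;
	strcpy(p, "allocated through libvmmalloc");
	printf("%s\n", p);
	free(p);
	return 0;
}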
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj.h -- definitions of libpmemobj entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemobj provides a pmem-resident transactional object store. * * See libpmemobj(3) for details. */ #ifndef LIBPMEMOBJ_H #define LIBPMEMOBJ_H 1 #include <libpmemobj/action.h> #include <libpmemobj/atomic.h> #include <libpmemobj/ctl.h> #include <libpmemobj/iterator.h> #include <libpmemobj/lists_atomic.h> #include <libpmemobj/pool.h> #include <libpmemobj/thread.h> #include <libpmemobj/tx.h> #define PMEMOBJ_F_MEM_NODRAIN (1U << 0) #define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1) #define PMEMOBJ_F_MEM_TEMPORAL (1U << 2) #define PMEMOBJ_F_MEM_WC (1U << 3) #define PMEMOBJ_F_MEM_WB (1U << 4) #define PMEMOBJ_F_MEM_NOFLUSH (1U << 5) #define PMEMOBJ_F_RELAXED (1U << 31) #endif /* libpmemobj.h */
2,460
35.191176
79
h
null
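A minimal transactional sketch using the object store whose umbrella header is shown above; the TOID/TX macros are pulled in through the included sub-headers. The pool path is an illustrative assumption.

#include <libpmemobj.h>

struct my_root {
	int counter;
};

POBJ_LAYOUT_BEGIN(example);
POBJ_LAYOUT_ROOT(example, struct my_root);
POBJ_LAYOUT_END(example);

int
main(void)
{
	/* illustrative pool path */
	PMEMobjpool *pop = pmemobj_create("/pmem/obj.pool",
			POBJ_LAYOUT_NAME(example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);

	/* transactionally bump a counter in the root object */
	TX_BEGIN(pop) {
		TX_ADD(root);
		D_RW(root)->counter += 1;
	} TX_END

	pmemobj_close(pop);
	return 0;
}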
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemlog.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemlog.h -- definitions of libpmemlog entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemlog provides support for pmem-resident log files. * * See libpmemlog(3) for details. */ #ifndef LIBPMEMLOG_H #define LIBPMEMLOG_H 1 #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemlog_open pmemlog_openW #define pmemlog_create pmemlog_createW #define pmemlog_check pmemlog_checkW #define pmemlog_check_version pmemlog_check_versionW #define pmemlog_errormsg pmemlog_errormsgW #define pmemlog_ctl_get pmemlog_ctl_getW #define pmemlog_ctl_set pmemlog_ctl_setW #define pmemlog_ctl_exec pmemlog_ctl_execW #else #define pmemlog_open pmemlog_openU #define pmemlog_create pmemlog_createU #define pmemlog_check pmemlog_checkU #define pmemlog_check_version pmemlog_check_versionU #define pmemlog_errormsg pmemlog_errormsgU #define pmemlog_ctl_get pmemlog_ctl_getU #define pmemlog_ctl_set pmemlog_ctl_setU #define pmemlog_ctl_exec pmemlog_ctl_execU #endif #else #include <sys/uio.h> #endif #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> /* * opaque type, internal to libpmemlog */ typedef struct pmemlog PMEMlogpool; /* * PMEMLOG_MAJOR_VERSION and PMEMLOG_MINOR_VERSION provide the current * version of the libpmemlog API as provided by this header file. * Applications can verify that the version available at run-time * is compatible with the version used at compile-time by passing * these defines to pmemlog_check_version(). */ #define PMEMLOG_MAJOR_VERSION 1 #define PMEMLOG_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemlog_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemlog_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemlog_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * support for PMEM-resident log files... 
*/ #define PMEMLOG_MIN_POOL ((size_t)(1024 * 1024 * 2)) /* min pool size: 2MiB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMLOG_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifndef _WIN32 PMEMlogpool *pmemlog_open(const char *path); #else PMEMlogpool *pmemlog_openU(const char *path); PMEMlogpool *pmemlog_openW(const wchar_t *path); #endif #ifndef _WIN32 PMEMlogpool *pmemlog_create(const char *path, size_t poolsize, mode_t mode); #else PMEMlogpool *pmemlog_createU(const char *path, size_t poolsize, mode_t mode); PMEMlogpool *pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemlog_check(const char *path); #else int pmemlog_checkU(const char *path); int pmemlog_checkW(const wchar_t *path); #endif void pmemlog_close(PMEMlogpool *plp); size_t pmemlog_nbyte(PMEMlogpool *plp); int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count); int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt); long long pmemlog_tell(PMEMlogpool *plp); void pmemlog_rewind(PMEMlogpool *plp); void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg); /* * Passing NULL to pmemlog_set_funcs() tells libpmemlog to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemlog. */ void pmemlog_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemlog_errormsg(void); #else const char *pmemlog_errormsgU(void); const wchar_t *pmemlog_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg); #else int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemlog.h */
6,055
32.274725
80
h
null
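A minimal usage sketch of the log API declared in libpmemlog.h above: create a pool, append a record and walk the log back out. The pool path is an illustrative assumption.

#include <libpmemlog.h>
#include <stdio.h>
#include <string.h>

/* called by pmemlog_walk(); with chunksize 0 it sees the whole log once */
static int
print_log(const void *buf, size_t len, void *arg)
{
	(void) arg;
	fwrite(buf, 1, len, stdout);
	return 0;
}

int
main(void)
{
	/* illustrative pool path */
	PMEMlogpool *plp = pmemlog_create("/pmem/log.pool",
			PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL) {
		perror("pmemlog_create");
		return 1;
	}

	const char line[] = "appended durably\n";
	if (pmemlog_append(plp, line, strlen(line)) < 0)
		perror("pmemlog_append");

	printf("write offset: %lld of %zu usable bytes\n",
			pmemlog_tell(plp), pmemlog_nbyte(plp));

	pmemlog_walk(plp, 0, print_log, NULL);

	pmemlog_close(plp);
	return 0;
}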
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libvmem.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libvmem.h -- definitions of libvmem entry points * * This library exposes memory-mapped files as volatile memory (a la malloc) * * See libvmem(3) for details. */ #ifndef LIBVMEM_H #define LIBVMEM_H 1 #ifdef _WIN32 #ifndef PMDK_UTF8_API #define vmem_create vmem_createW #define vmem_check_version vmem_check_versionW #define vmem_errormsg vmem_errormsgW #else #define vmem_create vmem_createU #define vmem_check_version vmem_check_versionU #define vmem_errormsg vmem_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> #include <stddef.h> typedef struct vmem VMEM; /* opaque type internal to libvmem */ /* * managing volatile memory pools... */ #define VMEM_MIN_POOL ((size_t)(1024 * 1024 * 14)) /* min pool size: 14MB */ #ifndef _WIN32 VMEM *vmem_create(const char *dir, size_t size); #else VMEM *vmem_createU(const char *dir, size_t size); VMEM *vmem_createW(const wchar_t *dir, size_t size); #endif VMEM *vmem_create_in_region(void *addr, size_t size); void vmem_delete(VMEM *vmp); int vmem_check(VMEM *vmp); void vmem_stats_print(VMEM *vmp, const char *opts); /* * support for malloc and friends... */ void *vmem_malloc(VMEM *vmp, size_t size); void vmem_free(VMEM *vmp, void *ptr); void *vmem_calloc(VMEM *vmp, size_t nmemb, size_t size); void *vmem_realloc(VMEM *vmp, void *ptr, size_t size); void *vmem_aligned_alloc(VMEM *vmp, size_t alignment, size_t size); char *vmem_strdup(VMEM *vmp, const char *s); wchar_t *vmem_wcsdup(VMEM *vmp, const wchar_t *s); size_t vmem_malloc_usable_size(VMEM *vmp, void *ptr); /* * managing overall library behavior... */ /* * VMEM_MAJOR_VERSION and VMEM_MINOR_VERSION provide the current * version of the libvmem API as provided by this header file. * Applications can verify that the version available at run-time * is compatible with the version used at compile-time by passing * these defines to vmem_check_version(). 
*/ #define VMEM_MAJOR_VERSION 1 #define VMEM_MINOR_VERSION 1 #ifndef _WIN32 const char *vmem_check_version(unsigned major_required, unsigned minor_required); #else const char *vmem_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *vmem_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to vmem_set_funcs() tells libvmem to continue to use * the default for that function. The replacement functions must * not make calls back into libvmem. * * The print_func is called by libvmem based on the environment * variable VMEM_LOG_LEVEL: * 0 or unset: print_func is only called for vmem_stats_print() * 1: additional details are logged when errors are returned * 2: basic operations (allocations/frees) are logged * 3: produce very verbose tracing of function calls in libvmem * 4: also log obscure stuff used to debug the library itself * * The default print_func prints to stderr. Applications can override this * by setting the environment variable VMEM_LOG_FILE, or by supplying a * replacement print function. */ void vmem_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s), void (*print_func)(const char *s)); #ifndef _WIN32 const char *vmem_errormsg(void); #else const char *vmem_errormsgU(void); const wchar_t *vmem_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libvmem.h */
5,075
31.961039
76
h
null
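A minimal usage sketch of the volatile pool API declared in libvmem.h above: back a malloc-style heap with a memory-mapped file in a given directory. The /pmem directory is an illustrative assumption.

#include <libvmem.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	/* directory where the pool file is created; path is illustrative */
	VMEM *vmp = vmem_create("/pmem", VMEM_MIN_POOL);
	if (vmp == NULL) {
		perror("vmem_create");
		return 1;
	}

	char *s = vmem_malloc(vmp, 128);
	if (s != NULL) {
		strcpy(s, "volatile data backed by a memory-mapped file");
		printf("%s\n", s);
		vmem_free(vmp, s);
	}

	vmem_delete(vmp);
	return 0;
}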
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmem.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmem.h -- definitions of libpmem entry points * * This library provides support for programming with persistent memory (pmem). * * libpmem provides support for using raw pmem directly. * * See libpmem(3) for details. */ #ifndef LIBPMEM_H #define LIBPMEM_H 1 #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem_map_file pmem_map_fileW #define pmem_check_version pmem_check_versionW #define pmem_errormsg pmem_errormsgW #else #define pmem_map_file pmem_map_fileU #define pmem_check_version pmem_check_versionU #define pmem_errormsg pmem_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif #include <sys/types.h> /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. 
*/ #define PMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * flags supported by pmem_map_file() */ #define PMEM_FILE_CREATE (1 << 0) #define PMEM_FILE_EXCL (1 << 1) #define PMEM_FILE_SPARSE (1 << 2) #define PMEM_FILE_TMPFILE (1 << 3) #ifndef _WIN32 void *pmem_map_file(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #else void *pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); void *pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #endif int pmem_unmap(void *addr, size_t len); int pmem_is_pmem(const void *addr, size_t len); void pmem_persist(const void *addr, size_t len); int pmem_msync(const void *addr, size_t len); int pmem_has_auto_flush(void); void pmem_flush(const void *addr, size_t len); void pmem_deep_flush(const void *addr, size_t len); int pmem_deep_drain(const void *addr, size_t len); int pmem_deep_persist(const void *addr, size_t len); void pmem_drain(void); int pmem_has_hw_drain(void); void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len); void *pmem_memcpy_persist(void *pmemdest, const void *src, size_t len); void *pmem_memset_persist(void *pmemdest, int c, size_t len); void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memset_nodrain(void *pmemdest, int c, size_t len); #define PMEM_F_MEM_NODRAIN (1U << 0) #define PMEM_F_MEM_NONTEMPORAL (1U << 1) #define PMEM_F_MEM_TEMPORAL (1U << 2) #define PMEM_F_MEM_WC (1U << 3) #define PMEM_F_MEM_WB (1U << 4) #define PMEM_F_MEM_NOFLUSH (1U << 5) #define PMEM_F_MEM_VALID_FLAGS (PMEM_F_MEM_NODRAIN | \ PMEM_F_MEM_NONTEMPORAL | \ PMEM_F_MEM_TEMPORAL | \ PMEM_F_MEM_WC | \ PMEM_F_MEM_WB | \ PMEM_F_MEM_NOFLUSH) void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags); /* * PMEM_MAJOR_VERSION and PMEM_MINOR_VERSION provide the current version of the * libpmem API as provided by this header file. Applications can verify that * the version available at run-time is compatible with the version used at * compile-time by passing these defines to pmem_check_version(). */ #define PMEM_MAJOR_VERSION 1 #define PMEM_MINOR_VERSION 1 #ifndef _WIN32 const char *pmem_check_version(unsigned major_required, unsigned minor_required); #else const char *pmem_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmem_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmem_errormsg(void); #else const char *pmem_errormsgU(void); const wchar_t *pmem_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmem.h */
5,344
32.198758
80
h
null
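A minimal usage sketch of the raw pmem API declared in libpmem.h above: map a file, store to it, and make the stores durable, falling back to msync when the mapping is not real pmem. The file path and size are illustrative assumptions.

#include <libpmem.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	size_t mapped_len;
	int is_pmem;

	/* illustrative path; PMEM_FILE_CREATE makes a 4 KiB file if needed */
	char *addr = pmem_map_file("/pmem/raw.dat", 4096, PMEM_FILE_CREATE,
			0666, &mapped_len, &is_pmem);
	if (addr == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	strcpy(addr, "hello, persistent memory");

	/* make the stores durable; fall back to msync on non-pmem media */
	if (is_pmem)
		pmem_persist(addr, mapped_len);
	else
		pmem_msync(addr, mapped_len);

	pmem_unmap(addr, mapped_len);
	return 0;
}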
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/pmemcompat.h
/* * Copyright 2016-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * pmemcompat.h -- compatibility layer for libpmem* libraries */ #ifndef PMEMCOMPAT_H #define PMEMCOMPAT_H #include <windows.h> /* for backward compatibility */ #ifdef NVML_UTF8_API #pragma message( "NVML_UTF8_API macro is obsolete, please use PMDK_UTF8_API instead." ) #ifndef PMDK_UTF8_API #define PMDK_UTF8_API #endif #endif struct iovec { void *iov_base; size_t iov_len; }; typedef int mode_t; /* * XXX: this code will not work on windows if our library is included in * an extern block. */ #if defined(__cplusplus) && defined(_MSC_VER) && !defined(__typeof__) #include <type_traits> /* * These templates are used to remove a type reference(T&) which, in some * cases, is returned by decltype */ namespace pmem { namespace detail { template<typename T> struct get_type { using type = T; }; template<typename T> struct get_type<T*> { using type = T*; }; template<typename T> struct get_type<T&> { using type = T; }; } /* namespace detail */ } /* namespace pmem */ #define __typeof__(p) pmem::detail::get_type<decltype(p)>::type #endif #endif
2,676
27.784946
87
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/ctl.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/ctl.h -- definitions of pmemobj_ctl related entry points */ #ifndef LIBPMEMOBJ_CTL_H #define LIBPMEMOBJ_CTL_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Allocation class interface * * When requesting an object from the allocator, the first step is to determine * which allocation class best approximates the size of the object. * Once found, the appropriate free list, called bucket, for that * class is selected in a fashion that minimizes contention between threads. * Depending on the requested size and the allocation class, it might happen * that the object size (including required metadata) would be bigger than the * allocation class size - called unit size. In those situations, the object is * constructed from two or more units (up to 64). * * If the requested number of units cannot be retrieved from the selected * bucket, the thread reaches out to the global, shared, heap which manages * memory in 256 kilobyte chunks and gives it out in a best-fit fashion. This * operation must be performed under an exclusive lock. * Once the thread is in the possession of a chunk, the lock is dropped, and the * memory is split into units that repopulate the bucket. * * These are the CTL entry points that control allocation classes: * - heap.alloc_class.[class_id].desc * Creates/retrieves allocation class information * * It's VERY important to remember that the allocation classes are a RUNTIME * property of the allocator - they are NOT stored persistently in the pool. * It's recommended to always create custom allocation classes immediately after * creating or opening the pool, before any use. * If there are existing objects created using a class that is no longer stored * in the runtime state of the allocator, they can be normally freed, but * allocating equivalent objects will be done using the allocation class that * is currently defined for that size. 
* * Please see the libpmemobj man page for more information about entry points. */ /* * Persistent allocation header */ enum pobj_header_type { /* * 64-byte header used up until the version 1.3 of the library, * functionally equivalent to the compact header. * It's not recommended to create any new classes with this header. */ POBJ_HEADER_LEGACY, /* * 16-byte header used by the default allocation classes. All library * metadata is by default allocated using this header. * Supports type numbers and variably sized allocations. */ POBJ_HEADER_COMPACT, /* * 0-byte header with metadata stored exclusively in a bitmap. This * ensures that objects are allocated in memory contiguously and * without attached headers. * This can be used to create very small allocation classes, but it * does not support type numbers. * Additionally, allocations with this header can only span a single * unit. * Objects allocated with this header do show up when iterating through * the heap using pmemobj_first/pmemobj_next functions, but have a * type_num equal 0. */ POBJ_HEADER_NONE, MAX_POBJ_HEADER_TYPES }; /* * Description of allocation classes */ struct pobj_alloc_class_desc { /* * The number of bytes in a single unit of allocation. A single * allocation can span up to 64 units (or 1 in the case of no header). * If one creates an allocation class with a certain unit size and * forces it to handle bigger sizes, more than one unit * will be used. * For example, an allocation class with a compact header and 128 bytes * unit size, for a request of 200 bytes will create a memory block * containing 256 bytes that spans two units. The usable size of that * allocation will be 240 bytes: 2 * 128 - 16 (header). */ size_t unit_size; /* * Desired alignment of objects from the allocation class. * If non zero, must be a power of two and an even divisor of unit size. * * All allocation classes have default alignment * of 64. User data alignment is affected by the size of a header. For * compact one this means that the alignment is 48 bytes. * */ size_t alignment; /* * The minimum number of units that must be present in a * single, contiguous, memory block. * Those blocks (internally called runs), are fetched on demand from the * heap. Accessing that global state is a serialization point for the * allocator and thus it is imperative for performance and scalability * that a reasonable amount of memory is fetched in a single call. * Threads generally do not share memory blocks from which they * allocate, but blocks do go back to the global heap if they are no * longer actively used for allocation. */ unsigned units_per_block; /* * The header of allocations that originate from this allocation class. */ enum pobj_header_type header_type; /* * The identifier of this allocation class. 
*/ unsigned class_id; }; #ifndef _WIN32 /* EXPERIMENTAL */ int pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg); #else int pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg); int pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg); int pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg); #ifndef PMDK_UTF8_API #define pmemobj_ctl_get pmemobj_ctl_getW #define pmemobj_ctl_set pmemobj_ctl_setW #define pmemobj_ctl_exec pmemobj_ctl_execW #else #define pmemobj_ctl_get pmemobj_ctl_getU #define pmemobj_ctl_set pmemobj_ctl_setU #define pmemobj_ctl_exec pmemobj_ctl_execU #endif #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/ctl.h */
7,572
37.247475
80
h
null
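A sketch of registering a custom allocation class through the ctl interface described above. The pool path and layout name are illustrative, and the "heap.alloc_class.new.desc" ctl name plus the pmemobj_xalloc/POBJ_CLASS_ID/pmemobj_free calls are assumed to be available from the wider libpmemobj API in this tree rather than from this header alone.

#include <libpmemobj.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	/* illustrative pool path and layout name */
	PMEMobjpool *pop = pmemobj_open("/pmem/obj.pool", "example");
	if (pop == NULL)
		return 1;

	/* describe a 128-byte class; this is a runtime-only setting */
	struct pobj_alloc_class_desc desc;
	memset(&desc, 0, sizeof(desc));
	desc.unit_size = 128;
	desc.alignment = 0;		/* no extra alignment requirement */
	desc.units_per_block = 1000;
	desc.header_type = POBJ_HEADER_COMPACT;

	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc) != 0) {
		perror("pmemobj_ctl_set");
		pmemobj_close(pop);
		return 1;
	}

	/* the allocator fills in the id it assigned to the new class */
	printf("registered allocation class %u\n", desc.class_id);

	/* allocations can now request that class explicitly */
	PMEMoid oid;
	if (pmemobj_xalloc(pop, &oid, 100, 0,
			POBJ_CLASS_ID(desc.class_id), NULL, NULL) == 0)
		pmemobj_free(&oid);

	pmemobj_close(pop);
	return 0;
}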
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/lists_atomic.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/lists_atomic.h -- definitions of libpmemobj atomic lists macros */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_H #define LIBPMEMOBJ_LISTS_ATOMIC_H 1 #include <libpmemobj/lists_atomic_base.h> #include <libpmemobj/thread.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ #define POBJ_LIST_ENTRY(type)\ struct {\ TOID(type) pe_next;\ TOID(type) pe_prev;\ } #define POBJ_LIST_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ PMEMmutex lock;\ } #define POBJ_LIST_FIRST(head) ((head)->pe_first) #define POBJ_LIST_LAST(head, field) (\ TOID_IS_NULL((head)->pe_first) ?\ (head)->pe_first :\ D_RO((head)->pe_first)->field.pe_prev) #define POBJ_LIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_LIST_NEXT(elm, field) (D_RO(elm)->field.pe_next) #define POBJ_LIST_PREV(elm, field) (D_RO(elm)->field.pe_prev) #define POBJ_LIST_DEST_HEAD 1 #define POBJ_LIST_DEST_TAIL 0 #define POBJ_LIST_DEST_BEFORE 1 #define POBJ_LIST_DEST_AFTER 0 #define POBJ_LIST_FOREACH(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH", __FILE__, __LINE__),\ (var) = POBJ_LIST_FIRST((head));\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_NEXT((var), field),\ POBJ_LIST_FIRST((head))) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_NEXT((var), field))) #define POBJ_LIST_FOREACH_REVERSE(var, head, field)\ for (_pobj_debug_notice("POBJ_LIST_FOREACH_REVERSE", __FILE__, __LINE__),\ (var) = POBJ_LIST_LAST((head), field);\ TOID_IS_NULL((var)) == 0;\ TOID_EQUALS(POBJ_LIST_PREV((var), field),\ POBJ_LIST_LAST((head), field)) ?\ TOID_ASSIGN((var), OID_NULL) :\ ((var) = POBJ_LIST_PREV((var), field))) #define POBJ_LIST_INSERT_HEAD(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_INSERT_TAIL(pop, head, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), OID_NULL,\ 
POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_INSERT_AFTER(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_INSERT_BEFORE(pop, head, listelm, elm, field)\ pmemobj_list_insert((pop), \ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid,\ 1 /* before */, (elm).oid) #define POBJ_LIST_INSERT_NEW_HEAD(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_HEAD, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, size, constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), OID_NULL, POBJ_LIST_DEST_TAIL, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_AFTER(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF((head)->pe_first, field),\ (head), (listelm).oid, 0 /* after */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_INSERT_NEW_BEFORE(pop, head, listelm, field, size,\ constr, arg)\ pmemobj_list_insert_new((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (listelm).oid, 1 /* before */, (size),\ TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg)) #define POBJ_LIST_REMOVE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 0 /* no free */) #define POBJ_LIST_REMOVE_FREE(pop, head, elm, field)\ pmemobj_list_remove((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head), (elm).oid, 1 /* free */) #define POBJ_LIST_MOVE_ELEMENT_HEAD(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_HEAD, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_TAIL(pop, head, head_new, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new), OID_NULL, POBJ_LIST_DEST_TAIL, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_AFTER(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 0 /* after */, (elm).oid) #define POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,\ head, head_new, listelm, elm, field, field_new)\ pmemobj_list_move((pop),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\ (head),\ TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\ (head_new),\ (listelm).oid,\ 1 /* before */, (elm).oid) #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic.h */
6,636
33.21134
80
h
null
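A minimal sketch of how the atomic-list macros above fit together, assuming a hypothetical node type struct entry (type number 1), a list head that already lives in the pool, and an open pool handle pop:

#include <libpmemobj.h>

struct entry;
TOID_DECLARE(struct entry, 1);          /* hypothetical type number */

struct entry {
	uint64_t value;
	POBJ_LIST_ENTRY(struct entry) link; /* embedded pe_next/pe_prev pair */
};

POBJ_LIST_HEAD(entry_list, struct entry);

/* constructor runs before the new node becomes reachable from the list */
static int
entry_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct entry *e = ptr;
	e->value = *(uint64_t *)arg;
	pmemobj_persist(pop, &e->value, sizeof(e->value));
	return 0;
}

static uint64_t
append_and_sum(PMEMobjpool *pop, struct entry_list *head, uint64_t v)
{
	/* allocate, initialize and link at the tail in one atomic step */
	PMEMoid oid = POBJ_LIST_INSERT_NEW_TAIL(pop, head, link,
			sizeof(struct entry), entry_construct, &v);
	if (OID_IS_NULL(oid))
		return 0;

	uint64_t sum = 0;
	TOID(struct entry) it;
	POBJ_LIST_FOREACH(it, head, link)
		sum += D_RO(it)->value;
	return sum;
}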
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/iterator.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/iterator.h -- definitions of libpmemobj iterator macros */ #ifndef LIBPMEMOBJ_ITERATOR_H #define LIBPMEMOBJ_ITERATOR_H 1 #include <libpmemobj/iterator_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif static inline PMEMoid POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num) { PMEMoid _pobj_ret = pmemobj_first(pop); while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != type_num) { _pobj_ret = pmemobj_next(_pobj_ret); } return _pobj_ret; } static inline PMEMoid POBJ_NEXT_TYPE_NUM(PMEMoid o) { PMEMoid _pobj_ret = o; do { _pobj_ret = pmemobj_next(_pobj_ret);\ } while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != pmemobj_type_num(o)); return _pobj_ret; } #define POBJ_FIRST(pop, t) ((TOID(t))POBJ_FIRST_TYPE_NUM(pop, TOID_TYPE_NUM(t))) #define POBJ_NEXT(o) ((__typeof__(o))POBJ_NEXT_TYPE_NUM((o).oid)) /* * Iterates through every existing allocated object. */ #define POBJ_FOREACH(pop, varoid)\ for (_pobj_debug_notice("POBJ_FOREACH", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0; varoid = pmemobj_next(varoid)) /* * Safe variant of POBJ_FOREACH in which pmemobj_free on varoid is allowed */ #define POBJ_FOREACH_SAFE(pop, varoid, nvaroid)\ for (_pobj_debug_notice("POBJ_FOREACH_SAFE", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0 && (nvaroid = pmemobj_next(varoid), 1);\ varoid = nvaroid) /* * Iterates through every object of the specified type. */ #define POBJ_FOREACH_TYPE(pop, var)\ POBJ_FOREACH(pop, (var).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) /* * Safe variant of POBJ_FOREACH_TYPE in which pmemobj_free on var * is allowed. */ #define POBJ_FOREACH_SAFE_TYPE(pop, var, nvar)\ POBJ_FOREACH_SAFE(pop, (var).oid, (nvar).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator.h */
3,557
30.486726
80
h
null
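A small sketch of the iteration macros above; it only counts objects, so nothing in the pool is modified. The pool handle pop is assumed to be already open:

#include <libpmemobj.h>

static size_t
count_all_objects(PMEMobjpool *pop)
{
	PMEMoid oid;
	size_t n = 0;

	/* visits every allocated object, regardless of its type number */
	POBJ_FOREACH(pop, oid)
		n++;

	return n;
}

If objects were freed inside the loop body, the POBJ_FOREACH_SAFE variant from the same header would be the appropriate choice.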
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/lists_atomic_base.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/lists_atomic_base.h -- definitions of libpmemobj atomic lists */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_BASE_H #define LIBPMEMOBJ_LISTS_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, PMEMoid oid); PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid oid, int free); int pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old, size_t pe_new_offset, void *head_new, PMEMoid dest, int before, PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic_base.h */
2,537
35.782609
79
h
null
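A sketch of calling the base list functions directly, without the POBJ_LIST_* macros. The node and head layouts below are assumptions chosen to mirror what the macros would generate; head must point into the pool:

#include <stddef.h>
#include <libpmemobj.h>

struct node {
	uint64_t value;
	struct { PMEMoid pe_next, pe_prev; } link;
};

struct node_head {
	PMEMoid pe_first;
	PMEMmutex lock;
};

/* unlink the node and free it in a single atomic, thread-safe operation */
static int
drop_node(PMEMobjpool *pop, struct node_head *head, PMEMoid node)
{
	return pmemobj_list_remove(pop, offsetof(struct node, link),
			head, node, 1 /* free */);
}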
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/tx_base.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/tx_base.h -- definitions of libpmemobj transactional entry points */ #ifndef LIBPMEMOBJ_TX_BASE_H #define LIBPMEMOBJ_TX_BASE_H 1 #include <setjmp.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Transactions * * Stages are changed only by the pmemobj_tx_* functions, each transition * to the TX_STAGE_ONABORT is followed by a longjmp to the jmp_buf provided in * the pmemobj_tx_begin function. */ enum pobj_tx_stage { TX_STAGE_NONE, /* no transaction in this thread */ TX_STAGE_WORK, /* transaction in progress */ TX_STAGE_ONCOMMIT, /* successfully committed */ TX_STAGE_ONABORT, /* tx_begin failed or transaction aborted */ TX_STAGE_FINALLY, /* always called */ MAX_TX_STAGE }; /* * Always returns the current transaction stage for a thread. 
*/ enum pobj_tx_stage pmemobj_tx_stage(void); enum pobj_tx_param { TX_PARAM_NONE, TX_PARAM_MUTEX, /* PMEMmutex */ TX_PARAM_RWLOCK, /* PMEMrwlock */ TX_PARAM_CB, /* pmemobj_tx_callback cb, void *arg */ }; #if !defined(_has_deprecated_with_message) && defined(__clang__) #if __has_extension(attribute_deprecated_with_message) #define _has_deprecated_with_message #endif #endif #if !defined(_has_deprecated_with_message) && \ defined(__GNUC__) && !defined(__INTEL_COMPILER) #if __GNUC__ * 100 + __GNUC_MINOR__ >= 601 /* 6.1 */ #define _has_deprecated_with_message #endif #endif #ifdef _has_deprecated_with_message #define tx_lock_deprecated __attribute__((deprecated(\ "enum pobj_tx_lock is deprecated, use enum pobj_tx_param"))) #else #define tx_lock_deprecated #endif /* deprecated, do not use */ enum tx_lock_deprecated pobj_tx_lock { TX_LOCK_NONE tx_lock_deprecated = TX_PARAM_NONE, TX_LOCK_MUTEX tx_lock_deprecated = TX_PARAM_MUTEX, TX_LOCK_RWLOCK tx_lock_deprecated = TX_PARAM_RWLOCK, }; typedef void (*pmemobj_tx_callback)(PMEMobjpool *pop, enum pobj_tx_stage stage, void *); #define POBJ_TX_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_NO_FLUSH |\ POBJ_XALLOC_CLASS_MASK) #define POBJ_XADD_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XADD_VALID_FLAGS POBJ_XADD_NO_FLUSH /* * Starts a new transaction in the current thread. * If called within an open transaction, starts a nested transaction. * * If successful, transaction stage changes to TX_STAGE_WORK and function * returns zero. Otherwise, stage changes to TX_STAGE_ONABORT and an error * number is returned. */ int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...); /* * Adds lock of given type to current transaction. */ int pmemobj_tx_lock(enum pobj_tx_param type, void *lockp); /* * Aborts current transaction * * Causes transition to TX_STAGE_ONABORT. * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_abort(int errnum); /* * Commits current transaction * * This function must be called during TX_STAGE_WORK. */ void pmemobj_tx_commit(void); /* * Cleanups current transaction. Must always be called after pmemobj_tx_begin, * even if starting the transaction failed. * * If called during TX_STAGE_NONE, has no effect. * * Always causes transition to TX_STAGE_NONE. * * If transaction was successful, returns 0. Otherwise returns error code set * by pmemobj_tx_abort. * * This function must *not* be called during TX_STAGE_WORK. */ int pmemobj_tx_end(void); /* * Performs the actions associated with current stage of the transaction, * and makes the transition to the next stage. Current stage must always * be obtained by calling pmemobj_tx_stage. * * This function must be called in transaction. */ void pmemobj_tx_process(void); /* * Returns last transaction error code. */ int pmemobj_tx_errno(void); /* * Takes a "snapshot" of the memory block of given size and located at given * offset 'off' in the object 'oid' and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. In case of failure or abort, all the changes within this range will * be rolled-back automatically. * * If successful, returns zero. * Otherwise, state changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size); /* * Takes a "snapshot" of the given memory region and saves it in the undo log. * The application is then free to directly modify the object in that memory * range. 
In case of failure or abort, all the changes within this range will * be rolled-back automatically. The supplied block of memory has to be within * the given pool. * * If successful, returns zero. * Otherwise, state changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_add_range_direct(const void *ptr, size_t size); /* * Behaves exactly the same as pmemobj_tx_add_range when 'flags' equals 0. * 'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit */ int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size, uint64_t flags); /* * Behaves exactly the same as pmemobj_tx_add_range_direct when 'flags' equals * 0. 'Flags' is a bitmask of the following values: * - POBJ_XADD_NO_FLUSH - skips flush on commit */ int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num); /* * Transactionally allocates a new object. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * 'Flags' is a bitmask of the following values: * - POBJ_XALLOC_ZERO - zero the allocated object * - POBJ_XALLOC_NO_FLUSH - skip flush on commit * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags); /* * Transactionally allocates new zeroed object. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num); /* * Transactionally resizes an existing object. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally resizes an existing object, if extended new space is zeroed. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the string s. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num); /* * Transactionally allocates a new object with duplicate of the wide character * string s. * * If successful, returns PMEMoid. * Otherwise, state changes to TX_STAGE_ONABORT and an OID_NULL is returned. * * This function must be called during TX_STAGE_WORK. */ PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num); /* * Transactionally frees an existing object. * * If successful, returns zero. * Otherwise, state changes to TX_STAGE_ONABORT and an error number is returned. * * This function must be called during TX_STAGE_WORK. */ int pmemobj_tx_free(PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/tx_base.h */
9,598
30.369281
80
h
null
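A sketch of the bare transaction flow using only the entry points above (no TX_BEGIN/TX_END macros). Passing a NULL jmp_buf means failures are reported through return values rather than a longjmp; the oid and offset arguments are assumptions:

#include <libpmemobj.h>

static int
set_u64_field(PMEMobjpool *pop, PMEMoid oid, uint64_t off, uint64_t new_val)
{
	if (pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE) != 0)
		return pmemobj_tx_end();        /* stage is TX_STAGE_ONABORT */

	/* snapshot the 8 bytes we are about to modify */
	if (pmemobj_tx_add_range(oid, off, sizeof(new_val)) == 0) {
		uint64_t *dst =
		    (uint64_t *)((char *)pmemobj_direct(oid) + off);
		*dst = new_val;
		pmemobj_tx_commit();
	}
	/* on add_range failure the stage is already TX_STAGE_ONABORT */

	return pmemobj_tx_end();                /* 0 on success */
}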
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/pool_base.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/pool_base.h -- definitions of libpmemobj pool entry points */ #ifndef LIBPMEMOBJ_POOL_BASE_H #define LIBPMEMOBJ_POOL_BASE_H 1 #include <stddef.h> #include <sys/types.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif #define PMEMOBJ_MIN_POOL ((size_t)(1024 * 1024 * 8)) /* 8 MiB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMOBJ_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * Pool management. */ #ifdef _WIN32 #ifndef PMDK_UTF8_API #define pmemobj_open pmemobj_openW #define pmemobj_create pmemobj_createW #define pmemobj_check pmemobj_checkW #else #define pmemobj_open pmemobj_openU #define pmemobj_create pmemobj_createU #define pmemobj_check pmemobj_checkU #endif #endif #ifndef _WIN32 PMEMobjpool *pmemobj_open(const char *path, const char *layout); #else PMEMobjpool *pmemobj_openU(const char *path, const char *layout); PMEMobjpool *pmemobj_openW(const wchar_t *path, const wchar_t *layout); #endif #ifndef _WIN32 PMEMobjpool *pmemobj_create(const char *path, const char *layout, size_t poolsize, mode_t mode); #else PMEMobjpool *pmemobj_createU(const char *path, const char *layout, size_t poolsize, mode_t mode); PMEMobjpool *pmemobj_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemobj_check(const char *path, const char *layout); #else int pmemobj_checkU(const char *path, const char *layout); int pmemobj_checkW(const wchar_t *path, const wchar_t *layout); #endif void pmemobj_close(PMEMobjpool *pop); /* * If called for the first time on a newly created pool, the root object * of given size is allocated. Otherwise, it returns the existing root object. * In such case, the size must be not less than the actual root object size * stored in the pool. If it's larger, the root object is automatically * resized. * * This function is thread-safe. 
*/ PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size); /* * Same as above, but calls the constructor function when the object is first * created and on all subsequent reallocations. */ PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size, pmemobj_constr constructor, void *arg); /* * Returns the size in bytes of the root object. Always equal to the requested * size. */ size_t pmemobj_root_size(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif /* libpmemobj/pool_base.h */
4,067
31.544
79
h
null
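A sketch of the common create-or-open pattern built from the pool entry points above; the layout string and the 1 KiB root size are assumptions for the example:

#include <libpmemobj.h>

#define EXAMPLE_LAYOUT "example_layout"         /* hypothetical layout name */

static PMEMobjpool *
open_or_create(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path, EXAMPLE_LAYOUT,
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		pop = pmemobj_open(path, EXAMPLE_LAYOUT);
	if (pop == NULL)
		return NULL;            /* see pmemobj_errormsg() for details */

	/* allocated on the first call, retrieved on every later one */
	PMEMoid root = pmemobj_root(pop, 1024);
	if (OID_IS_NULL(root)) {
		pmemobj_close(pop);
		return NULL;
	}
	return pop;
}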
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/action_base.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/action_base.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_BASE_H #define LIBPMEMOBJ_ACTION_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif enum pobj_action_type { /* a heap action (e.g., alloc) */ POBJ_ACTION_TYPE_HEAP, /* a single memory operation (e.g., value set) */ POBJ_ACTION_TYPE_MEM, POBJ_MAX_ACTION_TYPE }; struct pobj_action { /* * These fields are internal for the implementation and are not * guaranteed to be stable across different versions of the API. * Use with caution. * * This structure should NEVER be stored on persistent memory! */ enum pobj_action_type type; uint32_t data[3]; union { struct { /* offset to the element being freed/allocated */ uint64_t offset; } heap; uint64_t data2[14]; }; }; #define POBJ_ACTION_XRESERVE_VALID_FLAGS\ (POBJ_XALLOC_CLASS_MASK | POBJ_XALLOC_ZERO) PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num); PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num, uint64_t flags); void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act, uint64_t *ptr, uint64_t value); void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act); int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt); void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
3,226
32.614583
80
h
null
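A sketch of the reserve/publish idiom from the action interface above: an object is reserved and a pmem-resident counter is bumped, and neither change becomes visible until both actions are published together. The size, type number and the requirement that counter points into the same pool are assumptions:

#include <libpmemobj.h>

static PMEMoid
alloc_and_count(PMEMobjpool *pop, uint64_t *counter, int keep)
{
	struct pobj_action acts[2];

	PMEMoid oid = pmemobj_reserve(pop, &acts[0],
			128 /* size */, 1 /* hypothetical type num */);
	if (OID_IS_NULL(oid))
		return OID_NULL;

	pmemobj_set_value(pop, &acts[1], counter, *counter + 1);

	if (!keep) {
		pmemobj_cancel(pop, acts, 2);   /* drop both actions */
		return OID_NULL;
	}

	pmemobj_publish(pop, acts, 2);          /* make both durable at once */
	return oid;
}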
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/types.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/types.h -- definitions of libpmemobj type-safe macros */ #ifndef LIBPMEMOBJ_TYPES_H #define LIBPMEMOBJ_TYPES_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif #define TOID_NULL(t) ((TOID(t))OID_NULL) #define PMEMOBJ_MAX_LAYOUT ((size_t)1024) /* * Type safety macros */ #ifndef _MSC_VER #define TOID_ASSIGN(o, value)(\ {\ (o).oid = value;\ (o); /* to avoid "error: statement with no effect" */\ }) #else /* _MSC_VER */ #define TOID_ASSIGN(o, value) ((o).oid = value, (o)) /* * XXX - workaround for offsetof issue in VS 15.3 */ #ifdef PMEMOBJ_OFFSETOF_WA #ifdef _CRT_USE_BUILTIN_OFFSETOF #undef offsetof #define offsetof(s, m) ((size_t)&reinterpret_cast < char const volatile& > \ ((((s *)0)->m))) #endif #else #ifdef _CRT_USE_BUILTIN_OFFSETOF #error "Invalid definition of offsetof() macro - see: \ https://developercommunity.visualstudio.com/content/problem/96174/. 
\ Please upgrade your VS, fix offsetof as described under the link or define \ PMEMOBJ_OFFSETOF_WA to enable workaround in libpmemobj.h" #endif #endif #endif /* _MSC_VER */ #define TOID_EQUALS(lhs, rhs)\ ((lhs).oid.off == (rhs).oid.off &&\ (lhs).oid.pool_uuid_lo == (rhs).oid.pool_uuid_lo) /* type number of root object */ #define POBJ_ROOT_TYPE_NUM 0 #define _toid_struct #define _toid_union #define _toid_enum #define _POBJ_LAYOUT_REF(name) (sizeof(_pobj_layout_##name##_ref)) /* * Typed OID */ #define TOID(t)\ union _toid_##t##_toid #ifdef __cplusplus #define _TOID_CONSTR(t)\ _toid_##t##_toid()\ { }\ _toid_##t##_toid(PMEMoid _oid) : oid(_oid)\ { } #else #define _TOID_CONSTR(t) #endif /* * Declaration of typed OID */ #define _TOID_DECLARE(t, i)\ typedef uint8_t _toid_##t##_toid_type_num[(i) + 1];\ TOID(t)\ {\ _TOID_CONSTR(t)\ PMEMoid oid;\ t *_type;\ _toid_##t##_toid_type_num *_type_num;\ } /* * Declaration of typed OID of an object */ #define TOID_DECLARE(t, i) _TOID_DECLARE(t, i) /* * Declaration of typed OID of a root object */ #define TOID_DECLARE_ROOT(t) _TOID_DECLARE(t, POBJ_ROOT_TYPE_NUM) /* * Type number of specified type */ #define TOID_TYPE_NUM(t) (sizeof(_toid_##t##_toid_type_num) - 1) /* * Type number of object read from typed OID */ #define TOID_TYPE_NUM_OF(o) (sizeof(*(o)._type_num) - 1) /* * NULL check */ #define TOID_IS_NULL(o) ((o).oid.off == 0) /* * Validates whether type number stored in typed OID is the same * as type number stored in object's metadata */ #define TOID_VALID(o) (TOID_TYPE_NUM_OF(o) == pmemobj_type_num((o).oid)) /* * Checks whether the object is of a given type */ #define OID_INSTANCEOF(o, t) (TOID_TYPE_NUM(t) == pmemobj_type_num(o)) /* * Begin of layout declaration */ #define POBJ_LAYOUT_BEGIN(name)\ typedef uint8_t _pobj_layout_##name##_ref[__COUNTER__ + 1] /* * End of layout declaration */ #define POBJ_LAYOUT_END(name)\ typedef char _pobj_layout_##name##_cnt[__COUNTER__ + 1 -\ _POBJ_LAYOUT_REF(name)]; /* * Number of types declared inside layout without the root object */ #define POBJ_LAYOUT_TYPES_NUM(name) (sizeof(_pobj_layout_##name##_cnt) - 1) /* * Declaration of typed OID inside layout declaration */ #define POBJ_LAYOUT_TOID(name, t)\ TOID_DECLARE(t, (__COUNTER__ + 1 - _POBJ_LAYOUT_REF(name))); /* * Declaration of typed OID of root inside layout declaration */ #define POBJ_LAYOUT_ROOT(name, t)\ TOID_DECLARE_ROOT(t); /* * Name of declared layout */ #define POBJ_LAYOUT_NAME(name) #name #define TOID_TYPEOF(o) __typeof__(*(o)._type) #define TOID_OFFSETOF(o, field) offsetof(TOID_TYPEOF(o), field) /* * XXX - DIRECT_RW and DIRECT_RO are not available when compiled using VC++ * as C code (/TC). Use /TP option. */ #ifndef _MSC_VER #define DIRECT_RW(o) (\ {__typeof__(o) _o; _o._type = NULL; (void)_o;\ (__typeof__(*(o)._type) *)pmemobj_direct((o).oid); }) #define DIRECT_RO(o) ((const __typeof__(*(o)._type) *)pmemobj_direct((o).oid)) #elif defined(__cplusplus) /* * XXX - On Windows, these macros do not behave exactly the same as on Linux. */ #define DIRECT_RW(o) \ (reinterpret_cast < __typeof__((o)._type) > (pmemobj_direct((o).oid))) #define DIRECT_RO(o) \ (reinterpret_cast < const __typeof__((o)._type) > \ (pmemobj_direct((o).oid))) #endif /* (defined(_MSC_VER) || defined(__cplusplus)) */ #define D_RW DIRECT_RW #define D_RO DIRECT_RO #ifdef __cplusplus } #endif #endif /* libpmemobj/types.h */
5,982
25.126638
78
h
null
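A sketch of the type-safety machinery above: a layout with a root type and one object type, plus typed access through TOID, D_RO and D_RW. The layout and struct names are assumptions; the pool would be created with POBJ_LAYOUT_NAME(example) as its layout string:

#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(example);
POBJ_LAYOUT_ROOT(example, struct my_root);
POBJ_LAYOUT_TOID(example, struct my_item);
POBJ_LAYOUT_END(example);

struct my_item {
	uint64_t value;
};

struct my_root {
	TOID(struct my_item) first;
};

static uint64_t
first_value(PMEMobjpool *pop)
{
	TOID(struct my_root) root;
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct my_root)));

	if (TOID_IS_NULL(root) || TOID_IS_NULL(D_RO(root)->first))
		return 0;

	/* typed read-only access; D_RW would give a writable pointer */
	return D_RO(D_RO(root)->first)->value;
}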
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/base.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/base.h -- definitions of base libpmemobj entry points */ #ifndef LIBPMEMOBJ_BASE_H #define LIBPMEMOBJ_BASE_H 1 #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemobj_check_version pmemobj_check_versionW #define pmemobj_errormsg pmemobj_errormsgW #else #define pmemobj_check_version pmemobj_check_versionU #define pmemobj_errormsg pmemobj_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type internal to libpmemobj */ typedef struct pmemobjpool PMEMobjpool; #define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0) /* * allocation functions flags */ #define POBJ_FLAG_ZERO (((uint64_t)1) << 0) #define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1) #define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48) #define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48) #define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO #define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH /* * Persistent memory object */ /* * Object handle */ typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; static const PMEMoid OID_NULL = { 0, 0 }; #define OID_IS_NULL(o) ((o).off == 0) #define OID_EQUALS(lhs, rhs)\ ((lhs).off == (rhs).off &&\ (lhs).pool_uuid_lo == (rhs).pool_uuid_lo) PMEMobjpool *pmemobj_pool_by_ptr(const void *addr); PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid); #ifndef _WIN32 extern int _pobj_cache_invalidate; extern __thread struct _pobj_pcache { PMEMobjpool *pop; uint64_t uuid_lo; int invalidate; } _pobj_cached_pool; /* * Returns the direct pointer of an object. 
*/ static inline void * pmemobj_direct_inline(PMEMoid oid) { if (oid.off == 0 || oid.pool_uuid_lo == 0) return NULL; struct _pobj_pcache *cache = &_pobj_cached_pool; if (_pobj_cache_invalidate != cache->invalidate || cache->uuid_lo != oid.pool_uuid_lo) { cache->invalidate = _pobj_cache_invalidate; if (!(cache->pop = pmemobj_pool_by_oid(oid))) { cache->uuid_lo = 0; return NULL; } cache->uuid_lo = oid.pool_uuid_lo; } return (void *)((uintptr_t)cache->pop + oid.off); } #endif /* _WIN32 */ /* * Returns the direct pointer of an object. */ #if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\ defined(PMEMOBJ_DIRECT_NON_INLINE) void *pmemobj_direct(PMEMoid oid); #else #define pmemobj_direct pmemobj_direct_inline #endif struct pmemvlt { uint64_t runid; }; #define PMEMvlt(T)\ struct {\ struct pmemvlt vlt;\ T value;\ } void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg); /* * Returns the OID of the object pointed to by addr. */ PMEMoid pmemobj_oid(const void *addr); /* * Returns the number of usable bytes in the object. May be greater than * the requested size of the object because of internal alignment. * * Can be used with objects allocated by any of the available methods. */ size_t pmemobj_alloc_usable_size(PMEMoid oid); /* * Returns the type number of the object. */ uint64_t pmemobj_type_num(PMEMoid oid); /* * Pmemobj specific low-level memory manipulation functions. * * These functions are meant to be used with pmemobj pools, because they provide * additional functionality specific to this type of pool. These may include * for example replication support. They also take advantage of the knowledge * of the type of memory in the pool (pmem/non-pmem) to assure persistence. */ /* * Pmemobj version of memcpy. Data copied is made persistent. */ void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src, size_t len); /* * Pmemobj version of memset. Data range set is made persistent. */ void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len); /* * Pmemobj version of memcpy. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memmove. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memset. Data range set is made persistent (unless * opted-out using flags). */ void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags); /* * Pmemobj version of pmem_persist. */ void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_persist with additional flags argument. */ int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_flush. */ void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_flush with additional flags argument. */ int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_drain. */ void pmemobj_drain(PMEMobjpool *pop); /* * Version checking. */ /* * PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version * of the libpmemobj API as provided by this header file. 
Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemobj_check_version(). */ #define PMEMOBJ_MAJOR_VERSION 2 #define PMEMOBJ_MINOR_VERSION 4 #ifndef _WIN32 const char *pmemobj_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemobj_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemobj_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemobj. */ void pmemobj_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg); /* * (debug helper function) logs notice message if used inside a transaction */ void _pobj_debug_notice(const char *func_name, const char *file, int line); #ifndef _WIN32 const char *pmemobj_errormsg(void); #else const char *pmemobj_errormsgU(void); const wchar_t *pmemobj_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/base.h */
8,153
25.910891
80
h
null
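A sketch using the low-level helpers declared above: version check, OID-to-pointer translation, usable-size query and a persistent memcpy. The destination oid and buffer are assumptions:

#include <libpmemobj.h>

static int
store_blob(PMEMobjpool *pop, PMEMoid dst, const void *buf, size_t len)
{
	/* optional compile-time vs run-time compatibility check */
	if (pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
			PMEMOBJ_MINOR_VERSION) != NULL)
		return -1;                      /* incompatible library */

	if (len > pmemobj_alloc_usable_size(dst))
		return -1;                      /* would overflow the object */

	void *addr = pmemobj_direct(dst);       /* OID -> direct pointer */

	/* copy and make durable in one call */
	pmemobj_memcpy_persist(pop, addr, buf, len);
	return 0;
}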
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/tx.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) 
_POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
5,589
26.004831
74
h
null
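A sketch of the macro-based transaction flow above; struct rect and its type number are assumptions made for the example:

#include <libpmemobj.h>

struct rect {
	int a;
	int b;
};
TOID_DECLARE(struct rect, 2);           /* hypothetical type number */

static void
update_rect(PMEMobjpool *pop, TOID(struct rect) r)
{
	TX_BEGIN(pop) {
		TX_ADD(r);              /* snapshot the whole object */
		D_RW(r)->a = 5;
		D_RW(r)->b = 10;
	} TX_ONABORT {
		/* lands here on abort; errno holds the error code */
	} TX_END
}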
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/atomic_base.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points */ #ifndef LIBPMEMOBJ_ATOMIC_BASE_H #define LIBPMEMOBJ_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional atomic allocations * * Those functions can be used outside transactions. The allocations are always * aligned to the cache-line boundary. */ #define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_CLASS_MASK) /* * Allocates a new object from the pool and calls a constructor function before * returning. It is guaranteed that allocated object is either properly * initialized, or if it's interrupted before the constructor completes, the * memory reserved for the object is automatically reclaimed. */ int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); /* * Allocates with flags a new object from the pool. */ int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, uint64_t flags, pmemobj_constr constructor, void *arg); /* * Allocates a new zeroed object from the pool. */ int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object. */ int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object, if extended new space is zeroed. */ int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Allocates a new object with duplicate of the string s. */ int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s, uint64_t type_num); /* * Allocates a new object with duplicate of the wide character string s. */ int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s, uint64_t type_num); /* * Frees an existing object. */ void pmemobj_free(PMEMoid *oidp); #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic_base.h */
3,592
31.080357
79
h
null
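A sketch of a non-transactional allocation with a constructor, so the object can never be observed half-initialized. The record layout and type number are assumptions:

#include <string.h>
#include <libpmemobj.h>

struct rec {
	char name[32];
};

/* runs before the allocation is linked into the pool */
static int
rec_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct rec *r = ptr;
	strncpy(r->name, arg, sizeof(r->name) - 1);
	r->name[sizeof(r->name) - 1] = '\0';
	pmemobj_persist(pop, r, sizeof(*r));
	return 0;               /* non-zero would cancel the allocation */
}

static int
add_rec(PMEMobjpool *pop, PMEMoid *oidp, const char *name)
{
	return pmemobj_alloc(pop, oidp, sizeof(struct rec),
			3 /* hypothetical type num */,
			rec_construct, (void *)name);
}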
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/thread.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points */ #ifndef LIBPMEMOBJ_THREAD_H #define LIBPMEMOBJ_THREAD_H 1 #include <time.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Locking. */ #define _POBJ_CL_SIZE 64 /* cache line size */ typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMmutex; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMrwlock; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMcond; void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp); void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp); #ifdef __cplusplus } #endif #endif /* libpmemobj/thread.h */
3,665
35.29703
79
h
null
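A sketch of a pmem-resident mutex guarding a shared counter; the struct layout is an assumption, and the structure is assumed to live inside the pool so the lock can be revalidated on pool open:

#include <libpmemobj.h>

struct shared {
	PMEMmutex lock;
	uint64_t hits;
};

static int
bump(PMEMobjpool *pop, struct shared *s)
{
	int err = pmemobj_mutex_lock(pop, &s->lock);
	if (err)
		return err;

	s->hits++;
	pmemobj_persist(pop, &s->hits, sizeof(s->hits));

	return pmemobj_mutex_unlock(pop, &s->lock);
}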
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/action.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/action.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_H #define LIBPMEMOBJ_ACTION_H 1 #include <libpmemobj/action_base.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_RESERVE_NEW(pop, t, act)\ ((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t))) #define POBJ_RESERVE_ALLOC(pop, t, size, act)\ ((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t))) #define POBJ_XRESERVE_NEW(pop, t, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags)) #define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags)) #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
2,344
36.222222
74
h
null
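A sketch of a typed reservation through the convenience macros above; struct msg and its type number are assumptions. The object is initialized here but stays invisible until the caller publishes (or cancels) the action:

#include <libpmemobj.h>

struct msg {
	char text[64];
};
TOID_DECLARE(struct msg, 4);            /* hypothetical type number */

static TOID(struct msg)
prepare_msg(PMEMobjpool *pop, struct pobj_action *act)
{
	TOID(struct msg) m = POBJ_RESERVE_NEW(pop, struct msg, act);
	if (!TOID_IS_NULL(m))
		pmemobj_memcpy_persist(pop, D_RW(m)->text, "hello", 6);
	return m;
}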
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/atomic.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/atomic.h -- definitions of libpmemobj atomic macros */ #ifndef LIBPMEMOBJ_ATOMIC_H #define LIBPMEMOBJ_ATOMIC_H 1 #include <libpmemobj/atomic_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_NEW(pop, o, t, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ALLOC(pop, o, t, size, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ZNEW(pop, o, t)\ pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t)) #define POBJ_ZALLOC(pop, o, t, size)\ pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_REALLOC(pop, o, t, size)\ pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_ZREALLOC(pop, o, t, size)\ pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_FREE(o)\ pmemobj_free((PMEMoid *)(o)) #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic.h */
2,630
34.08
74
h
null
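A short sketch of the non-transactional allocation macros in libpmemobj/atomic.h above; the layout and struct names are made up for illustration and error handling is minimal. POBJ_NEW and POBJ_ALLOC work the same way but additionally take a constructor callback and its argument.

#include <stdint.h>
#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(counters);
POBJ_LAYOUT_TOID(counters, struct counter);
POBJ_LAYOUT_END(counters);

struct counter {
	uint64_t hits;
};

static int
counter_lifecycle(PMEMobjpool *pop)
{
	TOID(struct counter) c;

	/* atomic, zero-initialized allocation typed as struct counter */
	if (POBJ_ZNEW(pop, &c, struct counter) != 0)
		return -1;

	D_RW(c)->hits = 1;
	pmemobj_persist(pop, D_RW(c), sizeof(struct counter));

	/* atomic free; also resets the TOID to a NULL OID */
	POBJ_FREE(&c);
	return 0;
}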
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/pool.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/pool.h -- definitions of libpmemobj pool macros */ #ifndef LIBPMEMOBJ_POOL_H #define LIBPMEMOBJ_POOL_H 1 #include <libpmemobj/pool_base.h> #include <libpmemobj/types.h> #define POBJ_ROOT(pop, t) (\ (TOID(t))pmemobj_root((pop), sizeof(t))) #endif /* libpmemobj/pool.h */
1,894
39.319149
74
h
null
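POBJ_ROOT above is just a typed wrapper around pmemobj_root(). A hedged sketch of typical use follows; the pool path, layout name `app`, and `struct my_root` are hypothetical.

#include <stdint.h>
#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(app);
POBJ_LAYOUT_ROOT(app, struct my_root);
POBJ_LAYOUT_END(app);

struct my_root {
	uint64_t counter;
};

static int
bump(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, POBJ_LAYOUT_NAME(app));
	if (pop == NULL)
		return -1;

	/* the root object is allocated (zeroed) on first use,
	 * sized here to sizeof(struct my_root) */
	TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);

	D_RW(root)->counter++;
	pmemobj_persist(pop, &D_RW(root)->counter, sizeof(uint64_t));

	pmemobj_close(pop);
	return 0;
}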
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/include/libpmemobj/iterator_base.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points */ #ifndef LIBPMEMOBJ_ITERATOR_BASE_H #define LIBPMEMOBJ_ITERATOR_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * The following functions allow access to the entire collection of objects. * * Use with conjunction with non-transactional allocations. Pmemobj pool acts * as a generic container (list) of objects that are not assigned to any * user-defined data structures. */ /* * Returns the first object of the specified type number. */ PMEMoid pmemobj_first(PMEMobjpool *pop); /* * Returns the next object of the same type. */ PMEMoid pmemobj_next(PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator_base.h */
2,371
32.885714
80
h
null
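The two entry points above are enough to walk every object in a pool; a minimal sketch is shown below. The typed POBJ_FOREACH convenience macros are declared in the companion iterator.h header, which is not part of this listing.

#include <inttypes.h>
#include <stdio.h>
#include <libpmemobj.h>

/* Print the type number and usable size of every object in the pool. */
static void
dump_objects(PMEMobjpool *pop)
{
	for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
			oid = pmemobj_next(oid)) {
		printf("type %" PRIu64 ", %zu bytes\n",
			pmemobj_type_num(oid),
			pmemobj_alloc_usable_size(oid));
	}
}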
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/magic-install.sh
#!/usr/bin/env bash # # Copyright 2014-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # magic-install.sh -- Script for installing magic script # set -e if ! grep -q "File: pmdk" /etc/magic then echo "Appending PMDK magic to /etc/magic" cat /usr/share/pmdk/pmdk.magic >> /etc/magic else echo "PMDK magic already exists" fi
1,829
40.590909
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/md2man.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # md2man.sh -- convert markdown to groff man pages # # usage: md2man.sh file template outfile # # This script converts markdown file into groff man page using pandoc. # It performs some pre- and post-processing for better results: # - uses m4 to preprocess OS-specific directives. See doc/macros.man. # - parse input file for YAML metadata block and read man page title, # section and version # - cut-off metadata block and license # - unindent code blocks # - cut-off windows and web specific parts of documentation # # If the TESTOPTS variable is set, generates a preprocessed markdown file # with the header stripped off for testing purposes. # set -e set -o pipefail filename=$1 template=$2 outfile=$3 title=`sed -n 's/^title:\ _MP(*\([A-Za-z_-]*\).*$/\1/p' $filename` section=`sed -n 's/^title:.*\([0-9]\))$/\1/p' $filename` version=`sed -n 's/^date:\ *\(.*\)$/\1/p' $filename` if [ "$TESTOPTS" != "" ]; then m4 $TESTOPTS macros.man $filename | sed -n -e '/# NAME #/,$p' > $outfile else OPTS= if [ "$WIN32" == 1 ]; then OPTS="$OPTS -DWIN32" else OPTS="$OPTS -UWIN32" fi if [ "$(uname -s)" == "FreeBSD" ]; then OPTS="$OPTS -DFREEBSD" else OPTS="$OPTS -UFREEBSD" fi if [ "$WEB" == 1 ]; then OPTS="$OPTS -DWEB" mkdir -p "$(dirname $outfile)" m4 $OPTS macros.man $filename | sed -n -e '/---/,$p' > $outfile else dt=$(date +"%F") m4 $OPTS macros.man $filename | sed -n -e '/# NAME #/,$p' |\ pandoc -s -t man -o $outfile.tmp --template=$template \ -V title=$title -V section=$section \ -V date="$dt" -V version="$version" \ -V year=$(date +"%Y") | sed '/^\.IP/{ N /\n\.nf/{ s/IP/PP/ } }' # don't overwrite the output file if the only thing that changed # is modification date (diff output has exactly 4 lines in this case) difflines=`diff $outfile $outfile.tmp | wc -l || true` onlydates=`diff $outfile $outfile.tmp | grep "$dt" | wc -l || true` if [ $difflines -eq 4 -a $onlydates -eq 1 ]; then rm $outfile.tmp else mv $outfile.tmp $outfile fi fi fi
3,587
31.917431
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check-area.sh
#!/usr/bin/env bash # # Copyright 2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # Finds applicable area name for specified commit id. # if [ -z "$1" ]; then echo "Missing commit id argument." exit 1 fi files=`git show $1 | grep -e "^--- a/" -e "^+++ b/" | grep -v /dev/null | sed "s/^--- a\///" | sed "s/^+++ b\///" | uniq` git show -q $1 echo echo "Modified files:" echo "$files" function categorize() { category=$1 shift cat_files=`echo "$files" | grep $*` if [ -n "${cat_files}" ]; then echo "$category" files=`echo "$files" | grep -v $*` fi } echo echo "Areas computed basing on the list of modified files: (see utils/check-area.sh for full algorithm)" categorize pmem -e "^src/libpmem/" -e "^src/include/libpmem.h" categorize rpmem -e "^src/librpmem/" -e "^src/include/librpmem.h" -e "^src/tools/rpmemd/" -e "^src/rpmem_common/" categorize log -e "^src/libpmemlog/" -e "^src/include/libpmemlog.h" categorize blk -e "^src/libpmemblk/" -e "^src/include/libpmemblk.h" categorize obj -e "^src/libpmemobj/" -e "^src/include/libpmemobj.h" -e "^src/include/libpmemobj/" categorize pool -e "^src/libpmempool/" -e "^src/include/libpmempool.h" -e "^src/tools/pmempool/" categorize vmem -e "^src/libvmem/" -e "^src/include/libvmem.h" categorize vmmalloc -e "^src/libvmmalloc/" -e "^src/include/libvmmalloc.h" categorize jemalloc -e "^src/jemalloc/" -e "^src/windows/jemalloc_gen/" categorize cto -e "^src/libpmemcto/" -e "^src/include/libpmemcto.h" categorize benchmark -e "^src/benchmarks/" categorize examples -e "^src/examples/" categorize daxio -e "^src/tools/daxio/" categorize pmreorder -e "^src/tools/pmreorder/" categorize test -e "^src/test/" categorize doc -e "^doc/" -e ".md\$" -e "^ChangeLog" -e "README" categorize common -e "^src/common/" \ -e "^utils/" \ -e ".inc\$" \ -e ".yml\$" \ -e ".gitattributes" \ -e ".gitignore" \ -e "^.mailmap\$" \ -e "^src/PMDK.sln\$" \ -e "Makefile\$" \ -e "^src/freebsd/" \ -e "^src/windows/" \ -e "^src/include/pmemcompat.h" echo echo "If the above list contains more than 1 entry, please consider splitting" echo "your change into more commits, unless 
those changes don't make sense " echo "individually (they do not build, tests do not pass, etc)." echo "For example, it's perfectly fine to use 'obj' prefix for one commit that" echo "changes libpmemobj source code, its tests and documentation." if [ -n "$files" ]; then echo echo "Uncategorized files:" echo "$files" fi
4,060
37.311321
121
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check-shebang.sh
#!/usr/bin/env bash # # Copyright 2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # utils/check-shebang.sh -- interpreter directive check script # set -e err_count=0 for file in $@ ; do [ ! -f $file ] && continue SHEBANG=`head -n1 $file | cut -d" " -f1` [ "${SHEBANG:0:2}" != "#!" ] && continue if [ "$SHEBANG" != "#!/usr/bin/env" -a $SHEBANG != "#!/bin/sh" ]; then INTERP=`echo $SHEBANG | rev | cut -d"/" -f1 | rev` echo "$file:1: error: invalid interpreter directive:" >&2 echo " (is: \"$SHEBANG\", should be: \"#!/usr/bin/env $INTERP\")" >&2 ((err_count+=1)) fi done if [ "$err_count" == "0" ]; then echo "Interpreter directives are OK." else echo "Found $err_count errors in interpreter directives!" >&2 err_count=1 fi exit $err_count
2,271
35.645161
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/get_aliases.sh
#!/usr/bin/env bash # # Copyright 2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # get_aliases.sh -- generate map of manuals functions and libraries # # usage: run from /pmdk/doc/generated location without parameters: # ./../../utils/get_aliases.sh # # This script searches manpages from section 7 then # takes all functions from each section using specified pattern # and at the end to every function it assign real markdown file # representation based on *.gz file content # # Generated libs_map.yml file is used on gh-pages # to handle functions and their aliases # list=("$@") man_child=("$@") function search_aliases { children=$1 for i in ${children[@]} do if [ -e $i ] then echo "Man: $i" content=$(head -c 150 $i) if [[ "$content" == ".so "* ]] ; then content=$(basename ${content#".so"}) i="${i%.*}" echo " $i: $content" >> $map_file else r="${i%.*}" echo " $r: $i" >> $map_file fi fi done } function list_pages { parent="${1%.*}" list=("$@") man_child=("$@") if [ "$parent" == "libpmem" ]; then man_child=($(ls pmem_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemblk" ]; then man_child=($(ls pmemblk_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemcto" ]; then man_child=($(ls pmemcto_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemlog" ]; then man_child=($(ls pmemlog_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemobj" ]; then man_child=($(ls pmemobj_*.3)) man_child+=($(ls pobj_*.3)) man_child+=($(ls oid_*.3)) man_child+=($(ls toid_*.3)) man_child+=($(ls direct_*.3)) man_child+=($(ls d_r*.3)) man_child+=($(ls tx_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libvmmalloc" ]; then man_child=($(ls vmmalloc_*.3 2>/dev/null)) echo -n "- $parent: " >> $map_file fi if [ "$parent" == "libvmem" ]; then man_child=($(ls vmem_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file 
fi if [ "$parent" == "libpmempool" ]; then man_child=($(ls pmempool_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "librpmem" ]; then man_child=($(ls rpmem_*.3)) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ ${#man_child[@]} -ne 0 ] then list=${man_child[@]} search_aliases "${list[@]}" fi } man7=($(ls *.7)) map_file=libs_map.yml [ -e $map_file ] && rm $map_file touch $map_file for i in "${man7[@]}" do echo "Library: $i" list_pages $i done
4,273
26.397436
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/copy-source.sh
#!/usr/bin/env bash # # Copyright 2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # utils/copy-source.sh -- copy source files (from HEAD) to 'path_to_dir/pmdk' # directory whether in git repository or not. # # usage: ./copy-source.sh [path_to_dir] [srcversion] set -e DESTDIR="$1" SRCVERSION=$2 if [ -d .git ]; then if [ -n "$(git status --porcelain)" ]; then echo "Error: Working directory is dirty: $(git status --porcelain)" exit 1 fi else echo "Warning: You are not in git repository, working directory might be dirty." fi mkdir -p "$DESTDIR"/pmdk echo -n $SRCVERSION > "$DESTDIR"/pmdk/.version if [ -d .git ]; then git archive HEAD | tar -x -C "$DESTDIR"/pmdk else find . \ -maxdepth 1 \ -not -name $(basename "$DESTDIR") \ -not -name . \ -exec cp -r "{}" "$DESTDIR"/pmdk \; fi
2,304
34.461538
81
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check-commit.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # Used to check whether all the commit messages in a pull request # follow the GIT/PMDK guidelines. # # usage: ./check-commit.sh # if [[ $TRAVIS_REPO_SLUG != "pmem/pmdk" \ || $TRAVIS_EVENT_TYPE != "pull_request" ]]; then echo "SKIP: $0 can only be executed for pull requests to pmem/pmdk" exit 0 fi # Find all the commits for the current build if [[ -n "$TRAVIS_COMMIT_RANGE" ]]; then MERGE_BASE=$(echo $TRAVIS_COMMIT_RANGE | cut -d. -f1) [ -z $MERGE_BASE ] && \ MERGE_BASE=$(git log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) commits=$(git log --pretty=%H $MERGE_BASE..$TRAVIS_COMMIT) else commits=$TRAVIS_COMMIT fi # valid area names AREAS="pmem\|rpmem\|log\|blk\|obj\|pool\|test\|benchmark\|examples\|vmem\|vmmalloc\|jemalloc\|doc\|common\|cto\|daxio\|pmreorder" # Check commit message for commit in $commits; do subject=$(git log --format="%s" -n 1 $commit) commit_len=$(git log --format="%s%n%b" -n 1 $commit | wc -L) prefix=$(echo $subject | sed -n "s/^\($AREAS\)\:.*/\1/p") if [[ $subject =~ ^Merge.* ]]; then # skip continue fi if [ "$prefix" = "" ]; then echo "FAIL: subject line in commit message does not contain valid area name" echo ./utils/check-area.sh $commit exit 1 fi if [ $commit_len -gt 73 ]; then echo "FAIL: commit message exceeds 72 chars per line (commit_len)" echo git log -n 1 $commit exit 1 fi done
2,977
34.035294
129
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/build-rpm.sh
#!/usr/bin/env bash # # Copyright 2014-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # build-rpm.sh - Script for building rpm packages # set -e SCRIPT_DIR=$(dirname $0) source $SCRIPT_DIR/pkg-common.sh check_tool rpmbuild check_file $SCRIPT_DIR/pkg-config.sh source $SCRIPT_DIR/pkg-config.sh # # usage -- print usage message and exit # usage() { [ "$1" ] && echo Error: $1 cat >&2 <<EOF Usage: $0 [ -h ] -t version-tag -s source-dir -w working-dir -o output-dir [ -d distro ] [ -e build-experimental ] [ -c run-check ] [ -r build-rpmem ] [ -n with-ndctl ] [ -f testconfig-file ] -h print this help message -t version-tag source version tag -s source-dir source directory -w working-dir working directory -o output-dir outut directory -d distro Linux distro name -e build-experimental build experimental packages -c run-check run package check -r build-rpmem build librpmem and rpmemd packages -n with-ndctl build with libndctl -f testconfig-file custom testconfig.sh EOF exit 1 } # # command-line argument processing... # args=`getopt he:c:r:n:t:d:s:w:o:f: $*` [ $? 
!= 0 ] && usage set -- $args for arg do receivetype=auto case "$arg" in -e) EXPERIMENTAL="$2" shift 2 ;; -c) BUILD_PACKAGE_CHECK="$2" shift 2 ;; -f) TEST_CONFIG_FILE="$2" shift 2 ;; -r) BUILD_RPMEM="$2" shift 2 ;; -n) NDCTL_ENABLE="$2" shift 2 ;; -t) PACKAGE_VERSION_TAG="$2" shift 2 ;; -s) SOURCE="$2" shift 2 ;; -w) WORKING_DIR="$2" shift 2 ;; -o) OUT_DIR="$2" shift 2 ;; -d) DISTRO="$2" shift 2 ;; --) shift break ;; esac done # check for mandatory arguments if [ -z "$PACKAGE_VERSION_TAG" -o -z "$SOURCE" -o -z "$WORKING_DIR" -o -z "$OUT_DIR" ] then error "Mandatory arguments missing" usage fi # detected distro or defined in cmd if [ -z "${DISTRO}" ] then OS=$(get_os) if [ "$OS" != "1" ] then echo "Detected OS: $OS" DISTRO=$OS else error "Unknown distribution" exit 1 fi fi if [ "$EXTRA_CFLAGS_RELEASE" = "" ]; then export EXTRA_CFLAGS_RELEASE="-ggdb -fno-omit-frame-pointer" fi LIBFABRIC_MIN_VERSION=1.4.2 NDCTL_MIN_VERSION=60.1 RPMBUILD_OPTS=( ) PACKAGE_VERSION=$(get_version $PACKAGE_VERSION_TAG) if [ -z "$PACKAGE_VERSION" ] then error "Can not parse version from '${PACKAGE_VERSION_TAG}'" exit 1 fi PACKAGE_SOURCE=${PACKAGE_NAME}-${PACKAGE_VERSION} SOURCE=$PACKAGE_NAME PACKAGE_TARBALL=$PACKAGE_SOURCE.tar.gz RPM_SPEC_FILE=$PACKAGE_SOURCE/$PACKAGE_NAME.spec MAGIC_INSTALL=$PACKAGE_SOURCE/utils/magic-install.sh MAGIC_UNINSTALL=$PACKAGE_SOURCE/utils/magic-uninstall.sh OLDPWD=$PWD [ -d $WORKING_DIR ] || mkdir -v $WORKING_DIR [ -d $OUT_DIR ] || mkdir $OUT_DIR cd $WORKING_DIR check_dir $SOURCE mv $SOURCE $PACKAGE_SOURCE if [ "$DISTRO" = "SLES_like" ] then RPM_LICENSE="BSD-3-Clause" RPM_GROUP_SYS_BASE="System\/Base" RPM_GROUP_SYS_LIBS="System\/Libraries" RPM_GROUP_DEV_LIBS="Development\/Libraries\/C and C++" RPM_PKG_NAME_SUFFIX="1" RPM_MAKE_FLAGS="BINDIR=""%_bindir"" NORPATH=1" RPM_MAKE_INSTALL="%fdupes %{buildroot}\/%{_prefix}" else RPM_LICENSE="BSD" RPM_GROUP_SYS_BASE="System Environment\/Base" RPM_GROUP_SYS_LIBS="System Environment\/Libraries" RPM_GROUP_DEV_LIBS="Development\/Libraries" RPM_PKG_NAME_SUFFIX="" RPM_MAKE_FLAGS="NORPATH=1" RPM_MAKE_INSTALL="" fi # # Create parametrized spec file required by rpmbuild. # Most of variables are set in pkg-config.sh file in order to # keep descriptive values separately from this script. 
# sed -e "s/__VERSION__/$PACKAGE_VERSION/g" \ -e "s/__LICENSE__/$RPM_LICENSE/g" \ -e "s/__PACKAGE_MAINTAINER__/$PACKAGE_MAINTAINER/g" \ -e "s/__PACKAGE_SUMMARY__/$PACKAGE_SUMMARY/g" \ -e "s/__GROUP_SYS_BASE__/$RPM_GROUP_SYS_BASE/g" \ -e "s/__GROUP_SYS_LIBS__/$RPM_GROUP_SYS_LIBS/g" \ -e "s/__GROUP_DEV_LIBS__/$RPM_GROUP_DEV_LIBS/g" \ -e "s/__PKG_NAME_SUFFIX__/$RPM_PKG_NAME_SUFFIX/g" \ -e "s/__MAKE_FLAGS__/$RPM_MAKE_FLAGS/g" \ -e "s/__MAKE_INSTALL_FDUPES__/$RPM_MAKE_INSTALL/g" \ -e "s/__LIBFABRIC_MIN_VER__/$LIBFABRIC_MIN_VERSION/g" \ -e "s/__NDCTL_MIN_VER__/$NDCTL_MIN_VERSION/g" \ $OLDPWD/$SCRIPT_DIR/pmdk.spec.in > $RPM_SPEC_FILE if [ "$DISTRO" = "SLES_like" ] then sed -i '/^#.*bugzilla.redhat/d' $RPM_SPEC_FILE fi # do not split on space IFS=$'\n' # experimental features if [ "${EXPERIMENTAL}" = "y" ] then # no experimental features for now RPMBUILD_OPTS+=( ) fi # librpmem & rpmemd if [ "${BUILD_RPMEM}" = "y" ] then RPMBUILD_OPTS+=(--with fabric) else RPMBUILD_OPTS+=(--without fabric) fi # daxio & RAS if [ "${NDCTL_ENABLE}" = "n" ] then RPMBUILD_OPTS+=(--without ndctl) else RPMBUILD_OPTS+=(--with ndctl) fi # use specified testconfig file or default if [[( -n "${TEST_CONFIG_FILE}") && ( -f "$TEST_CONFIG_FILE" ) ]] then echo "Test config file: $TEST_CONFIG_FILE" RPMBUILD_OPTS+=(--define "_testconfig $TEST_CONFIG_FILE") else echo -e "Test config file $TEST_CONFIG_FILE does not exist.\n"\ "Default test config will be used." fi # run make check or not if [ "${BUILD_PACKAGE_CHECK}" == "n" ] then RPMBUILD_OPTS+=(--define "_skip_check 1") fi tar zcf $PACKAGE_TARBALL $PACKAGE_SOURCE # Create directory structure for rpmbuild mkdir -v BUILD SPECS echo "opts: ${RPMBUILD_OPTS[@]}" rpmbuild --define "_topdir `pwd`"\ --define "_rpmdir ${OUT_DIR}"\ --define "_srcrpmdir ${OUT_DIR}"\ -ta $PACKAGE_TARBALL \ ${RPMBUILD_OPTS[@]} echo "Building rpm packages done" exit 0
6,899
23.041812
86
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/pkg-common.sh
# # Copyright 2014-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # pkg-common.sh - common functions and variables for building packages # export LC_ALL="C" function error() { echo -e "error: $@" } function check_dir() { if [ ! -d $1 ] then error "Directory '$1' does not exist." exit 1 fi } function check_file() { if [ ! -f $1 ] then error "File '$1' does not exist." exit 1 fi } function check_tool() { local tool=$1 if [ -z "$(which $tool 2>/dev/null)" ] then error "'${tool}' not installed or not in PATH" exit 1 fi } function format_version () { echo $1 | sed 's|0*\([0-9]\+\)|\1|g' } function get_version_item() { local INPUT=$1 local TARGET=$2 local REGEX="([^0-9]*)(([0-9]+\.){,2}[0-9]+)([.+-]?.*)" local VERSION="0.0" local RELEASE="-$INPUT" if [[ $INPUT =~ $REGEX ]] then VERSION="${BASH_REMATCH[2]}" RELEASE="${BASH_REMATCH[4]}" fi case $TARGET in version) echo -n $VERSION ;; release) echo -n $RELEASE ;; *) error "Wrong target" exit 1 ;; esac } function get_version() { local VERSION=$(get_version_item $1 version) local RELEASE=$(get_version_item $1 release) VERSION=$(format_version $VERSION) if [ -z "$RELEASE" ] then echo -n $VERSION else RELEASE=${RELEASE//[-:_.]/"~"} echo -n ${VERSION}${RELEASE} fi } function get_os() { if [ -f /etc/os-release ] then local OS=$(cat /etc/os-release | grep -m1 -o -P '(?<=NAME=).*($)') [[ "$OS" =~ SLES|openSUSE ]] && echo -n "SLES_like" || ([[ "$OS" =~ "Fedora"|"Red Hat"|"CentOS" ]] && echo -n "RHEL_like" || echo 1) else echo 1 fi } REGEX_DATE_AUTHOR="([a-zA-Z]{3} [a-zA-Z]{3} [0-9]{2} [0-9]{4})\s*(.*)" REGEX_MESSAGE_START="\s*\*\s*(.*)" REGEX_MESSAGE="\s*(\S.*)"
3,230
24.440945
79
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/build-dpkg.sh
#!/usr/bin/env bash # # Copyright 2014-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # build-dpkg.sh - Script for building deb packages # set -e SCRIPT_DIR=$(dirname $0) source $SCRIPT_DIR/pkg-common.sh # # usage -- print usage message and exit # usage() { [ "$1" ] && echo Error: $1 cat >&2 <<EOF Usage: $0 [ -h ] -t version-tag -s source-dir -w working-dir -o output-dir [ -e build-experimental ] [ -c run-check ] [ -n with-ndctl ] [ -f testconfig-file ] -h print this help message -t version-tag source version tag -s source-dir source directory -w working-dir working directory -o output-dir outut directory -e build-experimental build experimental packages -c run-check run package check -n with-ndctl build with libndctl -f testconfig-file custom testconfig.sh EOF exit 1 } # # command-line argument processing... # args=`getopt he:c:r:n:t:d:s:w:o:f: $*` [ $? 
!= 0 ] && usage set -- $args for arg do receivetype=auto case "$arg" in -e) EXPERIMENTAL="$2" shift 2 ;; -c) BUILD_PACKAGE_CHECK="$2" shift 2 ;; -f) TEST_CONFIG_FILE="$2" shift 2 ;; -r) BUILD_RPMEM="$2" shift 2 ;; -n) NDCTL_ENABLE="$2" shift 2 ;; -t) PACKAGE_VERSION_TAG="$2" shift 2 ;; -s) SOURCE="$2" shift 2 ;; -w) WORKING_DIR="$2" shift 2 ;; -o) OUT_DIR="$2" shift 2 ;; --) shift break ;; esac done # check for mandatory arguments if [ -z "$PACKAGE_VERSION_TAG" -o -z "$SOURCE" -o -z "$WORKING_DIR" -o -z "$OUT_DIR" ] then error "Mandatory arguments missing" usage fi PREFIX=usr LIB_DIR=$PREFIX/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH) INC_DIR=$PREFIX/include MAN1_DIR=$PREFIX/share/man/man1 MAN3_DIR=$PREFIX/share/man/man3 MAN5_DIR=$PREFIX/share/man/man5 MAN7_DIR=$PREFIX/share/man/man7 DOC_DIR=$PREFIX/share/doc if [ "$EXTRA_CFLAGS_RELEASE" = "" ]; then export EXTRA_CFLAGS_RELEASE="-ggdb -fno-omit-frame-pointer" fi LIBFABRIC_MIN_VERSION=1.4.2 NDCTL_MIN_VERSION=60.1 function convert_changelog() { while read line do if [[ $line =~ $REGEX_DATE_AUTHOR ]] then DATE="${BASH_REMATCH[1]}" AUTHOR="${BASH_REMATCH[2]}" echo " * ${DATE} ${AUTHOR}" elif [[ $line =~ $REGEX_MESSAGE_START ]] then MESSAGE="${BASH_REMATCH[1]}" echo " - ${MESSAGE}" elif [[ $line =~ $REGEX_MESSAGE ]] then MESSAGE="${BASH_REMATCH[1]}" echo " ${MESSAGE}" fi done < $1 } function rpmem_install_triggers_overrides() { cat << EOF > debian/librpmem.install $LIB_DIR/librpmem.so.* EOF cat << EOF > debian/librpmem.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug librpmem: package-name-doesnt-match-sonames EOF cat << EOF > debian/librpmem-dev.install $LIB_DIR/pmdk_debug/librpmem.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/librpmem.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/librpmem.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/librpmem.so $LIB_DIR/pkgconfig/librpmem.pc $INC_DIR/librpmem.h $MAN7_DIR/librpmem.7 $MAN3_DIR/rpmem_*.3 EOF cat << EOF > debian/librpmem-dev.triggers interest man-db EOF cat << EOF > debian/librpmem-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* EOF cat << EOF > debian/rpmemd.install usr/bin/rpmemd $MAN1_DIR/rpmemd.1 EOF cat << EOF > debian/rpmemd.triggers interest man-db EOF cat << EOF > debian/rpmemd.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF } function append_rpmem_control() { cat << EOF >> $CONTROL_FILE Package: librpmem Architecture: any Depends: libfabric (>= $LIBFABRIC_MIN_VERSION), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory remote access support library librpmem provides low-level support for remote access to persistent memory (pmem) utilizing RDMA-capable RNICs. The library can be used to replicate remotely a memory region over RDMA protocol. It utilizes appropriate persistency mechanism based on remote node’s platform capabilities. The librpmem utilizes the ssh client to authenticate a user on remote node and for encryption of connection’s out-of-band configuration data. . This library is for applications that use remote persistent memory directly, without the help of any library-supplied transactions or memory allocation. Higher-level libraries that build on libpmem are available and are recommended for most applications. 
Package: librpmem-dev Section: libdevel Architecture: any Depends: librpmem (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for librpmem librpmem provides low-level support for remote access to persistent memory (pmem) utilizing RDMA-capable RNICs. . This package contains libraries and header files used for linking programs against librpmem. Package: rpmemd Section: misc Architecture: any Priority: optional Depends: libfabric (>= $LIBFABRIC_MIN_VERSION), \${shlibs:Depends}, \${misc:Depends} Description: rpmem daemon Daemon for Remote Persistent Memory support EOF } function daxio_install_triggers_overrides() { cat << EOF > debian/daxio.install usr/bin/daxio $MAN1_DIR/daxio.1 EOF cat << EOF > debian/daxio.triggers interest man-db EOF cat << EOF > debian/daxio.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF } function append_daxio_control() { cat << EOF >> $CONTROL_FILE Package: daxio Section: misc Architecture: any Priority: optional Depends: libpmem (=\${binary:Version}), libndctl (>= $NDCTL_MIN_VERSION), libdaxctl (>= $NDCTL_MIN_VERSION), \${shlibs:Depends}, \${misc:Depends} Description: daxio utility The daxio utility performs I/O on Device DAX devices or zero a Device DAX device. Since the standard I/O APIs (read/write) cannot be used with Device DAX, data transfer is performed on a memory-mapped device. The daxio may be used to dump Device DAX data to a file, restore data from a backup copy, move/copy data to another device or to erase data from a device. EOF } if [ "${BUILD_PACKAGE_CHECK}" == "y" ] then CHECK_CMD=" override_dh_auto_test: dh_auto_test if [ -f $TEST_CONFIG_FILE ]; then\ cp $TEST_CONFIG_FILE src/test/testconfig.sh;\ else\ cp src/test/testconfig.sh.example src/test/testconfig.sh;\ fi make pcheck ${PCHECK_OPTS} " else CHECK_CMD=" override_dh_auto_test: " fi check_tool debuild check_tool dch check_file $SCRIPT_DIR/pkg-config.sh source $SCRIPT_DIR/pkg-config.sh PACKAGE_VERSION=$(get_version $PACKAGE_VERSION_TAG) PACKAGE_RELEASE=1 PACKAGE_SOURCE=${PACKAGE_NAME}-${PACKAGE_VERSION} PACKAGE_TARBALL_ORIG=${PACKAGE_NAME}_${PACKAGE_VERSION}.orig.tar.gz MAGIC_INSTALL=utils/magic-install.sh MAGIC_UNINSTALL=utils/magic-uninstall.sh CONTROL_FILE=debian/control [ -d $WORKING_DIR ] || mkdir $WORKING_DIR [ -d $OUT_DIR ] || mkdir $OUT_DIR OLD_DIR=$PWD cd $WORKING_DIR check_dir $SOURCE mv $SOURCE $PACKAGE_SOURCE tar zcf $PACKAGE_TARBALL_ORIG $PACKAGE_SOURCE cd $PACKAGE_SOURCE mkdir debian # Generate compat file cat << EOF > debian/compat 9 EOF # Generate control file cat << EOF > $CONTROL_FILE Source: $PACKAGE_NAME Maintainer: $PACKAGE_MAINTAINER Section: libs Priority: optional Standards-version: 4.1.4 Build-Depends: debhelper (>= 9) Homepage: http://pmem.io/pmdk/ Package: libpmem Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory low level support library libpmem provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. Package: libpmem-dev Section: libdevel Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmem libpmem provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. 
Package: libpmemblk Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory block array support library libpmemblk implements a pmem-resident array of blocks, all the same size, where a block is updated atomically with respect to power failure or program interruption (no torn blocks). Package: libpmemblk-dev Section: libdevel Architecture: any Depends: libpmemblk (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemblk libpmemblk implements a pmem-resident array of blocks, all the same size, where a block is updated atomically with respect to power failure or program interruption (no torn blocks). Package: libpmemlog Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory log file support library libpmemlog implements a pmem-resident log file. Package: libpmemlog-dev Section: libdevel Architecture: any Depends: libpmemlog (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemlog libpmemlog implements a pmem-resident log file. Package: libpmemcto Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory allocator cto library The libpmemcto library is a persistent memory allocator providing malloc-like interfaces, with no overhead imposed by run-time flushing or transactional updates. An overhead is imposed only when program exits normally and have to flush the pool contents. . NOTE: This is an experimental API and should not be used in production environments. Package: libpmemcto-dev Section: libdevel Architecture: any Depends: libpmemcto (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemcto The libpmemcto library is a persistent memory allocator providing malloc-like interfaces, with no overhead imposed by run-time flushing or transactional updates. An overhead is imposed only when program exits normally and have to flush the pool contents. . NOTE: This is an experimental API and should not be used in production environments. . This package contains libraries and header files used for linking programs against libpmemcto. Package: libpmemobj Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory object store support library libpmemobj turns a persistent memory file into a flexible object store, supporting transactions, memory management, locking, lists, and a number of other features. Package: libpmemobj-dev Section: libdevel Architecture: any Depends: libpmemobj (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemobj libpmemobj turns a persistent memory file into a flexible object store, supporting transactions, memory management, locking, lists, and a number of other features. . This package contains libraries and header files used for linking programs against libpmemobj. Package: libpmempool Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory pool management support library libpmempool provides a set of utilities for management, diagnostics and repair of persistent memory pools. 
A pool in this context means a pmemobj pool, pmemblk pool, pmemlog pool or BTT layout, independent of the underlying storage. The libpmempool is for applications that need high reliability or built-in troubleshooting. It may be useful for testing and debugging purposes also. Package: libpmempool-dev Section: libdevel Architecture: any Depends: libpmempool (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmempool libpmempool provides a set of utilities for management, diagnostics and repair of persistent memory pools. . This package contains libraries and header files used for linking programs against libpmempool. Package: libvmem Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory volatile memory support library The libvmem library turns a pool of persistent memory into a volatile memory pool, similar to the system heap but kept separate and with its own malloc-style API. . libvmem supports the traditional malloc/free interfaces on a memory mapped file. This allows the use of persistent memory as volatile memory, for cases where the pool of persistent memory is useful to an application, but when the application doesn’t need it to be persistent. Package: libvmem-dev Section: libdevel Architecture: any Depends: libvmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Development files for libvmem The libvmem library turns a pool of persistent memory into a volatile memory pool, similar to the system heap but kept separate and with its own malloc-style API. . This package contains libraries and header files used for linking programs against libvmem. Package: libvmmalloc Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory dynamic allocation support library The libvmmalloc library transparently converts all the dynamic memory allocations into persistent memory allocations. This allows the use of persistent memory as volatile memory without modifying the target application. Package: libvmmalloc-dev Section: libdevel Architecture: any Depends: libvmmalloc (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Development files for libvmmalloc The libvmmalloc library transparently converts all the dynamic memory allocations into persistent memory allocations. . This package contains libraries and header files used for linking programs against libvmalloc. Package: $PACKAGE_NAME-dbg Section: debug Priority: optional Architecture: any Depends: libvmem (=\${binary:Version}), libvmmalloc (=\${binary:Version}), libpmem (=\${binary:Version}), libpmemblk (=\${binary:Version}), libpmemlog (=\${binary:Version}), libpmemobj (=\${binary:Version}), libpmemcto (=\${binary:Version}), libpmempool (=\${binary:Version}), \${misc:Depends} Description: Debug symbols for PMDK libraries Debug symbols for all PMDK libraries. Package: pmempool Section: misc Architecture: any Priority: optional Depends: \${shlibs:Depends}, \${misc:Depends} Description: Standalone utility for management and off-line analysis of Persistent Memory pools created by PMDK libraries. It provides a set of utilities for administration and diagnostics of Persistent Memory pools. Pmempool may be useful for troubleshooting by system administrators and users of the applications based on PMDK libraries. 
EOF cp LICENSE debian/copyright if [ -n "$NDCTL_ENABLE" ]; then pass_ndctl_enable="NDCTL_ENABLE=$NDCTL_ENABLE" else pass_ndctl_enable="" fi cat << EOF > debian/rules #!/usr/bin/make -f #export DH_VERBOSE=1 %: dh \$@ override_dh_strip: dh_strip --dbg-package=$PACKAGE_NAME-dbg override_dh_auto_build: dh_auto_build -- EXPERIMENTAL=${EXPERIMENTAL} prefix=/$PREFIX libdir=/$LIB_DIR includedir=/$INC_DIR docdir=/$DOC_DIR man1dir=/$MAN1_DIR man3dir=/$MAN3_DIR man5dir=/$MAN5_DIR man7dir=/$MAN7_DIR sysconfdir=/etc bashcompdir=/usr/share/bash-completion/completions NORPATH=1 ${pass_ndctl_enable} SRCVERSION=$SRCVERSION override_dh_auto_install: dh_auto_install -- EXPERIMENTAL=${EXPERIMENTAL} prefix=/$PREFIX libdir=/$LIB_DIR includedir=/$INC_DIR docdir=/$DOC_DIR man1dir=/$MAN1_DIR man3dir=/$MAN3_DIR man5dir=/$MAN5_DIR man7dir=/$MAN7_DIR sysconfdir=/etc bashcompdir=/usr/share/bash-completion/completions NORPATH=1 ${pass_ndctl_enable} SRCVERSION=$SRCVERSION find -path './debian/*usr/share/man/man*/*.gz' -exec gunzip {} \; override_dh_install: mkdir -p debian/tmp/usr/share/pmdk/ cp utils/pmdk.magic debian/tmp/usr/share/pmdk/ dh_install ${CHECK_CMD} EOF chmod +x debian/rules mkdir debian/source ITP_BUG_EXCUSE="# This is our first package but we do not want to upload it yet. # Please refer to Debian Developer's Reference section 5.1 (New packages) for details: # https://www.debian.org/doc/manuals/developers-reference/pkgs.html#newpackage" cat << EOF > debian/source/format 3.0 (quilt) EOF cat << EOF > debian/libpmem.install $LIB_DIR/libpmem.so.* usr/share/pmdk/pmdk.magic $MAN5_DIR/poolset.5 EOF cat $MAGIC_INSTALL > debian/libpmem.postinst sed -i '1s/.*/\#\!\/bin\/bash/' debian/libpmem.postinst echo $'\n#DEBHELPER#\n' >> debian/libpmem.postinst cat $MAGIC_UNINSTALL > debian/libpmem.prerm sed -i '1s/.*/\#\!\/bin\/bash/' debian/libpmem.prerm echo $'\n#DEBHELPER#\n' >> debian/libpmem.prerm cat << EOF > debian/libpmem.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmem: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmem-dev.install $LIB_DIR/pmdk_debug/libpmem.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmem.so $LIB_DIR/pkgconfig/libpmem.pc $INC_DIR/libpmem.h $MAN7_DIR/libpmem.7 $MAN3_DIR/pmem_*.3 EOF cat << EOF > debian/libpmem-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmem-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemblk.install $LIB_DIR/libpmemblk.so.* EOF cat << EOF > debian/libpmemblk.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemblk: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemblk-dev.install $LIB_DIR/pmdk_debug/libpmemblk.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemblk.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemblk.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemblk.so $LIB_DIR/pkgconfig/libpmemblk.pc $INC_DIR/libpmemblk.h $MAN7_DIR/libpmemblk.7 $MAN3_DIR/pmemblk_*.3 EOF cat << EOF > debian/libpmemblk-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libpmemblk-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemlog.install $LIB_DIR/libpmemlog.so.* EOF cat << EOF > debian/libpmemlog.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemlog: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemlog-dev.install $LIB_DIR/pmdk_debug/libpmemlog.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemlog.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemlog.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemlog.so $LIB_DIR/pkgconfig/libpmemlog.pc $INC_DIR/libpmemlog.h $MAN7_DIR/libpmemlog.7 $MAN3_DIR/pmemlog_*.3 EOF cat << EOF > debian/libpmemlog-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libpmemlog-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemcto.install $LIB_DIR/libpmemcto.so.* EOF cat << EOF > debian/libpmemcto.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemcto: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemcto-dev.install $LIB_DIR/pmdk_debug/libpmemcto.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemcto.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemcto.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemcto.so $LIB_DIR/pkgconfig/libpmemcto.pc $INC_DIR/libpmemcto.h $MAN7_DIR/libpmemcto.7 $MAN3_DIR/pmemcto*.3 EOF cat << EOF > debian/libpmemcto-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmemcto-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemobj.install $LIB_DIR/libpmemobj.so.* EOF cat << EOF > debian/libpmemobj.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemobj: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemobj-dev.install $LIB_DIR/pmdk_debug/libpmemobj.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemobj.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemobj.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemobj.so $LIB_DIR/pkgconfig/libpmemobj.pc $INC_DIR/libpmemobj.h $INC_DIR/libpmemobj/*.h $MAN7_DIR/libpmemobj.7 $MAN3_DIR/pmemobj_*.3 $MAN3_DIR/pobj_*.3 $MAN3_DIR/oid_*.3 $MAN3_DIR/toid*.3 $MAN3_DIR/direct_*.3 $MAN3_DIR/d_r*.3 $MAN3_DIR/tx_*.3 EOF cat << EOF > debian/libpmemobj-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libpmemobj-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmempool.install $LIB_DIR/libpmempool.so.* EOF cat << EOF > debian/libpmempool.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmempool: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmempool-dev.install $LIB_DIR/pmdk_debug/libpmempool.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmempool.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmempool.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmempool.so $LIB_DIR/pkgconfig/libpmempool.pc $INC_DIR/libpmempool.h $MAN7_DIR/libpmempool.7 $MAN3_DIR/pmempool_*.3 EOF cat << EOF > debian/libpmempool-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmempool-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libvmem.install $LIB_DIR/libvmem.so.* EOF cat << EOF > debian/libvmem.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libvmem: package-name-doesnt-match-sonames EOF cat << EOF > debian/libvmem-dev.install $LIB_DIR/pmdk_debug/libvmem.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libvmem.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libvmem.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libvmem.so $LIB_DIR/pkgconfig/libvmem.pc $INC_DIR/libvmem.h $MAN7_DIR/libvmem.7 $MAN3_DIR/vmem_*.3 EOF cat << EOF > debian/libvmem-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libvmem-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libvmmalloc.install $LIB_DIR/libvmmalloc.so.* EOF cat << EOF > debian/libvmmalloc.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libvmmalloc: package-name-doesnt-match-sonames EOF cat << EOF > debian/libvmmalloc-dev.install $LIB_DIR/pmdk_debug/libvmmalloc.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libvmmalloc.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libvmmalloc.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libvmmalloc.so $LIB_DIR/pkgconfig/libvmmalloc.pc $INC_DIR/libvmmalloc.h $MAN7_DIR/libvmmalloc.7 EOF cat << EOF > debian/libvmmalloc-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # http://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libvmmalloc-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/$PACKAGE_NAME-dbg.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF cat << EOF > debian/pmempool.install usr/bin/pmempool $MAN1_DIR/pmempool.1 $MAN1_DIR/pmempool-*.1 usr/share/bash-completion/completions/pmempool EOF cat << EOF > debian/pmempool.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF # librpmem & rpmemd if [ "${BUILD_RPMEM}" = "y" -a "${RPMEM_DPKG}" = "y" ] then append_rpmem_control; rpmem_install_triggers_overrides; fi # daxio if [ "${NDCTL_ENABLE}" != "n" ] then append_daxio_control; daxio_install_triggers_overrides; fi # Convert ChangeLog to debian format CHANGELOG_TMP=changelog.tmp dch --create --empty --package $PACKAGE_NAME -v $PACKAGE_VERSION-$PACKAGE_RELEASE -M -c $CHANGELOG_TMP touch debian/changelog head -n1 $CHANGELOG_TMP >> debian/changelog echo "" >> debian/changelog convert_changelog ChangeLog >> debian/changelog echo "" >> debian/changelog tail -n1 $CHANGELOG_TMP >> debian/changelog rm $CHANGELOG_TMP # This is our first release but we do debuild --preserve-envvar=EXTRA_CFLAGS_RELEASE \ --preserve-envvar=EXTRA_CFLAGS_DEBUG \ --preserve-envvar=EXTRA_CFLAGS \ --preserve-envvar=EXTRA_CXXFLAGS \ --preserve-envvar=EXTRA_LDFLAGS \ --preserve-envvar=NDCTL_ENABLE \ -us -uc cd $OLD_DIR find $WORKING_DIR -name "*.deb"\ -or -name "*.dsc"\ -or -name "*.changes"\ -or -name "*.orig.tar.gz"\ -or -name "*.debian.tar.gz" | while read FILE do mv -v $FILE $OUT_DIR/ done exit 0
29,336
29.84858
316
sh
null
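The debian/rules, *.install and lintian-overrides files generated above are not meant to be run by hand; they are consumed by the debuild call at the end of the script. A minimal sketch of driving that packaging flow, where the make target name and variable values are assumptions rather than something this file confirms:

# hypothetical top-level invocation of the Debian packaging flow
make dpkg EXPERIMENTAL=n NDCTL_ENABLE=n
# on success the script moves the resulting artifacts to $OUT_DIR
ls "$OUT_DIR"/*.deb "$OUT_DIR"/*.dsc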
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check-doc.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # Used to check whether changes to the generated documentation directory # are made by the authorised user. Used only by travis builds. # # usage: ./check-doc.sh # directory=doc/generated allowed_user="Generic builder <[email protected]>" if [[ -z "$TRAVIS" ]]; then echo "ERROR: $0 can only be executed on Travis CI." exit 1 fi if [[ $TRAVIS_REPO_SLUG != "pmem/pmdk" \ || $TRAVIS_EVENT_TYPE != "pull_request" ]]; then echo "SKIP: $0 can only be executed for pull requests to pmem/pmdk" exit 0 fi # Find all the commits for the current build if [[ -n "$TRAVIS_COMMIT_RANGE" ]]; then commits=$(git rev-list $TRAVIS_COMMIT_RANGE) else commits=$TRAVIS_COMMIT fi # Check for changes in the generated docs directory # Only new files are allowed (first version) for commit in $commits; do last_author=$(git --no-pager show -s --format='%aN <%aE>' $commit) if [ "$last_author" == "$allowed_user" ]; then continue fi fail=$(git diff-tree --no-commit-id --name-status -r $commit | grep -c ^M.*$directory) if [ $fail -ne 0 ]; then echo "FAIL: changes to ${directory} allowed only by \"${allowed_user}\"" exit 1 fi done
2,727
34.428571
87
sh
null
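check-doc.sh exits early unless it believes it is running on Travis, so a local dry run has to fake the variables it reads. A minimal sketch; all values below are illustrative assumptions:

# simulate a Travis pull-request build against pmem/pmdk
TRAVIS=true \
TRAVIS_REPO_SLUG=pmem/pmdk \
TRAVIS_EVENT_TYPE=pull_request \
TRAVIS_COMMIT_RANGE=origin/master...HEAD \
./utils/check-doc.sh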
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check-os.sh
#!/usr/bin/env bash
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in
#       the documentation and/or other materials provided with the
#       distribution.
#
#     * Neither the name of the copyright holder nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Used to check that there are no banned functions in a .o file
#
# usage: ./check-os.sh [os.h path] [.o file] [.c file]

EXCLUDE="os_linux|os_thread_linux"

if [[ $2 =~ $EXCLUDE ]]; then
	echo "skip $2"
	exit 0
fi

# undefined symbols of the object file vs. the wrapped functions listed in os.h
symbols=$(nm --demangle --undefined-only --format=posix $2 | sed 's/ U *//g')
functions=$(cat $1 | tr '\n' '|')

out=$(
	for sym in $symbols
	do
		grep -w $functions <<<"$sym"
	done | sed 's/$/\(\)/g')

[[ ! -z $out ]] &&
	echo -e "`pwd`/$3:1: non wrapped function(s):\n$out\nplease use os wrappers" &&
	rm -f $2 && # remove the .o file as it doesn't match the requirements
	exit 1

exit 0
2,176
35.898305
80
sh
null
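check-os.sh is intended to be called per object file from the build system; a standalone call would look like the sketch below (the .o/.c paths are hypothetical):

# flag libc calls in util.o that are not routed through the os_* wrappers listed in os.h
./utils/check-os.sh src/common/os.h src/common/util.o src/common/util.c
# objects matching os_linux/os_thread_linux are skipped - they implement the wrappers themselves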
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/magic-uninstall.sh
#!/usr/bin/env bash # # Copyright 2014-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # magic-uninstall.sh -- Script for uninstalling magic script # set -e HDR_LOCAL=$(grep "File: pmdk" /etc/magic) HDR_PKG=$(grep "File: pmdk" /usr/share/pmdk/pmdk.magic) if [[ $HDR_LOCAL == $HDR_PKG ]] then echo "Removing PMDK magic from /etc/magic" HDR_LINE=$(grep -n "File: pmdk" /etc/magic | cut -f1 -d:) HDR_PKG_LINE=$(grep -n "File: pmdk" /usr/share/pmdk/pmdk.magic | cut -f1 -d:) HDR_LINES=$(cat /usr/share/pmdk/pmdk.magic | wc -l) HDR_FIRST=$(($HDR_LINE - $HDR_PKG_LINE + 1)) HDR_LAST=$(($HDR_FIRST + $HDR_LINES)) sed -i "${HDR_FIRST},${HDR_LAST}d" /etc/magic fi
2,166
42.34
78
sh
null
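The line arithmetic in magic-uninstall.sh is easier to follow with concrete numbers; the values below are invented purely for illustration and simply replay the script's own computation:

# assume "File: pmdk" is at line 120 of /etc/magic, at line 3 of the packaged
# pmdk.magic, and pmdk.magic is 10 lines long
HDR_FIRST=$((120 - 3 + 1))              # 118
HDR_LAST=$((HDR_FIRST + 10))            # 128 (as computed by the script)
sed -i "${HDR_FIRST},${HDR_LAST}d" /etc/magic   # deletes that block from /etc/magic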
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/pkg-config.sh
# # Copyright 2014-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Name of package PACKAGE_NAME="pmdk" # Name and email of package maintainer PACKAGE_MAINTAINER="Krzysztof Czurylo <[email protected]>" # Brief description of the package PACKAGE_SUMMARY="Persistent Memory Development Kit" # Full description of the package PACKAGE_DESCRIPTION="The collection of libraries and utilities for Persistent Memory Programming" # Website PACKAGE_URL="http://pmem.io/pmdk"
1,981
42.086957
97
sh
null
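pkg-config.sh only defines metadata variables and is meant to be sourced by the packaging scripts; a minimal consumer:

# read the package metadata defined above
. ./utils/pkg-config.sh
echo "building $PACKAGE_NAME: $PACKAGE_SUMMARY ($PACKAGE_URL)"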
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/style_check.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # utils/style_check.sh -- common style checking script # set -e ARGS=("$@") CSTYLE_ARGS=() CLANG_ARGS=() CHECK_TYPE=$1 [ -z "$clang_format_bin" ] && clang_format_bin=clang-format # # print script usage # function usage() { echo "$0 <check|format> [C/C++ files]" } # # require clang-format version 3.8 # function check_clang_version() { set +e which ${clang_format_bin} &> /dev/null && ${clang_format_bin} --version |\ grep "version 3\.8"\ &> /dev/null if [ $? -ne 0 ]; then echo "SKIP: requires clang-format version 3.8" exit 0 fi set -e } # # run old cstyle check # function run_cstyle() { if [ $# -eq 0 ]; then return fi ${cstyle_bin} -pP $@ } # # generate diff with clang-format rules # function run_clang_check() { if [ $# -eq 0 ]; then return fi check_clang_version for file in $@ do LINES=$(${clang_format_bin} -style=file $file |\ git diff --no-index $file - | wc -l) if [ $LINES -ne 0 ]; then ${clang_format_bin} -style=file $file | git diff --no-index $file - fi done } # # in-place format according to clang-format rules # function run_clang_format() { if [ $# -eq 0 ]; then return fi check_clang_version ${clang_format_bin} -style=file -i $@ } for ((i=1; i<$#; i++)) { IGNORE="$(dirname ${ARGS[$i]})/.cstyleignore" if [ -e $IGNORE ]; then if grep -q ${ARGS[$i]} $IGNORE ; then echo "SKIP ${ARGS[$i]}" continue fi fi case ${ARGS[$i]} in *.[ch]pp) CLANG_ARGS+="${ARGS[$i]} " ;; *.[ch]) CSTYLE_ARGS+="${ARGS[$i]} " ;; *) echo "Unknown argument" exit 1 ;; esac } case $CHECK_TYPE in check) run_cstyle ${CSTYLE_ARGS} run_clang_check ${CLANG_ARGS} ;; format) run_clang_format ${CLANG_ARGS} ;; *) echo "Invalid parameters" usage exit 1 ;; esac
3,361
21.563758
75
sh
null
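style_check.sh expects cstyle_bin (and optionally clang_format_bin) to be provided by the caller, normally the Makefile; a hand-run sketch with assumed paths and example file names:

export cstyle_bin=./utils/cstyle
# .c/.h files go through cstyle, .cpp/.hpp files through clang-format 3.8
./utils/style_check.sh check src/libpmemobj/obj.c src/libpmemobj/tx.c
./utils/style_check.sh format src/examples/example.cpp   # in-place reformat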
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/version.sh
#!/usr/bin/env bash # # Copyright 2017-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # utils/version.sh -- determine project's version # set -e if [ -f "$1/VERSION" ]; then cat "$1/VERSION" exit 0 fi if [ -f $1/GIT_VERSION ]; then echo -n "\$Format:%h %d\$" | cmp -s $1/GIT_VERSION - && true if [ $? -eq 0 ]; then PARSE_GIT_VERSION=0 else PARSE_GIT_VERSION=1 fi else PARSE_GIT_VERSION=0 fi if [ $PARSE_GIT_VERSION -eq 1 ]; then GIT_VERSION_TAG=$(cat $1/GIT_VERSION | grep tag: | sed 's/.*tag: \([0-9a-z.+-]*\).*/\1/') GIT_VERSION_HASH=$(cat $1/GIT_VERSION | sed -e 's/ .*//') if [ -n "$GIT_VERSION_TAG" ]; then echo "$GIT_VERSION_TAG" exit 0 fi if [ -n "$GIT_VERSION_HASH" ]; then echo "$GIT_VERSION_HASH" exit 0 fi fi cd "$1" GIT_DESCRIBE=$(git describe 2>/dev/null) && true if [ -n "$GIT_DESCRIBE" ]; then echo "$GIT_DESCRIBE" exit 0 fi # try commit it, git describe can fail when there are no tags (e.g. with shallow clone, like on Travis) GIT_COMMIT=$(git log -1 --format=%h) && true if [ -n "$GIT_COMMIT" ]; then echo "$GIT_COMMIT" exit 0 fi cd - >/dev/null # If nothing works, try to get version from directory name VER=$(basename `realpath "$1"` | sed 's/pmdk[-]*\([0-9a-z.+-]*\).*/\1/') if [ -n "$VER" ]; then echo "$VER" exit 0 fi exit 1
2,793
29.043011
103
sh
null
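version.sh tries several sources in order (VERSION file, expanded GIT_VERSION, git describe, bare commit hash, directory name); sample invocations with purely illustrative outputs:

./utils/version.sh .                 # on a git checkout: prints e.g. "1.4-50-g1234abc" from git describe
./utils/version.sh /tmp/pmdk-1.4.2   # no git metadata: falls back to "1.4.2" taken from the directory name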
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check_license/file-exceptions.sh
#!/bin/sh -e # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # file-exceptions.sh - filter out files not checked for copyright and license grep -v -E -e 'src/jemalloc/' -e 'src/windows/jemalloc_gen/' -e '/queue.h$' -e '/getopt.h$' -e '/getopt.c$' -e 'src/common/valgrind/'
1,796
47.567568
133
sh
null
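file-exceptions.sh is a pure pass-through filter, so it normally sits in the middle of a pipeline; for example:

# drop vendored and generated sources before feeding the list to the license check
git ls-files | ./utils/check_license/file-exceptions.sh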
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/check_license/check-headers.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # check-headers.sh - check copyright and license in source files SELF=$0 function usage() { echo "Usage: $SELF <source_root_path> <check_license_bin_path> <license_path> [-h|-v|-a]" echo " -h, --help this help message" echo " -v, --verbose verbose mode" echo " -a, --all check all files (only modified files are checked by default)" } if [ "$#" -lt 3 ]; then usage >&2 exit 2 fi SOURCE_ROOT=$1 shift CHECK_LICENSE=$1 shift LICENSE=$1 shift PATTERN=`mktemp` TMP=`mktemp` TMP2=`mktemp` TEMPFILE=`mktemp` rm -f $PATTERN $TMP $TMP2 function exit_if_not_exist() { if [ ! -f $1 ]; then echo "Error: file $1 does not exist. Exiting..." >&2 exit 1 fi } if [ "$1" == "-h" -o "$1" == "--help" ]; then usage exit 0 fi exit_if_not_exist $LICENSE exit_if_not_exist $CHECK_LICENSE export GIT="git -C ${SOURCE_ROOT}" $GIT rev-parse || exit 1 if [ -f $SOURCE_ROOT/.git/shallow ]; then SHALLOW_CLONE=1 echo echo "Warning: This is a shallow clone. Checking dates in copyright headers" echo " will be skipped in case of files that have no history." echo else SHALLOW_CLONE=0 fi VERBOSE=0 CHECK_ALL=0 while [ "$1" != "" ]; do case $1 in -v|--verbose) VERBOSE=1 ;; -a|--all) CHECK_ALL=1 ;; esac shift done if [ $CHECK_ALL -eq 0 ]; then CURRENT_COMMIT=$($GIT log --pretty=%H -1) MERGE_BASE=$($GIT merge-base HEAD origin/master 2>/dev/null) [ -z $MERGE_BASE ] && \ MERGE_BASE=$($GIT log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) [ -z $MERGE_BASE -o "$CURRENT_COMMIT" = "$MERGE_BASE" ] && \ CHECK_ALL=1 fi if [ $CHECK_ALL -eq 1 ]; then echo "Checking copyright headers of all files..." GIT_COMMAND="ls-tree -r --name-only HEAD" else echo echo "Warning: will check copyright headers of modified files only," echo " in order to check all files issue the following command:" echo " $ $SELF <source_root_path> <check_license_bin_path> <license_path> -a" echo " (e.g.: $ $SELF $SOURCE_ROOT $CHECK_LICENSE $LICENSE -a)" echo echo "Checking copyright headers of modified files only..." 
GIT_COMMAND="diff --name-only $MERGE_BASE $CURRENT_COMMIT" fi FILES=$($GIT $GIT_COMMAND | ${SOURCE_ROOT}/utils/check_license/file-exceptions.sh | \ grep -E -e '*\.[chs]$' -e '*\.[ch]pp$' -e '*\.sh$' \ -e '*\.py$' -e '*\.map$' -e 'Makefile*' -e 'TEST*' \ -e '/common.inc$' -e '/match$' -e '/check_whitespace$' \ -e 'LICENSE$' -e 'CMakeLists.txt$' -e '*\.cmake$' | \ xargs) # jemalloc.mk has to be checked always, because of the grep rules above FILES="$FILES src/jemalloc/jemalloc.mk" # create a license pattern file $CHECK_LICENSE create $LICENSE $PATTERN [ $? -ne 0 ] && exit 1 RV=0 for file in $FILES ; do # The src_path is a path which should be used in every command except git. # git is called with -C flag so filepaths should be relative to SOURCE_ROOT src_path="${SOURCE_ROOT}/$file" [ ! -f $src_path ] && continue # ensure that file is UTF-8 encoded ENCODING=`file -b --mime-encoding $src_path` iconv -f $ENCODING -t "UTF-8" $src_path > $TEMPFILE YEARS=`$CHECK_LICENSE check-pattern $PATTERN $TEMPFILE $src_path` if [ $? -ne 0 ]; then echo -n $YEARS RV=1 else HEADER_FIRST=`echo $YEARS | cut -d"-" -f1` HEADER_LAST=` echo $YEARS | cut -d"-" -f2` if [ $SHALLOW_CLONE -eq 0 ]; then $GIT log --no-merges --format="%ai %aE" -- $file | sort > $TMP else # mark the grafted commits (commits with no parents) $GIT log --no-merges --format="%ai %aE grafted-%p-commit" -- $file | sort > $TMP fi # skip checking dates for non-Intel commits [[ ! $(tail -n1 $TMP) =~ "@intel.com" ]] && continue # skip checking dates for new files [ $(cat $TMP | wc -l) -le 1 ] && continue # grep out the grafted commits (commits with no parents) # and skip checking dates for non-Intel commits grep -v -e "grafted--commit" $TMP | grep -e "@intel.com" > $TMP2 [ $(cat $TMP2 | wc -l) -eq 0 ] && continue FIRST=`head -n1 $TMP2` LAST=` tail -n1 $TMP2` COMMIT_FIRST=`echo $FIRST | cut -d"-" -f1` COMMIT_LAST=` echo $LAST | cut -d"-" -f1` if [ "$COMMIT_FIRST" != "" -a "$COMMIT_LAST" != "" ]; then if [ $HEADER_LAST -lt $COMMIT_LAST ]; then if [ $HEADER_FIRST -lt $COMMIT_FIRST ]; then COMMIT_FIRST=$HEADER_FIRST fi COMMIT_LAST=`date +%G` if [ $COMMIT_FIRST -eq $COMMIT_LAST ]; then NEW=$COMMIT_LAST else NEW=$COMMIT_FIRST-$COMMIT_LAST fi echo "$file:1: error: wrong copyright date: (is: $YEARS, should be: $NEW)" >&2 RV=1 fi else echo "error: unknown commit dates in file: $file" >&2 RV=1 fi fi done rm -f $TMP $TMP2 $TEMPFILE # check if error found if [ $RV -eq 0 ]; then echo "Copyright headers are OK." else echo "Error(s) in copyright headers found!" >&2 fi exit $RV
6,380
28.817757
90
sh
null
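A typical invocation of check-headers.sh; the helper binary name below is an assumption (it is whatever gets built in utils/check_license), and -a forces a full scan instead of the merge-base diff described above:

./utils/check_license/check-headers.sh "$(pwd)" \
	./utils/check_license/check-license ./LICENSE -a -v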
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/build-local.sh
#!/usr/bin/env bash # # Copyright 2017-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # build-local.sh - runs a Docker container from a Docker image with environment # prepared for building PMDK project and starts building PMDK # # this script is for building PMDK locally (not on Travis) # # Notes: # - run this script from its location or set the variable 'HOST_WORKDIR' to # where the root of the PMDK project is on the host machine, # - set variables 'OS' and 'OS_VER' properly to a system you want to build PMDK # on (for proper values take a look on the list of Dockerfiles at the # utils/docker/images directory), eg. OS=ubuntu, OS_VER=16.04. # - set 'KEEP_TEST_CONFIG' variable to 1 if you do not want the tests to be # reconfigured (your current test configuration will be preserved and used) # - tests with Device Dax are not supported by pcheck yet, so do not provide # these devices in your configuration # set -e # Environment variables that can be customized (default values are after dash): export KEEP_CONTAINER=${KEEP_CONTAINER:-0} export KEEP_TEST_CONFIG=${KEEP_TEST_CONFIG:-0} export TEST_BUILD=${TEST_BUILD:-all} export REMOTE_TESTS=${REMOTE_TESTS:-1} export MAKE_PKG=${MAKE_PKG:-0} export EXTRA_CFLAGS=${EXTRA_CFLAGS} export EXTRA_CXXFLAGS=${EXTRA_CXXFLAGS:-} export PMDK_CC=${PMDK_CC:-gcc} export PMDK_CXX=${PMDK_CXX:-g++} export EXPERIMENTAL=${EXPERIMENTAL:-n} export VALGRIND=${VALGRIND:-1} if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then HOST_WORKDIR=$(readlink -f ../..) 
fi if [[ "$KEEP_CONTAINER" != "1" ]]; then RM_SETTING=" --rm" fi imageName=pmem/pmdk:${OS}-${OS_VER} containerName=pmdk-${OS}-${OS_VER} if [[ $MAKE_PKG -eq 1 ]] ; then command="./run-build-package.sh" else command="./run-build.sh" fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi if [ -z "$NDCTL_ENABLE" ]; then ndctl_enable=; else ndctl_enable="--env NDCTL_ENABLE=$NDCTL_ENABLE"; fi WORKDIR=/pmdk SCRIPTSDIR=$WORKDIR/utils/docker echo Building ${OS}-${OS_VER} # Run a container with # - environment variables set (--env) # - host directory containing PMDK source mounted (-v) # - working directory set (-w) docker run --privileged=true --name=$containerName -ti \ $RM_SETTING \ $DNS_SETTING \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env CC=$PMDK_CC \ --env CXX=$PMDK_CXX \ --env VALGRIND=$VALGRIND \ --env EXTRA_CFLAGS=$EXTRA_CFLAGS \ --env EXTRA_CXXFLAGS=$EXTRA_CXXFLAGS \ --env REMOTE_TESTS=$REMOTE_TESTS \ --env CONFIGURE_TESTS=$CONFIGURE_TESTS \ --env TEST_BUILD=$TEST_BUILD \ --env WORKDIR=$WORKDIR \ --env EXPERIMENTAL=$EXPERIMENTAL \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env CLANG_FORMAT=clang-format-3.8 \ --env KEEP_TEST_CONFIG=$KEEP_TEST_CONFIG \ $ndctl_enable \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ $DAX_SETTING \ -w $SCRIPTSDIR \ $imageName $command
4,536
35.007937
103
sh
null
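A sketch of a local run of build-local.sh; OS/OS_VER must name an existing Dockerfile under utils/docker/images, and the remaining values are illustrative defaults:

cd utils/docker
OS=ubuntu OS_VER=16.04 \
KEEP_TEST_CONFIG=1 REMOTE_TESTS=0 TEST_BUILD=debug \
./build-local.sh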
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/run-build.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # run-build.sh - is called inside a Docker container; prepares the environment # and starts a build of PMDK project. # set -e # Prepare build environment ./prepare-for-build.sh # Build librpmem even if libfabric is not compiled with ibverbs export RPMEM_DISABLE_LIBIBVERBS=y # Build all and run tests cd $WORKDIR make check-license make cstyle make -j2 USE_LIBUNWIND=1 make -j2 test USE_LIBUNWIND=1 make -j2 pcheck TEST_BUILD=$TEST_BUILD make DESTDIR=/tmp source
2,073
36.709091
78
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/prepare-for-build.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # prepare-for-build.sh - is called inside a Docker container; prepares # the environment inside a Docker container for # running build of PMDK project. # set -e # Mount filesystem for tests echo $USERPASS | sudo -S mount -t tmpfs none /tmp -osize=6G # Configure tests (e.g. ssh for remote tests) unless the current configuration # should be preserved KEEP_TEST_CONFIG=${KEEP_TEST_CONFIG:-0} if [[ "$KEEP_TEST_CONFIG" == 0 ]]; then ./configure-tests.sh fi # Check for changes in automatically generated docs (only when on Travis) if [[ -n "$TRAVIS" ]]; then ../check-doc.sh fi
2,218
39.345455
78
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/run-build-package.sh
#!/usr/bin/env bash
#
# Copyright 2016-2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in
#       the documentation and/or other materials provided with the
#       distribution.
#
#     * Neither the name of the copyright holder nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# run-build-package.sh - is called inside a Docker container; prepares
#                        the environment and starts a build of PMDK project.
#

set -e

# Prepare build environment
./prepare-for-build.sh

# Build librpmem even if libfabric is not compiled with ibverbs
export RPMEM_DISABLE_LIBIBVERBS=y

# Create fake tag, so that package has proper 'version' field
git config user.email "[email protected]"
git config user.name "test package"
git tag -a 1.4.99 -m "1.4" HEAD~1

# Build all and run tests
cd $WORKDIR
export PCHECK_OPTS=-j2
make -j2 $PACKAGE_MANAGER

# Install packages
if [[ "$PACKAGE_MANAGER" == "dpkg" ]]; then
	cd $PACKAGE_MANAGER
	echo $USERPASS | sudo -S dpkg --install -R *.deb
else
	cd $PACKAGE_MANAGER/x86_64
	echo $USERPASS | sudo -S rpm --install *.rpm
fi

# Compile and run standalone test
cd $WORKDIR/utils/docker/test_package
make LIBPMEMOBJ_MIN_VERSION=1.4
./test_package testfile1
2,508
35.362319
76
sh
null
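After run-build-package.sh installs the freshly built packages, a comparable manual sanity check could look like this (the version value and the pkg-config module name are assumptions based on the .pc files the packages ship):

pkg-config --modversion libpmemobj
cd utils/docker/test_package && make LIBPMEMOBJ_MIN_VERSION=1.4 && ./test_package testfile1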
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/pull-or-rebuild-image.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # pull-or-rebuild-image.sh - rebuilds the Docker image used in the # current Travis build if necessary. # # The script rebuilds the Docker image if the Dockerfile for the current # OS version (Dockerfile.${OS}-${OS_VER}) or any .sh script from the directory # with Dockerfiles were modified and committed. # # If the Travis build is not of the "pull_request" type (i.e. in case of # merge after pull_request) and it succeed, the Docker image should be pushed # to the Docker Hub repository. An empty file is created to signal that to # further scripts. # # If the Docker image does not have to be rebuilt, it will be pulled from # Docker Hub. # set -e if [[ "$TRAVIS_EVENT_TYPE" != "cron" && "$TRAVIS_BRANCH" != "coverity_scan" \ && "$COVERITY" -eq 1 ]]; then echo "INFO: Skip Coverity scan job if build is triggered neither by " \ "'cron' nor by a push to 'coverity_scan' branch" exit 0 fi if [[ ( "$TRAVIS_EVENT_TYPE" == "cron" || "$TRAVIS_BRANCH" == "coverity_scan" )\ && "$COVERITY" -ne 1 ]]; then echo "INFO: Skip regular jobs if build is triggered either by 'cron'" \ " or by a push to 'coverity_scan' branch" exit 0 fi if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of the PMDK project on the host machine" exit 1 fi # TRAVIS_COMMIT_RANGE is usually invalid for force pushes - ignore such values # when used with non-upstream repository if [ -n "$TRAVIS_COMMIT_RANGE" -a $TRAVIS_REPO_SLUG != "pmem/pmdk" ]; then if ! 
git rev-list $TRAVIS_COMMIT_RANGE; then TRAVIS_COMMIT_RANGE= fi fi # Find all the commits for the current build if [[ -n "$TRAVIS_COMMIT_RANGE" ]]; then commits=$(git rev-list $TRAVIS_COMMIT_RANGE) else commits=$TRAVIS_COMMIT fi echo "Commits in the commit range:" for commit in $commits; do echo $commit; done # Get the list of files modified by the commits files=$(for commit in $commits; do git diff-tree --no-commit-id --name-only \ -r $commit; done | sort -u) echo "Files modified within the commit range:" for file in $files; do echo $file; done # Path to directory with Dockerfiles and image building scripts images_dir_name=images base_dir=utils/docker/$images_dir_name # Check if committed file modifications require the Docker image to be rebuilt for file in $files; do # Check if modified files are relevant to the current build if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \ || [[ $file =~ ^($base_dir)\/.*\.sh$ ]] then # Rebuild Docker image for the current OS version echo "Rebuilding the Docker image for the Dockerfile.$OS-$OS_VER" pushd $images_dir_name ./build-image.sh ${OS}-${OS_VER} popd # Check if the image has to be pushed to Docker Hub # (i.e. the build is triggered by commits to the pmem/pmdk # repository's master branch, and the Travis build is not # of the "pull_request" type). In that case, create the empty # file. if [[ $TRAVIS_REPO_SLUG == "pmem/pmdk" \ && $TRAVIS_BRANCH == "master" \ && $TRAVIS_EVENT_TYPE != "pull_request" && $PUSH_IMAGE == "1" ]] then echo "The image will be pushed to Docker Hub" touch push_image_to_repo_flag else echo "Skip pushing the image to Docker Hub" fi if [[ $PUSH_IMAGE == "1" ]] then echo "Skip build package check if image has to be pushed" touch skip_build_package_check fi exit 0 fi done # Getting here means rebuilding the Docker image is not required. # Pull the image from Docker Hub. docker pull pmem/pmdk:${OS}-${OS_VER}
5,286
35.462069
80
sh
null
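The rebuild decision in pull-or-rebuild-image.sh hinges on two path patterns; the snippet below replays the same test on an assumed file name:

base_dir=utils/docker/images
file=utils/docker/images/Dockerfile.ubuntu-16.04
# rebuild if the Dockerfile for this OS/OS_VER or any helper .sh under images/ was touched
if [[ $file =~ ^($base_dir)\/Dockerfile\.(ubuntu)-(16.04)$ ]] || [[ $file =~ ^($base_dir)\/.*\.sh$ ]]; then
	echo "image rebuild required"
fi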
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/configure-tests.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # configure-tests.sh - is called inside a Docker container; configures tests # and ssh server for use during build of PMDK project. # set -e # Configure tests cat << EOF > $WORKDIR/src/test/testconfig.sh LONGDIR=LoremipsumdolorsitametconsecteturadipiscingelitVivamuslacinianibhattortordictumsollicitudinNullamvariusvestibulumligulaetegestaselitsemperidMaurisultriciesligulaeuipsumtinciduntluctusMorbimaximusvariusdolorid # this path is ~3000 characters long DIRSUFFIX="$LONGDIR/$LONGDIR/$LONGDIR/$LONGDIR/$LONGDIR" NON_PMEM_FS_DIR=/tmp PMEM_FS_DIR=/tmp PMEM_FS_DIR_FORCE_PMEM=1 TEST_BUILD="debug nondebug" ENABLE_SUDO_TESTS=y TM=1 EOF # Configure remote tests if [[ $REMOTE_TESTS -eq 1 ]]; then echo "Configuring remote tests" cat << EOF >> $WORKDIR/src/test/testconfig.sh NODE[0]=127.0.0.1 NODE_WORKING_DIR[0]=/tmp/node0 NODE_ADDR[0]=127.0.0.1 NODE_ENV[0]="PMEM_IS_PMEM_FORCE=1" NODE[1]=127.0.0.1 NODE_WORKING_DIR[1]=/tmp/node1 NODE_ADDR[1]=127.0.0.1 NODE_ENV[1]="PMEM_IS_PMEM_FORCE=1" NODE[2]=127.0.0.1 NODE_WORKING_DIR[2]=/tmp/node2 NODE_ADDR[2]=127.0.0.1 NODE_ENV[2]="PMEM_IS_PMEM_FORCE=1" NODE[3]=127.0.0.1 NODE_WORKING_DIR[3]=/tmp/node3 NODE_ADDR[3]=127.0.0.1 NODE_ENV[3]="PMEM_IS_PMEM_FORCE=1" TEST_BUILD="debug nondebug" TEST_PROVIDERS=sockets EOF mkdir -p ~/.ssh/cm cat << EOF >> ~/.ssh/config Host 127.0.0.1 StrictHostKeyChecking no ControlPath ~/.ssh/cm/%r@%h:%p ControlMaster auto ControlPersist 10m EOF if [ ! 
-f /etc/ssh/ssh_host_rsa_key ] then (echo $USERPASS | sudo -S ssh-keygen -t rsa -C $USER@$HOSTNAME -P '' -f /etc/ssh/ssh_host_rsa_key) fi echo $USERPASS | sudo -S sh -c 'cat /etc/ssh/ssh_host_rsa_key.pub >> /etc/ssh/authorized_keys' ssh-keygen -t rsa -C $USER@$HOSTNAME -P '' -f ~/.ssh/id_rsa cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys chmod -R 700 ~/.ssh chmod 640 ~/.ssh/authorized_keys chmod 600 ~/.ssh/config # Start ssh service echo $USERPASS | sudo -S $START_SSH_COMMAND ssh 127.0.0.1 exit 0 else echo "Skipping remote tests" echo echo "Removing all libfabric.pc files in order to simulate that libfabric is not installed:" find /usr -name "libfabric.pc" 2>/dev/null echo $USERPASS | sudo -S sh -c 'find /usr -name "libfabric.pc" -exec rm -f {} + 2>/dev/null' fi
3,843
34.266055
216
sh
null
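With REMOTE_TESTS=1, configure-tests.sh turns the container into its own four "remote" nodes over loopback ssh; a quick post-run check (run inside the same container, commands assumed):

grep 'NODE\[' src/test/testconfig.sh         # should list NODE[0]..NODE[3] as 127.0.0.1
ssh 127.0.0.1 'echo loopback node reachable'  # must work without a password prompt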
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/build-travis.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # build-travis.sh - runs a Docker container from a Docker image with environment # prepared for building PMDK project and starts building PMDK # # this script is for building PMDK on Travis only # set -e if [[ "$TRAVIS_EVENT_TYPE" != "cron" && "$TRAVIS_BRANCH" != "coverity_scan" \ && "$COVERITY" -eq 1 ]]; then echo "INFO: Skip Coverity scan job if build is triggered neither by " \ "'cron' nor by a push to 'coverity_scan' branch" exit 0 fi if [[ ( "$TRAVIS_EVENT_TYPE" == "cron" || "$TRAVIS_BRANCH" == "coverity_scan" )\ && "$COVERITY" -ne 1 ]]; then echo "INFO: Skip regular jobs if build is triggered either by 'cron'" \ " or by a push to 'coverity_scan' branch" exit 0 fi if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=16.04)." 
exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of the PMDK project on the host machine" exit 1 fi if [[ -z "$TEST_BUILD" ]]; then TEST_BUILD=all fi imageName=pmem/pmdk:${OS}-${OS_VER} containerName=pmdk-${OS}-${OS_VER} if [[ $MAKE_PKG -eq 0 ]] ; then command="./run-build.sh"; fi if [[ $MAKE_PKG -eq 1 ]] ; then command="./run-build-package.sh"; fi if [[ $COVERAGE -eq 1 ]] ; then command="./run-coverage.sh"; ci_env=`bash <(curl -s https://codecov.io/env)`; fi if [[ ( "$TRAVIS_EVENT_TYPE" == "cron" || "$TRAVIS_BRANCH" == "coverity_scan" )\ && "$COVERITY" -eq 1 ]]; then command="./run-coverity.sh" fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi if [[ $SKIP_CHECK -eq 1 ]]; then BUILD_PACKAGE_CHECK=n; else BUILD_PACKAGE_CHECK=y; fi if [ -z "$NDCTL_ENABLE" ]; then ndctl_enable=; else ndctl_enable="--env NDCTL_ENABLE=$NDCTL_ENABLE"; fi WORKDIR=/pmdk SCRIPTSDIR=$WORKDIR/utils/docker # Run a container with # - environment variables set (--env) # - host directory containing PMDK source mounted (-v) # - working directory set (-w) docker run --rm --privileged=true --name=$containerName -ti \ $DNS_SETTING \ $ci_env \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env CC=$PMDK_CC \ --env CXX=$PMDK_CXX \ --env VALGRIND=$VALGRIND \ --env EXTRA_CFLAGS=$EXTRA_CFLAGS \ --env EXTRA_CXXFLAGS=$EXTRA_CXXFLAGS \ --env REMOTE_TESTS=$REMOTE_TESTS \ --env TEST_BUILD=$TEST_BUILD \ --env WORKDIR=$WORKDIR \ --env EXPERIMENTAL=$EXPERIMENTAL \ --env BUILD_PACKAGE_CHECK=$BUILD_PACKAGE_CHECK \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env CLANG_FORMAT=clang-format-3.8 \ --env TRAVIS=$TRAVIS \ --env TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE \ --env TRAVIS_COMMIT=$TRAVIS_COMMIT \ --env TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG \ --env TRAVIS_BRANCH=$TRAVIS_BRANCH \ --env TRAVIS_EVENT_TYPE=$TRAVIS_EVENT_TYPE \ --env COVERITY_SCAN_TOKEN=$COVERITY_SCAN_TOKEN \ --env COVERITY_SCAN_NOTIFICATION_EMAIL=$COVERITY_SCAN_NOTIFICATION_EMAIL \ $ndctl_enable \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ -w $SCRIPTSDIR \ $imageName $command
4,637
36.104
112
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/utils/docker/run-coverity.sh
#!/usr/bin/env bash # # Copyright 2017-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # run-coverity.sh - runs the Coverity scan build # set -e # Prepare build environment ./prepare-for-build.sh # Download Coverity certificate echo -n | openssl s_client -connect scan.coverity.com:443 | \ sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | \ sudo tee -a /etc/ssl/certs/ca-; # Build librpmem even if libfabric is not compiled with ibverbs export RPMEM_DISABLE_LIBIBVERBS=y export COVERITY_SCAN_PROJECT_NAME="$TRAVIS_REPO_SLUG" [[ "$TRAVIS_EVENT_TYPE" == "cron" ]] \ && export COVERITY_SCAN_BRANCH_PATTERN="master" \ || export COVERITY_SCAN_BRANCH_PATTERN="coverity_scan" export COVERITY_SCAN_BUILD_COMMAND="make -j all" cd $WORKDIR # Run the Coverity scan # XXX: Patch the Coverity script. # Recently, this script regularly exits with an error, even though # the build is successfully submitted. Probably because the status code # is missing in response, or it's not 201. # Changes: # 1) change the expected status code to 200 and # 2) print the full response string. # # This change should be reverted when the Coverity script is fixed. # # The previous version was: # curl -s https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh | bash wget https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh patch < utils/docker/0001-travis-fix-travisci_build_coverity_scan.sh.patch bash ./travisci_build_coverity_scan.sh
2,957
37.921053
82
sh