repo (stringlengths 1–152 ⌀) | file (stringlengths 14–221) | code (stringlengths 501–25k) | file_length (int64 501–25k) | avg_line_length (float64 20–99.5) | max_line_length (int64 21–134) | extension_type (stringclasses 2 values)
---|---|---|---|---|---|---
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,755 | 46.459459 | 74 | c |
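Each of these small .c files is a thin template instantiation: it selects a flush primitive, an exported symbol name, and (for the non-temporal variants) a post-store barrier policy via #define, then includes a shared header that supplies the actual memmove body. In the rows below, the clwb variants pair with no_barrier_after_ntstores, apparently deferring the fence to the caller's drain step, while the "empty" flush variants pair with barrier_after_ntstores and fence right after the streaming stores. A minimal sketch of the same pattern, using hypothetical names and a byte loop in place of the SIMD kernels:

/* copy_impl.h -- shared body; flush and EXPORTED_SYMBOL come from the includer */
#include <stddef.h>
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	char *start = dest;
	while (len--)
		*dest++ = *src++;	/* stands in for the 64-byte SIMD blocks */
	flush(start, (size_t)(dest - start));
}

/* copy_clwb.c -- one instantiation per flush policy */
static void my_flush_clwb(const void *addr, size_t len) { (void)addr; (void)len; /* would clwb each line */ }
#define flush		my_flush_clwb
#define EXPORTED_SYMBOL	copy_clwb
#include "copy_impl.h"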
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflushopt.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clflushopt
#include "memcpy_t_sse2.h"
| 1,751 | 46.351351 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_empty.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_empty
#include "memcpy_t_avx.h"
| 1,738 | 46 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflushopt.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_clflushopt
#include "memcpy_t_avx512f.h"
| 1,757 | 46.513514 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_empty.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,754 | 46.432432 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clflushopt.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_clflushopt
#include "memcpy_t_avx.h"
| 1,749 | 46.297297 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx.h | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
static force_inline void
memmove_mov8x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
__m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8);
__m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9);
__m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10);
__m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11);
__m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12);
__m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13);
__m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14);
__m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
_mm256_store_si256((__m256i *)dest + 4, ymm4);
_mm256_store_si256((__m256i *)dest + 5, ymm5);
_mm256_store_si256((__m256i *)dest + 6, ymm6);
_mm256_store_si256((__m256i *)dest + 7, ymm7);
_mm256_store_si256((__m256i *)dest + 8, ymm8);
_mm256_store_si256((__m256i *)dest + 9, ymm9);
_mm256_store_si256((__m256i *)dest + 10, ymm10);
_mm256_store_si256((__m256i *)dest + 11, ymm11);
_mm256_store_si256((__m256i *)dest + 12, ymm12);
_mm256_store_si256((__m256i *)dest + 13, ymm13);
_mm256_store_si256((__m256i *)dest + 14, ymm14);
_mm256_store_si256((__m256i *)dest + 15, ymm15);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
_mm256_store_si256((__m256i *)dest + 4, ymm4);
_mm256_store_si256((__m256i *)dest + 5, ymm5);
_mm256_store_si256((__m256i *)dest + 6, ymm6);
_mm256_store_si256((__m256i *)dest + 7, ymm7);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
flush64b(dest + 0 * 64);
}
static force_inline void
memmove_mov_avx_fw(char *dest, const char *src, size_t len)
{
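/*
 * Copy the unaligned head first so that every 64-byte block below
 * starts on a cache-line-aligned dest and each flush64b covers
 * exactly one line.
 */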
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 8 * 64) {
memmove_mov8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len)
memmove_small_avx(dest, src, len);
}
static force_inline void
memmove_mov_avx_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx(dest, src, cnt);
}
while (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_mov8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
if (len)
memmove_small_avx(dest - len, src - len, len);
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
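/*
 * Unsigned-difference overlap test: when dest precedes src the
 * subtraction wraps to a huge value, so both "dest < src" and the
 * non-overlapping cases take the forward path; only a
 * forward-overlapping dest (src < dest < src + len) needs the
 * backward copy.
 */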
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_mov_avx_fw(dest, src, len);
else
memmove_mov_avx_bw(dest, src, len);
avx_zeroupper();
}
| 7,378 | 27.937255 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflush.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_clflush
#include "memcpy_t_avx512f.h"
| 1,747 | 46.243243 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_empty.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,756 | 46.486486 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflush.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clflush
#include "memcpy_t_sse2.h"
| 1,741 | 46.081081 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clwb.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,757 | 46.513514 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx512f.h | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#ifndef PMEM_MEMCPY_AVX512F_H
#define PMEM_MEMCPY_AVX512F_H
#include <stddef.h>
#include "memcpy_avx.h"
static force_inline void
memmove_small_avx512f(char *dest, const char *src, size_t len)
{
/* We can't do better than AVX here. */
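/*
 * Small copies fit in 256-bit registers anyway, and 512-bit
 * operations can cost extra frequency throttling.
 */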
memmove_small_avx(dest, src, len);
}
#endif
| 1,886 | 38.3125 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_empty.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_empty
#include "memcpy_t_sse2.h"
| 1,740 | 46.054054 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f.h | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx512f.h"
#include "memcpy_memset.h"
#include "libpmem.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt32x64b(char *dest, const char *src)
{
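/*
 * The streaming stores below bypass the cache, so no flush64b calls
 * are needed; VALGRIND_DO_FLUSH only tells pmemcheck that this range
 * does not require a separate flush.
 */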
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
__m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16);
__m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17);
__m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18);
__m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19);
__m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20);
__m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21);
__m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22);
__m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23);
__m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24);
__m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25);
__m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26);
__m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27);
__m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28);
__m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29);
__m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30);
__m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
_mm512_stream_si512((__m512i *)dest + 8, zmm8);
_mm512_stream_si512((__m512i *)dest + 9, zmm9);
_mm512_stream_si512((__m512i *)dest + 10, zmm10);
_mm512_stream_si512((__m512i *)dest + 11, zmm11);
_mm512_stream_si512((__m512i *)dest + 12, zmm12);
_mm512_stream_si512((__m512i *)dest + 13, zmm13);
_mm512_stream_si512((__m512i *)dest + 14, zmm14);
_mm512_stream_si512((__m512i *)dest + 15, zmm15);
_mm512_stream_si512((__m512i *)dest + 16, zmm16);
_mm512_stream_si512((__m512i *)dest + 17, zmm17);
_mm512_stream_si512((__m512i *)dest + 18, zmm18);
_mm512_stream_si512((__m512i *)dest + 19, zmm19);
_mm512_stream_si512((__m512i *)dest + 20, zmm20);
_mm512_stream_si512((__m512i *)dest + 21, zmm21);
_mm512_stream_si512((__m512i *)dest + 22, zmm22);
_mm512_stream_si512((__m512i *)dest + 23, zmm23);
_mm512_stream_si512((__m512i *)dest + 24, zmm24);
_mm512_stream_si512((__m512i *)dest + 25, zmm25);
_mm512_stream_si512((__m512i *)dest + 26, zmm26);
_mm512_stream_si512((__m512i *)dest + 27, zmm27);
_mm512_stream_si512((__m512i *)dest + 28, zmm28);
_mm512_stream_si512((__m512i *)dest + 29, zmm29);
_mm512_stream_si512((__m512i *)dest + 30, zmm30);
_mm512_stream_si512((__m512i *)dest + 31, zmm31);
VALGRIND_DO_FLUSH(dest, 32 * 64);
}
static force_inline void
memmove_movnt16x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
_mm512_stream_si512((__m512i *)dest + 8, zmm8);
_mm512_stream_si512((__m512i *)dest + 9, zmm9);
_mm512_stream_si512((__m512i *)dest + 10, zmm10);
_mm512_stream_si512((__m512i *)dest + 11, zmm11);
_mm512_stream_si512((__m512i *)dest + 12, zmm12);
_mm512_stream_si512((__m512i *)dest + 13, zmm13);
_mm512_stream_si512((__m512i *)dest + 14, zmm14);
_mm512_stream_si512((__m512i *)dest + 15, zmm15);
VALGRIND_DO_FLUSH(dest, 16 * 64);
}
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
_mm256_stream_si256((__m256i *)dest, ymm0);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
static force_inline void
memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx512f(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 32 * 64) {
memmove_movnt32x64b(dest, src);
dest += 32 * 64;
src += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memmove_movnt16x64b(dest, src);
dest += 16 * 64;
src += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memmove_movnt8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
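/*
 * A power-of-2 tail of 4/8/16/32 bytes maps onto exactly one
 * streaming store of that width; other sizes (including 1 and 2)
 * fall back to the regular small-copy path.
 */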
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
goto end;
}
nonnt:
memmove_small_avx512f(dest, src, len);
end:
avx_zeroupper();
}
static force_inline void
memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx512f(dest, src, cnt);
}
while (len >= 32 * 64) {
dest -= 32 * 64;
src -= 32 * 64;
len -= 32 * 64;
memmove_movnt32x64b(dest, src);
}
if (len >= 16 * 64) {
dest -= 16 * 64;
src -= 16 * 64;
len -= 16 * 64;
memmove_movnt16x64b(dest, src);
}
if (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_movnt8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
goto end;
}
nonnt:
dest -= len;
src -= len;
memmove_small_avx512f(dest, src, len);
end:
avx_zeroupper();
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_avx512f_fw(dest, src, len);
else
memmove_movnt_avx512f_bw(dest, src, len);
maybe_barrier();
}
| 13,191 | 28.446429 | 74 | h |
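This dump does not show how the instantiated symbols get selected; a purely illustrative dispatch sketch follows (the function-pointer type, CPUID helpers, and init routine are hypothetical; only the memmove_movnt_* names come from the files above):

#include <stddef.h>

/* Exported by the instantiation files above. */
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clwb(char *dest, const char *src, size_t len);

typedef void (*memmove_nt_fn)(char *dest, const char *src, size_t len);
static memmove_nt_fn memmove_nt;	/* chosen once at startup */

static int cpu_has_avx512f(void) { return 0; }	/* stub; real code queries CPUID */
static int cpu_has_avx(void) { return 0; }	/* stub */

static void
memmove_nt_init(void)
{
	if (cpu_has_avx512f())
		memmove_nt = memmove_movnt_avx512f_clwb;
	else if (cpu_has_avx())
		memmove_nt = memmove_movnt_avx_clwb;
	else
		memmove_nt = memmove_movnt_sse2_clwb;
}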
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_empty.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,762 | 46.648649 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2.h | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
__m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8);
__m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9);
__m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10);
__m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11);
__m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12);
__m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13);
__m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14);
__m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
_mm_stream_si128((__m128i *)dest + 4, xmm4);
_mm_stream_si128((__m128i *)dest + 5, xmm5);
_mm_stream_si128((__m128i *)dest + 6, xmm6);
_mm_stream_si128((__m128i *)dest + 7, xmm7);
_mm_stream_si128((__m128i *)dest + 8, xmm8);
_mm_stream_si128((__m128i *)dest + 9, xmm9);
_mm_stream_si128((__m128i *)dest + 10, xmm10);
_mm_stream_si128((__m128i *)dest + 11, xmm11);
_mm_stream_si128((__m128i *)dest + 12, xmm12);
_mm_stream_si128((__m128i *)dest + 13, xmm13);
_mm_stream_si128((__m128i *)dest + 14, xmm14);
_mm_stream_si128((__m128i *)dest + 15, xmm15);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
_mm_stream_si128((__m128i *)dest + 4, xmm4);
_mm_stream_si128((__m128i *)dest + 5, xmm5);
_mm_stream_si128((__m128i *)dest + 6, xmm6);
_mm_stream_si128((__m128i *)dest + 7, xmm7);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
static force_inline void
memmove_movnt_sse_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_sse2(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
return;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
return;
}
nonnt:
memmove_small_sse2(dest, src, len);
}
static force_inline void
memmove_movnt_sse_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_sse2(dest, src, cnt);
}
while (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
return;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
return;
}
nonnt:
dest -= len;
src -= len;
memmove_small_sse2(dest, src, len);
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_sse_fw(dest, src, len);
else
memmove_movnt_sse_bw(dest, src, len);
maybe_barrier();
}
| 8,204 | 25.813725 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clwb.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clwb
#include "memcpy_t_sse2.h"
| 1,733 | 45.864865 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clwb.c | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,763 | 46.675676 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx.h | /*
 * Copyright 2017-2018, Intel Corporation
 * (BSD-3-Clause license text identical to the first file's header above.)
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
__m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8);
__m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9);
__m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10);
__m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11);
__m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12);
__m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13);
__m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14);
__m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
_mm256_stream_si256((__m256i *)dest + 4, ymm4);
_mm256_stream_si256((__m256i *)dest + 5, ymm5);
_mm256_stream_si256((__m256i *)dest + 6, ymm6);
_mm256_stream_si256((__m256i *)dest + 7, ymm7);
_mm256_stream_si256((__m256i *)dest + 8, ymm8);
_mm256_stream_si256((__m256i *)dest + 9, ymm9);
_mm256_stream_si256((__m256i *)dest + 10, ymm10);
_mm256_stream_si256((__m256i *)dest + 11, ymm11);
_mm256_stream_si256((__m256i *)dest + 12, ymm12);
_mm256_stream_si256((__m256i *)dest + 13, ymm13);
_mm256_stream_si256((__m256i *)dest + 14, ymm14);
_mm256_stream_si256((__m256i *)dest + 15, ymm15);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
_mm256_stream_si256((__m256i *)dest + 4, ymm4);
_mm256_stream_si256((__m256i *)dest + 5, ymm5);
_mm256_stream_si256((__m256i *)dest + 6, ymm6);
_mm256_stream_si256((__m256i *)dest + 7, ymm7);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
_mm256_stream_si256((__m256i *)dest, ymm0);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
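	/* dest is now 64-byte aligned (or len was exhausted above) */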
while (len >= 8 * 64) {
memmove_movnt8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
goto end;
}
nonnt:
memmove_small_avx(dest, src, len);
end:
avx_zeroupper();
}
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx(dest, src, cnt);
}
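	/* dest is now 64-byte aligned; keep copying backward in cache lines */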
while (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_movnt8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
goto end;
}
nonnt:
dest -= len;
src -= len;
memmove_small_avx(dest, src, len);
end:
avx_zeroupper();
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_avx_fw(dest, src, len);
else
memmove_movnt_avx_bw(dest, src, len);
maybe_barrier();
}
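/*
 * Editor's note (illustrative, not part of the original header): this file
 * is a template. A thin wrapper .c file instantiates it by defining the
 * hook macros before inclusion, by analogy with the avx512f wrapper above:
 *
 *	#define flush flush_clwb_nolog
 *	#define EXPORTED_SYMBOL memmove_movnt_avx_clwb
 *	#define maybe_barrier no_barrier_after_ntstores
 *	#include "memcpy_nt_avx.h"
 */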

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clwb.c */
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_clwb
#include "memcpy_t_avx.h"

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/aarch64/flush.h */
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ARM64_FLUSH_H
#define ARM64_FLUSH_H
#include <stdint.h>
#include "arm_cacheops.h"
#include "util.h"
#define FLUSH_ALIGN ((uintptr_t)64)
/*
 * flush_dcache_invalidate_opt_nolog -- flush the CPU cache, using
* arm_clean_and_invalidate_va_to_poc (see arm_cacheops.h) {DC CIVAC}
*/
static force_inline void
flush_dcache_invalidate_opt_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
arm_data_memory_barrier();
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
arm_clean_and_invalidate_va_to_poc((char *)uptr);
}
arm_data_memory_barrier();
}
/*
* flush_dcache_nolog -- flush the CPU cache, using DC CVAC
*/
static force_inline void
flush_dcache_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
arm_clean_va_to_poc((char *)uptr);
}
}
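/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * persisting a store on ARM pairs a clean-to-PoC with a barrier, roughly:
 *
 *	obj->val = 42;
 *	flush_dcache_nolog(obj, sizeof(*obj));   - DC CVAC per cache line
 *	arm_data_memory_barrier();               - DMB ISH
 */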
#endif

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/aarch64/arm_cacheops.h */
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ARM inline assembly to flush and invalidate caches
* clwb => dc cvac
* clflush | clflushopt => dc civac
* fence => dmb ish
*/
#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H
#include <stdlib.h>
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}
static inline void
arm_data_memory_barrier(void)
{
asm volatile("dmb ish" : : : "memory");
}
static inline void
arm_clean_and_invalidate_va_to_poc(const void *addr)
{
asm volatile("dc civac, %0" : : "r" (addr) : "memory");
}
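/*
 * Editor's note (illustrative): per the mapping in the header comment
 * above, the x86 sequence "CLWB; SFENCE" corresponds to
 * arm_clean_va_to_poc(p); arm_data_memory_barrier();
 */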
#endif

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmem/aarch64/init.c */
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include "libpmem.h"
#include "flush.h"
#include "os.h"
#include "out.h"
#include "pmem.h"
#include "valgrind_internal.h"
/*
* memmove_nodrain_libc -- (internal) memmove to pmem without hw drain
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem without hw drain
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
/*
* predrain_fence_empty -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_fence_empty(void)
{
LOG(15, NULL);
VALGRIND_DO_FENCE;
	/* nothing to do (the deep-flush path already issues DMB barriers) */
}
/*
* predrain_memory_barrier -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_memory_barrier(void)
{
LOG(15, NULL);
arm_data_memory_barrier();
}
/*
* flush_dcache_invalidate_opt -- (internal) flush the CPU cache,
* using clflushopt for X86 and arm_clean_and_invalidate_va_to_poc
* for aarch64 (see arm_cacheops.h) {DC CIVAC}
*/
static void
flush_dcache_invalidate_opt(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_dcache_invalidate_opt_nolog(addr, len);
}
/*
* flush_dcache -- (internal) flush the CPU cache, using clwb
*/
static void
flush_dcache(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_dcache_nolog(addr, len);
}
/*
* flush_empty -- (internal) do not flush the CPU cache
*/
static void
flush_empty(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_empty_nolog(addr, len);
}
/*
* pmem_init_funcs -- initialize architecture-specific list of pmem operations
*/
void
pmem_init_funcs(struct pmem_funcs *funcs)
{
LOG(3, NULL);
funcs->predrain_fence = predrain_fence_empty;
funcs->deep_flush = flush_dcache_invalidate_opt;
funcs->is_pmem = is_pmem_detect;
funcs->memmove_nodrain = memmove_nodrain_generic;
funcs->memset_nodrain = memset_nodrain_generic;
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
if (ptr) {
long long val = atoll(ptr);
if (val) {
funcs->memmove_nodrain = memmove_nodrain_libc;
funcs->memset_nodrain = memset_nodrain_libc;
}
}
int flush;
char *e = os_getenv("PMEM_NO_FLUSH");
if (e && (strcmp(e, "1") == 0)) {
flush = 0;
LOG(3, "Forced not flushing CPU_cache");
} else if (e && (strcmp(e, "0") == 0)) {
flush = 1;
LOG(3, "Forced flushing CPU_cache");
} else if (pmem_has_auto_flush() == 1) {
flush = 0;
LOG(3, "Not flushing CPU_cache, eADR detected");
} else {
flush = 1;
LOG(3, "Flushing CPU cache");
}
if (flush) {
funcs->flush = funcs->deep_flush;
} else {
funcs->flush = flush_empty;
funcs->predrain_fence = predrain_memory_barrier;
}
if (funcs->deep_flush == flush_dcache)
LOG(3, "Using ARM invalidate");
else if (funcs->deep_flush == flush_dcache_invalidate_opt)
LOG(3, "Synchronize VA to poc for ARM");
else
FATAL("invalid deep flush function address");
	if (funcs->flush == flush_empty)
LOG(3, "not flushing CPU cache");
else if (funcs->flush != funcs->deep_flush)
FATAL("invalid flush function address");
if (funcs->memmove_nodrain == memmove_nodrain_generic)
LOG(3, "using generic memmove");
else if (funcs->memmove_nodrain == memmove_nodrain_libc)
LOG(3, "using libc memmove");
else
FATAL("invalid memove_nodrain function address");
}
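/*
 * Editor's note (illustrative): the environment variables read above allow
 * run-time overrides, e.g.:
 *
 *	PMEM_NO_FLUSH=1 ./app           - force the no-flush (eADR-like) path
 *	PMEM_NO_GENERIC_MEMCPY=1 ./app  - use the libc memmove/memset variants
 */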

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libvmem/libvmem_main.c */
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libvmem_main.c -- entry point for libvmem.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include "win_mmap.h"
void vmem_init(void);
void vmem_fini(void);
void jemalloc_constructor(void);
void jemalloc_destructor(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
jemalloc_constructor();
vmem_init();
win_mmap_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
win_mmap_fini();
vmem_fini();
jemalloc_destructor();
break;
}
return TRUE;
}

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libvmem/vmem.c */
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem.c -- memory pool & allocation entry points for libvmem
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <errno.h>
#include <stdint.h>
#include <fcntl.h>
#include <inttypes.h>
#include <wchar.h>
#include "libvmem.h"
#include "jemalloc.h"
#include "pmemcommon.h"
#include "sys_util.h"
#include "file.h"
#include "vmem.h"
#include "valgrind_internal.h"
/*
* private to this file...
*/
static size_t Header_size;
static os_mutex_t Vmem_init_lock;
static os_mutex_t Pool_lock; /* guards vmem_create and vmem_delete */
/*
* print_jemalloc_messages -- custom print function, for jemalloc
*
* Prints traces from jemalloc. All traces from jemalloc
* are considered as error messages.
*/
static void
print_jemalloc_messages(void *ignore, const char *s)
{
ERR("%s", s);
}
/*
* print_jemalloc_stats -- print function, for jemalloc statistics
*
* Prints statistics from jemalloc. All statistics are printed with level 0.
*/
static void
print_jemalloc_stats(void *ignore, const char *s)
{
LOG_NONL(0, "%s", s);
}
/*
* vmem_construct -- initialization for vmem
*
* Called automatically by the run-time loader or on the first use of vmem.
*/
void
vmem_construct(void)
{
static bool initialized = false;
int (*je_vmem_navsnprintf)
(char *, size_t, const char *, va_list) = NULL;
if (initialized)
return;
util_mutex_lock(&Vmem_init_lock);
if (!initialized) {
common_init(VMEM_LOG_PREFIX, VMEM_LOG_LEVEL_VAR,
VMEM_LOG_FILE_VAR, VMEM_MAJOR_VERSION,
VMEM_MINOR_VERSION);
out_set_vsnprintf_func(je_vmem_navsnprintf);
LOG(3, NULL);
Header_size = roundup(sizeof(VMEM), Pagesize);
/* Set up jemalloc messages to a custom print function */
je_vmem_malloc_message = print_jemalloc_messages;
initialized = true;
}
util_mutex_unlock(&Vmem_init_lock);
}
/*
* vmem_init -- load-time initialization for vmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
vmem_init(void)
{
util_mutex_init(&Vmem_init_lock);
util_mutex_init(&Pool_lock);
vmem_construct();
}
/*
* vmem_fini -- libvmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
vmem_fini(void)
{
LOG(3, NULL);
util_mutex_destroy(&Pool_lock);
util_mutex_destroy(&Vmem_init_lock);
/* set up jemalloc messages back to stderr */
je_vmem_malloc_message = NULL;
common_fini();
}
/*
* vmem_createU -- create a memory pool in a temp file
*/
#ifndef _WIN32
static inline
#endif
VMEM *
vmem_createU(const char *dir, size_t size)
{
vmem_construct();
LOG(3, "dir \"%s\" size %zu", dir, size);
if (size < VMEM_MIN_POOL) {
ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
errno = EINVAL;
return NULL;
}
enum file_type type = util_file_get_type(dir);
if (type == OTHER_ERROR)
return NULL;
util_mutex_lock(&Pool_lock);
/* silently enforce multiple of mapping alignment */
size = roundup(size, Mmap_align);
void *addr;
if (type == TYPE_DEVDAX) {
if ((addr = util_file_map_whole(dir)) == NULL) {
util_mutex_unlock(&Pool_lock);
return NULL;
}
} else {
if ((addr = util_map_tmpfile(dir, size,
4 * MEGABYTE)) == NULL) {
util_mutex_unlock(&Pool_lock);
return NULL;
}
}
/* store opaque info at beginning of mapped area */
struct vmem *vmp = addr;
memset(&vmp->hdr, '\0', sizeof(vmp->hdr));
memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
vmp->addr = addr;
vmp->size = size;
vmp->caller_mapped = 0;
/* Prepare pool for jemalloc */
if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
size - Header_size,
/* zeroed if */ type != TYPE_DEVDAX,
/* empty */ 1) == NULL) {
ERR("pool creation failed");
util_unmap(vmp->addr, vmp->size);
util_mutex_unlock(&Pool_lock);
return NULL;
}
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
if (type != TYPE_DEVDAX)
util_range_none(addr, sizeof(struct pool_hdr));
util_mutex_unlock(&Pool_lock);
LOG(3, "vmp %p", vmp);
return vmp;
}
#ifndef _WIN32
/*
* vmem_create -- create a memory pool in a temp file
*/
VMEM *
vmem_create(const char *dir, size_t size)
{
return vmem_createU(dir, size);
}
#else
/*
* vmem_createW -- create a memory pool in a temp file
*/
VMEM *
vmem_createW(const wchar_t *dir, size_t size)
{
char *udir = util_toUTF8(dir);
if (udir == NULL)
return NULL;
VMEM *ret = vmem_createU(udir, size);
util_free_UTF8(udir);
return ret;
}
#endif
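/*
 * Usage sketch (editor's illustration based on the API in this file;
 * "/mnt/pmem" is a hypothetical directory on a memory-backed file system):
 *
 *	VMEM *vmp = vmem_create("/mnt/pmem", VMEM_MIN_POOL);
 *	void *buf = vmem_malloc(vmp, 128);
 *	...
 *	vmem_free(vmp, buf);
 *	vmem_delete(vmp);
 */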
/*
* vmem_create_in_region -- create a memory pool in a given range
*/
VMEM *
vmem_create_in_region(void *addr, size_t size)
{
vmem_construct();
LOG(3, "addr %p size %zu", addr, size);
if (((uintptr_t)addr & (Pagesize - 1)) != 0) {
ERR("addr %p not aligned to pagesize %llu", addr, Pagesize);
errno = EINVAL;
return NULL;
}
if (size < VMEM_MIN_POOL) {
ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
errno = EINVAL;
return NULL;
}
/*
* Initially, treat this memory region as undefined.
* Once jemalloc initializes its metadata, it will also mark
* registered free chunks (usable heap space) as unaddressable.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, size);
/* store opaque info at beginning of mapped area */
struct vmem *vmp = addr;
memset(&vmp->hdr, '\0', sizeof(vmp->hdr));
memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
vmp->addr = addr;
vmp->size = size;
vmp->caller_mapped = 1;
util_mutex_lock(&Pool_lock);
/* Prepare pool for jemalloc */
if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
size - Header_size, 0,
/* empty */ 1) == NULL) {
ERR("pool creation failed");
util_mutex_unlock(&Pool_lock);
return NULL;
}
#ifndef _WIN32
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
util_range_none(addr, sizeof(struct pool_hdr));
#endif
util_mutex_unlock(&Pool_lock);
LOG(3, "vmp %p", vmp);
return vmp;
}
/*
* vmem_delete -- delete a memory pool
*/
void
vmem_delete(VMEM *vmp)
{
LOG(3, "vmp %p", vmp);
util_mutex_lock(&Pool_lock);
int ret = je_vmem_pool_delete((pool_t *)((uintptr_t)vmp + Header_size));
if (ret != 0) {
ERR("invalid pool handle: 0x%" PRIxPTR, (uintptr_t)vmp);
errno = EINVAL;
util_mutex_unlock(&Pool_lock);
return;
}
#ifndef _WIN32
util_range_rw(vmp->addr, sizeof(struct pool_hdr));
#endif
if (vmp->caller_mapped == 0) {
util_unmap(vmp->addr, vmp->size);
} else {
/*
* The application cannot do any assumptions about the content
* of this memory region once the pool is destroyed.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(vmp->addr, vmp->size);
}
util_mutex_unlock(&Pool_lock);
}
/*
* vmem_check -- memory pool consistency check
*/
int
vmem_check(VMEM *vmp)
{
vmem_construct();
LOG(3, "vmp %p", vmp);
util_mutex_lock(&Pool_lock);
int ret = je_vmem_pool_check((pool_t *)((uintptr_t)vmp + Header_size));
util_mutex_unlock(&Pool_lock);
return ret;
}
/*
* vmem_stats_print -- spew memory allocator stats for a pool
*/
void
vmem_stats_print(VMEM *vmp, const char *opts)
{
LOG(3, "vmp %p opts \"%s\"", vmp, opts ? opts : "");
je_vmem_pool_malloc_stats_print(
(pool_t *)((uintptr_t)vmp + Header_size),
print_jemalloc_stats, NULL, opts);
}
/*
* vmem_malloc -- allocate memory
*/
void *
vmem_malloc(VMEM *vmp, size_t size)
{
LOG(3, "vmp %p size %zu", vmp, size);
return je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
}
/*
* vmem_free -- free memory
*/
void
vmem_free(VMEM *vmp, void *ptr)
{
LOG(3, "vmp %p ptr %p", vmp, ptr);
je_vmem_pool_free((pool_t *)((uintptr_t)vmp + Header_size), ptr);
}
/*
* vmem_calloc -- allocate zeroed memory
*/
void *
vmem_calloc(VMEM *vmp, size_t nmemb, size_t size)
{
LOG(3, "vmp %p nmemb %zu size %zu", vmp, nmemb, size);
return je_vmem_pool_calloc((pool_t *)((uintptr_t)vmp + Header_size),
nmemb, size);
}
/*
* vmem_realloc -- resize a memory allocation
*/
void *
vmem_realloc(VMEM *vmp, void *ptr, size_t size)
{
LOG(3, "vmp %p ptr %p size %zu", vmp, ptr, size);
return je_vmem_pool_ralloc((pool_t *)((uintptr_t)vmp + Header_size),
ptr, size);
}
/*
* vmem_aligned_alloc -- allocate aligned memory
*/
void *
vmem_aligned_alloc(VMEM *vmp, size_t alignment, size_t size)
{
LOG(3, "vmp %p alignment %zu size %zu", vmp, alignment, size);
return je_vmem_pool_aligned_alloc(
(pool_t *)((uintptr_t)vmp + Header_size),
alignment, size);
}
/*
* vmem_strdup -- allocate memory for copy of string
*/
char *
vmem_strdup(VMEM *vmp, const char *s)
{
LOG(3, "vmp %p s %p", vmp, s);
size_t size = strlen(s) + 1;
void *retaddr = je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
if (retaddr == NULL)
return NULL;
return (char *)memcpy(retaddr, s, size);
}
/*
* vmem_wcsdup -- allocate memory for copy of wide character string
*/
wchar_t *
vmem_wcsdup(VMEM *vmp, const wchar_t *s)
{
LOG(3, "vmp %p s %p", vmp, s);
size_t size = (wcslen(s) + 1) * sizeof(wchar_t);
void *retaddr = je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
if (retaddr == NULL)
return NULL;
return (wchar_t *)memcpy(retaddr, s, size);
}
/*
* vmem_malloc_usable_size -- get usable size of allocation
*/
size_t
vmem_malloc_usable_size(VMEM *vmp, void *ptr)
{
LOG(3, "vmp %p ptr %p", vmp, ptr);
return je_vmem_pool_malloc_usable_size(
(pool_t *)((uintptr_t)vmp + Header_size), ptr);
}

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libvmem/vmem.h */
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem.h -- internal definitions for libvmem
*/
#ifndef VMEM_H
#define VMEM_H 1
#include <stddef.h>
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VMEM_LOG_PREFIX "libvmem"
#define VMEM_LOG_LEVEL_VAR "VMEM_LOG_LEVEL"
#define VMEM_LOG_FILE_VAR "VMEM_LOG_FILE"
/* attributes of the vmem memory pool format for the pool header */
#define VMEM_HDR_SIG "VMEM " /* must be 8 bytes including '\0' */
#define VMEM_FORMAT_MAJOR 1
struct vmem {
struct pool_hdr hdr; /* memory pool header */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int caller_mapped;
};
void vmem_construct(void);
#ifdef __cplusplus
}
#endif
#endif

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libvmem/libvmem.c */
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libvmem.c -- basic libvmem functions
*/
#include <stdio.h>
#include <stdint.h>
#include "libvmem.h"
#include "jemalloc.h"
#include "out.h"
#include "vmem.h"
/*
* vmem_check_versionU -- see if library meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
vmem_check_versionU(unsigned major_required, unsigned minor_required)
{
vmem_construct();
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != VMEM_MAJOR_VERSION) {
ERR("libvmem major version mismatch (need %u, found %u)",
major_required, VMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > VMEM_MINOR_VERSION) {
ERR("libvmem minor version mismatch (need %u, found %u)",
minor_required, VMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* vmem_check_version -- see if library meets application version requirements
*/
const char *
vmem_check_version(unsigned major_required, unsigned minor_required)
{
return vmem_check_versionU(major_required, minor_required);
}
#else
/*
* vmem_check_versionW -- see if library meets application version requirements
*/
const wchar_t *
vmem_check_versionW(unsigned major_required, unsigned minor_required)
{
if (vmem_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* vmem_set_funcs -- allow overriding libvmem's call to malloc, etc.
*/
void
vmem_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s),
void (*print_func)(const char *s))
{
vmem_construct();
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func,
realloc_func, strdup_func);
out_set_print_func(print_func);
je_vmem_pool_set_alloc_funcs(malloc_func, free_func);
}
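/*
 * Illustrative call (editor's note; the my_* wrappers are hypothetical):
 *
 *	vmem_set_funcs(my_malloc, my_free, my_realloc, my_strdup, my_print);
 */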
/*
* vmem_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
vmem_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* vmem_errormsg -- return last error message
*/
const char *
vmem_errormsg(void)
{
return vmem_errormsgU();
}
#else
/*
* vmem_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
vmem_errormsgW(void)
{
return out_get_errormsgW();
}
#endif

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/getopt/getopt.c */
/*
 * Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "getopt.h"
#include <stddef.h>
#include <string.h>
#include <stdio.h>
char* optarg;
int optopt;
/* The variable optind [...] shall be initialized to 1 by the system. */
int optind = 1;
int opterr;
static char* optcursor = NULL;
static char *first = NULL;
/* rotates argv array */
static void rotate(char **argv, int argc) {
if (argc <= 1)
return;
char *tmp = argv[0];
memmove(argv, argv + 1, (argc - 1) * sizeof(char *));
argv[argc - 1] = tmp;
}
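/*
 * Illustrative example (editor's note): one rotate() call on
 * {"prog-arg", "-x", "-y"} with argc 3 yields {"-x", "-y", "prog-arg"},
 * moving the leading non-option toward the end so options are scanned first.
 */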
/* Implemented based on [1] and [2] for optional arguments.
optopt is handled FreeBSD-style, per [3].
Other GNU and FreeBSD extensions are purely accidental.
[1] http://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html
[2] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
[3] http://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE
*/
int getopt(int argc, char* const argv[], const char* optstring) {
int optchar = -1;
const char* optdecl = NULL;
optarg = NULL;
opterr = 0;
optopt = 0;
/* Unspecified, but we need it to avoid overrunning the argv bounds. */
if (optind >= argc)
goto no_more_optchars;
/* If, when getopt() is called argv[optind] is a null pointer, getopt()
shall return -1 without changing optind. */
if (argv[optind] == NULL)
goto no_more_optchars;
/* If, when getopt() is called *argv[optind] is not the character '-',
permute argv to move non options to the end */
if (*argv[optind] != '-') {
if (argc - optind <= 1)
goto no_more_optchars;
if (!first)
first = argv[optind];
do {
rotate((char **)(argv + optind), argc - optind);
} while (*argv[optind] != '-' && argv[optind] != first);
if (argv[optind] == first)
goto no_more_optchars;
}
/* If, when getopt() is called argv[optind] points to the string "-",
getopt() shall return -1 without changing optind. */
if (strcmp(argv[optind], "-") == 0)
goto no_more_optchars;
/* If, when getopt() is called argv[optind] points to the string "--",
getopt() shall return -1 after incrementing optind. */
if (strcmp(argv[optind], "--") == 0) {
++optind;
if (first) {
do {
rotate((char **)(argv + optind), argc - optind);
} while (argv[optind] != first);
}
goto no_more_optchars;
}
if (optcursor == NULL || *optcursor == '\0')
optcursor = argv[optind] + 1;
optchar = *optcursor;
/* FreeBSD: The variable optopt saves the last known option character
returned by getopt(). */
optopt = optchar;
/* The getopt() function shall return the next option character (if one is
found) from argv that matches a character in optstring, if there is
one that matches. */
optdecl = strchr(optstring, optchar);
if (optdecl) {
/* [I]f a character is followed by a colon, the option takes an
argument. */
if (optdecl[1] == ':') {
optarg = ++optcursor;
if (*optarg == '\0') {
/* GNU extension: Two colons mean an option takes an
optional arg; if there is text in the current argv-element
(i.e., in the same word as the option name itself, for example,
"-oarg"), then it is returned in optarg, otherwise optarg is set
to zero. */
if (optdecl[2] != ':') {
/* If the option was the last character in the string pointed to by
an element of argv, then optarg shall contain the next element
of argv, and optind shall be incremented by 2. If the resulting
value of optind is greater than argc, this indicates a missing
option-argument, and getopt() shall return an error indication.
Otherwise, optarg shall point to the string following the
option character in that element of argv, and optind shall be
incremented by 1.
*/
if (++optind < argc) {
optarg = argv[optind];
} else {
/* If it detects a missing option-argument, it shall return the
colon character ( ':' ) if the first character of optstring
was a colon, or a question-mark character ( '?' ) otherwise.
*/
optarg = NULL;
fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar);
optchar = (optstring[0] == ':') ? ':' : '?';
}
} else {
optarg = NULL;
}
}
optcursor = NULL;
}
} else {
fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar);
/* If getopt() encounters an option character that is not contained in
optstring, it shall return the question-mark ( '?' ) character. */
optchar = '?';
}
if (optcursor == NULL || *++optcursor == '\0')
++optind;
return optchar;
no_more_optchars:
optcursor = NULL;
first = NULL;
return -1;
}
/* Implementation based on [1].
[1] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
*/
int getopt_long(int argc, char* const argv[], const char* optstring,
const struct option* longopts, int* longindex) {
const struct option* o = longopts;
const struct option* match = NULL;
int num_matches = 0;
size_t argument_name_length = 0;
const char* current_argument = NULL;
int retval = -1;
optarg = NULL;
optopt = 0;
if (optind >= argc)
return -1;
/* If, when getopt() is called argv[optind] is a null pointer, getopt_long()
shall return -1 without changing optind. */
if (argv[optind] == NULL)
goto no_more_optchars;
/* If, when getopt_long() is called *argv[optind] is not the character '-',
permute argv to move non options to the end */
if (*argv[optind] != '-') {
if (argc - optind <= 1)
goto no_more_optchars;
if (!first)
first = argv[optind];
do {
rotate((char **)(argv + optind), argc - optind);
} while (*argv[optind] != '-' && argv[optind] != first);
if (argv[optind] == first)
goto no_more_optchars;
}
if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0)
return getopt(argc, argv, optstring);
/* It's an option; starts with -- and is longer than two chars. */
current_argument = argv[optind] + 2;
argument_name_length = strcspn(current_argument, "=");
for (; o->name; ++o) {
if (strncmp(o->name, current_argument, argument_name_length) == 0) {
match = o;
++num_matches;
if (strlen(o->name) == argument_name_length) {
/* found match is exactly the one which we are looking for */
num_matches = 1;
break;
}
}
}
if (num_matches == 1) {
/* If longindex is not NULL, it points to a variable which is set to the
index of the long option relative to longopts. */
if (longindex)
*longindex = (int)(match - longopts);
/* If flag is NULL, then getopt_long() shall return val.
Otherwise, getopt_long() returns 0, and flag shall point to a variable
which shall be set to val if the option is found, but left unchanged if
the option is not found. */
if (match->flag)
*(match->flag) = match->val;
retval = match->flag ? 0 : match->val;
if (match->has_arg != no_argument) {
optarg = strchr(argv[optind], '=');
if (optarg != NULL)
++optarg;
if (match->has_arg == required_argument) {
/* Only scan the next argv for required arguments. Behavior is not
specified, but has been observed with Ubuntu and Mac OSX. */
if (optarg == NULL && ++optind < argc) {
optarg = argv[optind];
}
if (optarg == NULL)
retval = ':';
}
} else if (strchr(argv[optind], '=')) {
/* An argument was provided to a non-argument option.
I haven't seen this specified explicitly, but both GNU and BSD-based
implementations show this behavior.
*/
retval = '?';
}
} else {
/* Unknown option or ambiguous match. */
retval = '?';
if (num_matches == 0) {
fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]);
} else {
fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]);
}
}
++optind;
return retval;
no_more_optchars:
first = NULL;
return -1;
}

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/getopt/getopt.h */
/*
 * Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H
#if defined(__cplusplus)
extern "C" {
#endif
#define no_argument 0
#define required_argument 1
#define optional_argument 2
extern char* optarg;
extern int optind, opterr, optopt;
struct option {
const char* name;
int has_arg;
int* flag;
int val;
};
int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
const char* optstring, const struct option* longopts, int* longindex);
#if defined(__cplusplus)
}
#endif
#endif // INCLUDED_GETOPT_PORT_H

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h */
/* ./../windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle_jet.h */
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create jet_pool_create
# define pool_delete jet_pool_delete
# define pool_malloc jet_pool_malloc
# define pool_calloc jet_pool_calloc
# define pool_ralloc jet_pool_ralloc
# define pool_aligned_alloc jet_pool_aligned_alloc
# define pool_free jet_pool_free
# define pool_malloc_usable_size jet_pool_malloc_usable_size
# define pool_malloc_stats_print jet_pool_malloc_stats_print
# define pool_extend jet_pool_extend
# define pool_set_alloc_funcs jet_pool_set_alloc_funcs
# define pool_check jet_pool_check
# define malloc_conf jet_malloc_conf
# define malloc_message jet_malloc_message
# define malloc jet_malloc
# define calloc jet_calloc
# define posix_memalign jet_posix_memalign
# define aligned_alloc jet_aligned_alloc
# define realloc jet_realloc
# define free jet_free
# define mallocx jet_mallocx
# define rallocx jet_rallocx
# define xallocx jet_xallocx
# define sallocx jet_sallocx
# define dallocx jet_dallocx
# define nallocx jet_nallocx
# define mallctl jet_mallctl
# define mallctlnametomib jet_mallctlnametomib
# define mallctlbymib jet_mallctlbymib
# define navsnprintf jet_navsnprintf
# define malloc_stats_print jet_malloc_stats_print
# define malloc_usable_size jet_malloc_usable_size
#endif
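/*
 * Editor's note (illustrative): with JEMALLOC_MANGLE defined, application
 * code written against the plain names is rewritten by the macros above,
 * e.g. pool_malloc(p, n) compiles to jet_pool_malloc(p, n).
 */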
/*
* The jet_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef jet_pool_create
# undef jet_pool_delete
# undef jet_pool_malloc
# undef jet_pool_calloc
# undef jet_pool_ralloc
# undef jet_pool_aligned_alloc
# undef jet_pool_free
# undef jet_pool_malloc_usable_size
# undef jet_pool_malloc_stats_print
# undef jet_pool_extend
# undef jet_pool_set_alloc_funcs
# undef jet_pool_check
# undef jet_malloc_conf
# undef jet_malloc_message
# undef jet_malloc
# undef jet_calloc
# undef jet_posix_memalign
# undef jet_aligned_alloc
# undef jet_realloc
# undef jet_free
# undef jet_mallocx
# undef jet_rallocx
# undef jet_xallocx
# undef jet_sallocx
# undef jet_dallocx
# undef jet_nallocx
# undef jet_mallctl
# undef jet_mallctlnametomib
# undef jet_mallctlbymib
# undef jet_navsnprintf
# undef jet_malloc_stats_print
# undef jet_malloc_usable_size
#endif

/* File: NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos_jet.h */
/*
* The jet_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
 * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle_jet.h).
*/
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *jet_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int jet_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t jet_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *jet_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *jet_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *jet_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *jet_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void jet_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t jet_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void jet_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void jet_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int jet_pool_check(pool_t *pool);
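/*
 * Illustrative pool life cycle (editor's sketch using the jet_ names
 * declared above; addr/size are assumed to describe caller-provided memory):
 *
 *	pool_t *p = jet_pool_create(addr, size, 0);
 *	void *obj = jet_pool_malloc(p, 64);
 *	jet_pool_free(p, obj);
 *	jet_pool_delete(p);
 */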
JEMALLOC_EXPORT void *jet_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int jet_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *jet_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void jet_free(void *ptr);
JEMALLOC_EXPORT void *jet_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *jet_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t jet_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t jet_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void jet_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t jet_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int jet_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int jet_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int jet_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void jet_malloc_stats_print(void (*write_cb)(void *,
const char *), void *jet_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t jet_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int jet_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * jet_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * jet_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,176 | 45.043478 | 91 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_rename.h | /*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
| 1,694 | 41.375 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle.h | /*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
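/*
 * Example (a minimal sketch of hypothetical application code; the include
 * path depends on how these headers are installed): defining JEMALLOC_MANGLE
 * before the include lets the application keep using the standard names
 * while they silently resolve to the prefixed symbols.
 *
 *	#define JEMALLOC_MANGLE
 *	#include <jemalloc/jemalloc.h>
 *
 *	int
 *	main(void)
 *	{
 *		void *p = malloc(64);	// expands to je_malloc(64)
 *		free(p);		// expands to je_free(p)
 *		return 0;
 *	}
 */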
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
| 2,874 | 32.045977 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc.h | #ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
#include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
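/*
 * Example (a minimal sketch, not part of the original header): the MALLOCX_*
 * macros compose into the "flags" argument of the *allocx functions.  The
 * request below asks for 4096 bytes that are 64-byte aligned and zeroed,
 * served from arena 0 (the arena index is illustrative).
 *
 *	void *p = je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
 *	    MALLOCX_ARENA(0));
 *	if (p != NULL)
 *		je_dallocx(p, 0);
 */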
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed, int empty);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
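/*
 * Example (a minimal sketch of hypothetical application code; the region and
 * its size are illustrative and must satisfy the allocator's minimum pool
 * size): a pool turns a caller-provided address range into a heap that the
 * je_pool_* functions allocate from.  The trailing 0, 0 arguments are the
 * "zeroed" and "empty" flags from the declaration above.
 *
 *	static char region[16 << 20];
 *
 *	pool_t *pool = je_pool_create(region, sizeof(region), 0, 0);
 *	if (pool != NULL) {
 *		void *p = je_pool_malloc(pool, 128);
 *		je_pool_free(pool, p);
 *		je_pool_delete(pool);
 *	}
 */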
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
| 10,674 | 34 | 90 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos.h | /*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,124 | 44.289855 | 90 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_macros.h | #include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
| 1,426 | 29.361702 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_unnamespace.h | #undef je_pool_create
#undef je_pool_delete
#undef je_pool_malloc
#undef je_pool_calloc
#undef je_pool_ralloc
#undef je_pool_aligned_alloc
#undef je_pool_free
#undef je_pool_malloc_usable_size
#undef je_pool_malloc_stats_print
#undef je_pool_extend
#undef je_pool_set_alloc_funcs
#undef je_pool_check
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_navsnprintf
#undef je_malloc_stats_print
#undef je_malloc_usable_size
| 720 | 20.848485 | 33 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_namespace.h | #define je_pool_create JEMALLOC_N(pool_create)
#define je_pool_delete JEMALLOC_N(pool_delete)
#define je_pool_malloc JEMALLOC_N(pool_malloc)
#define je_pool_calloc JEMALLOC_N(pool_calloc)
#define je_pool_ralloc JEMALLOC_N(pool_ralloc)
#define je_pool_aligned_alloc JEMALLOC_N(pool_aligned_alloc)
#define je_pool_free JEMALLOC_N(pool_free)
#define je_pool_malloc_usable_size JEMALLOC_N(pool_malloc_usable_size)
#define je_pool_malloc_stats_print JEMALLOC_N(pool_malloc_stats_print)
#define je_pool_extend JEMALLOC_N(pool_extend)
#define je_pool_set_alloc_funcs JEMALLOC_N(pool_set_alloc_funcs)
#define je_pool_check JEMALLOC_N(pool_check)
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_navsnprintf JEMALLOC_N(navsnprintf)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
| 1,536 | 45.575758 | 70 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_unnamespace.h | #undef a0calloc
#undef a0free
#undef a0malloc
#undef arena_alloc_junk_small
#undef arena_bin_index
#undef arena_bin_info
#undef arena_boot
#undef arena_chunk_alloc_huge
#undef arena_chunk_dalloc_huge
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_locked
#undef arena_dalloc_small
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_malloc
#undef arena_malloc_large
#undef arena_malloc_small
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbits_unzeroed_set
#undef arena_mapbitsp_get
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_mapelm_to_pageind
#undef arena_mapp_get
#undef arena_maxclass
#undef arena_new
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_ctx_get
#undef arena_prof_ctx_set
#undef arena_prof_promoted
#undef arena_ptr_small_binind_get
#undef arena_purge_all
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_redzone_corruption
#undef arena_run_regind
#undef arena_runs_avail_tree_iter
#undef arena_salloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arenas
#undef pools
#undef arenas_booted
#undef arenas_cleanup
#undef arenas_extend
#undef arenas_initialized
#undef arenas_lock
#undef arenas_tls
#undef arenas_tsd
#undef arenas_tsd_boot
#undef arenas_tsd_cleanup_wrapper
#undef arenas_tsd_get
#undef arenas_tsd_get_wrapper
#undef arenas_tsd_init_head
#undef arenas_tsd_set
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef base_alloc
#undef base_boot
#undef base_calloc
#undef base_free_fn
#undef base_malloc_fn
#undef base_node_alloc
#undef base_node_dalloc
#undef base_pool
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_info_ngroups
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bt_init
#undef buferror
#undef choose_arena
#undef choose_arena_hard
#undef chunk_alloc_arena
#undef chunk_alloc_base
#undef chunk_alloc_default
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_global_boot
#undef chunk_boot
#undef chunk_dalloc_default
#undef chunk_dalloc_mmap
#undef chunk_dss_boot
#undef chunk_dss_postfork_child
#undef chunk_dss_postfork_parent
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_dss_prefork
#undef chunk_in_dss
#undef chunk_npages
#undef chunk_postfork_child
#undef chunk_postfork_parent
#undef chunk_prefork
#undef chunk_unmap
#undef chunk_record
#undef chunks_mtx
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_bucket_search
#undef ckh_count
#undef ckh_delete
#undef ckh_evict_reloc_insert
#undef ckh_insert
#undef ckh_isearch
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_rebuild
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ckh_try_bucket_insert
#undef ckh_try_insert
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef dss_prec_names
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szad_first
#undef extent_tree_szad_insert
#undef extent_tree_szad_iter
#undef extent_tree_szad_iter_recurse
#undef extent_tree_szad_iter_start
#undef extent_tree_szad_last
#undef extent_tree_szad_new
#undef extent_tree_szad_next
#undef extent_tree_szad_nsearch
#undef extent_tree_szad_prev
#undef extent_tree_szad_psearch
#undef extent_tree_szad_remove
#undef extent_tree_szad_reverse_iter
#undef extent_tree_szad_reverse_iter_recurse
#undef extent_tree_szad_reverse_iter_start
#undef extent_tree_szad_search
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_allocated
#undef huge_boot
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_ndalloc
#undef huge_nmalloc
#undef huge_palloc
#undef huge_postfork_child
#undef huge_postfork_parent
#undef huge_prefork
#undef huge_prof_ctx_get
#undef huge_prof_ctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef icalloc
#undef icalloct
#undef idalloc
#undef idalloct
#undef imalloc
#undef imalloct
#undef in_valgrind
#undef ipalloc
#undef ipalloct
#undef iqalloc
#undef iqalloct
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef lg_floor
#undef malloc_cprintf
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_rwlock_init
#undef malloc_rwlock_postfork_child
#undef malloc_rwlock_postfork_parent
#undef malloc_rwlock_prefork
#undef malloc_rwlock_rdlock
#undef malloc_rwlock_wrlock
#undef malloc_rwlock_unlock
#undef malloc_rwlock_destroy
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef mb_write
#undef mutex_boot
#undef narenas_auto
#undef narenas_total
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef npools
#undef npools_cnt
#undef opt_abort
#undef opt_dss
#undef opt_junk
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_purge
#undef pools_shared_data_initialized
#undef pow2_ceil
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_ctx_get
#undef prof_ctx_set
#undef prof_dump_open
#undef prof_free
#undef prof_gdump
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_record_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork
#undef prof_realloc
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tdata_booted
#undef prof_tdata_cleanup
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_initialized
#undef prof_tdata_tls
#undef prof_tdata_tsd
#undef prof_tdata_tsd_boot
#undef prof_tdata_tsd_cleanup_wrapper
#undef prof_tdata_tsd_get
#undef prof_tdata_tsd_get_wrapper
#undef prof_tdata_tsd_init_head
#undef prof_tdata_tsd_set
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_boot
#undef quarantine_booted
#undef quarantine_cleanup
#undef quarantine_init
#undef quarantine_tls
#undef quarantine_tsd
#undef quarantine_tsd_boot
#undef quarantine_tsd_cleanup_wrapper
#undef quarantine_tsd_get
#undef quarantine_tsd_get_wrapper
#undef quarantine_tsd_init_head
#undef quarantine_tsd_set
#undef register_zone
#undef rtree_delete
#undef rtree_get
#undef rtree_get_locked
#undef rtree_new
#undef rtree_postfork_child
#undef rtree_postfork_parent
#undef rtree_prefork
#undef rtree_set
#undef s2u
#undef sa2u
#undef set_errno
#undef small_bin2size
#undef small_bin2size_compute
#undef small_bin2size_lookup
#undef small_bin2size_tab
#undef small_s2u
#undef small_s2u_compute
#undef small_s2u_lookup
#undef small_size2bin
#undef small_size2bin_compute
#undef small_size2bin_lookup
#undef small_size2bin_tab
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_chunks
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_associate
#undef tcache_arena_dissociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot0
#undef tcache_boot1
#undef tcache_booted
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_destroy
#undef tcache_enabled_booted
#undef tcache_enabled_get
#undef tcache_enabled_initialized
#undef tcache_enabled_set
#undef tcache_enabled_tls
#undef tcache_enabled_tsd
#undef tcache_enabled_tsd_boot
#undef tcache_enabled_tsd_cleanup_wrapper
#undef tcache_enabled_tsd_get
#undef tcache_enabled_tsd_get_wrapper
#undef tcache_enabled_tsd_init_head
#undef tcache_enabled_tsd_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_initialized
#undef tcache_maxclass
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcache_thread_cleanup
#undef tcache_tls
#undef tcache_tsd
#undef tcache_tsd_boot
#undef tcache_tsd_cleanup_wrapper
#undef tcache_tsd_get
#undef tcache_tsd_get_wrapper
#undef tcache_tsd_init_head
#undef tcache_tsd_set
#undef thread_allocated_booted
#undef thread_allocated_initialized
#undef thread_allocated_tls
#undef thread_allocated_tsd
#undef thread_allocated_tsd_boot
#undef thread_allocated_tsd_cleanup_wrapper
#undef thread_allocated_tsd_get
#undef thread_allocated_tsd_get_wrapper
#undef thread_allocated_tsd_init_head
#undef thread_allocated_tsd_set
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef pool_new
#undef pool_destroy
#undef pools_lock
#undef pool_base_lock
#undef pool_prefork
#undef pool_postfork_parent
#undef pool_postfork_child
#undef pool_alloc
#undef vec_get
#undef vec_set
#undef vec_delete
| 11,246 | 23.396963 | 44 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/err.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* err.h - error and warning messages
*/
#ifndef ERR_H
#define ERR_H 1
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
/*
* err - windows implementation of unix err function
*/
__declspec(noreturn) static void
err(int eval, const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
vfprintf(stderr, fmt, vl);
va_end(vl);
exit(eval);
}
/*
* warn - windows implementation of unix warn function
*/
static void
warn(const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
fprintf(stderr, "Warning: ");
vfprintf(stderr, fmt, vl);
va_end(vl);
}
#endif /* ERR_H */
| 2,190 | 29.859155 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sched.h | /*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sched.h
*/
| 1,620 | 44.027778 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/win_mmap.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry;
HANDLE FileHandle;
HANDLE FileMappingHandle;
void *BaseAddress;
void *EndAddress;
DWORD Access;
os_off_t Offset;
size_t FileLen;
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
extern SRWLOCK FileMappingQLock;
extern SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
| 2,817 | 33.790123 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/platform.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
 * This is included in each source file using the "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non-Unicode characters */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
* The inline keyword is available only in VC++.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
* missing definitions
*/
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ);
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
__inline int
sigemptyset(sigset_t *set)
{
*set = 0;
return 0;
}
__inline int
sigfillset(sigset_t *set)
{
*set = ~0;
return 0;
}
__inline int
sigaddset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set |= (1ULL << (signum - 1));
return 0;
}
__inline int
sigdelset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set &= ~(1ULL << (signum - 1));
return 0;
}
__inline int
sigismember(const sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
return ((*set & (1ULL << (signum - 1))) ? 1 : 0);
}
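/*
 * Example (a minimal sketch): the emulated sigset_t is a plain bit mask,
 * so the usual POSIX idiom works unchanged.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	if (sigismember(&set, SIGINT))
 *		sigdelset(&set, SIGINT);
 */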
/* sched.h */
/*
* sched_yield -- yield the processor
*/
__inline int
sched_yield(void)
{
SwitchToThread();
return 0; /* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
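/*
 * Example (a minimal sketch; libfoo_init and libfoo_fini are hypothetical):
 * MSVC_CONSTR emulates __attribute__((constructor)) by planting a function
 * pointer in the CRT initializer section, and MSVC_DESTR emulates
 * __attribute__((destructor)) by registering an atexit() handler from such
 * a constructor.
 *
 *	MSVC_CONSTR(libfoo_init)
 *	void libfoo_init(void) { }
 *
 *	MSVC_DESTR(libfoo_fini)
 *	void libfoo_fini(void) { }
 */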
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
| 5,389 | 22.744493 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/libgen.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake libgen.h
*/
| 1,621 | 44.055556 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/endian.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
#endif /* ENDIAN_H */
| 2,211 | 34.677419 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/features.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake features.h
*/
| 1,623 | 44.111111 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/unistd.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* unistd.h -- compatibility layer for POSIX operating system API
*/
#ifndef UNISTD_H
#define UNISTD_H 1
#include <stdio.h>
#define _SC_PAGESIZE 0
#define _SC_NPROCESSORS_ONLN 1
#define R_OK 04
#define W_OK 02
#define X_OK 00 /* execute permission doesn't exist on Windows */
#define F_OK 00
/*
* sysconf -- get configuration information at run time
*/
static __inline long
sysconf(int p)
{
SYSTEM_INFO si;
int ret = 0;
switch (p) {
case _SC_PAGESIZE:
GetSystemInfo(&si);
return si.dwPageSize;
case _SC_NPROCESSORS_ONLN:
for (int i = 0; i < GetActiveProcessorGroupCount(); i++) {
ret += GetActiveProcessorCount(i);
}
return ret;
default:
return 0;
}
}
#define getpid _getpid
/*
* pread -- read from a file descriptor at given offset
*/
static ssize_t
pread(int fd, void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _read(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
/*
* pwrite -- write to a file descriptor at given offset
*/
static ssize_t
pwrite(int fd, const void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _write(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
#define S_ISBLK(x) 0 /* BLK devices not exist on Windows */
/*
* basename -- parse pathname and return filename component
*/
static char *
basename(char *path)
{
char fname[_MAX_FNAME];
char ext[_MAX_EXT];
_splitpath(path, NULL, NULL, fname, ext);
sprintf(path, "%s%s", fname, ext);
return path;
}
/*
* dirname -- parse pathname and return directory component
*/
static char *
dirname(char *path)
{
if (path == NULL)
return ".";
size_t len = strlen(path);
if (len == 0)
return ".";
char *end = path + len;
/* strip trailing forslashes and backslashes */
while ((--end) > path) {
if (*end != '\\' && *end != '/') {
*(end + 1) = '\0';
break;
}
}
/* strip basename */
while ((--end) > path) {
if (*end == '\\' || *end == '/') {
*end = '\0';
break;
}
}
if (end != path) {
return path;
/* handle edge cases */
} else if (*end == '\\' || *end == '/') {
*(end + 1) = '\0';
} else {
*end++ = '.';
*end = '\0';
}
return path;
}
#endif /* UNISTD_H */
| 3,962 | 22.873494 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/strings.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake strings.h
*/
| 1,627 | 44.222222 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/dirent.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake dirent.h
*/
| 1,626 | 44.194444 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/uio.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/uio.h -- definition of iovec structure
*/
#ifndef SYS_UIO_H
#define SYS_UIO_H 1
#include <pmemcompat.h>
#ifdef __cplusplus
extern "C" {
#endif
ssize_t writev(int fd, const struct iovec *iov, int iovcnt);
#ifdef __cplusplus
}
#endif
#endif /* SYS_UIO_H */
| 1,874 | 34.377358 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/file.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
| 1,706 | 45.135135 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/statvfs.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake statvfs.h
*/
| 1,622 | 44.083333 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/param.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8 /* bits per byte */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
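/*
 * Worked examples (illustrative): roundup(10, 8) == 16, howmany(10, 8) == 2,
 * and setbit(b, 10) sets bit 2 of b[1]. The arguments are evaluated more
 * than once, so they must be free of side effects.
 */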
#endif /* SYS_PARAM_H */
| 2,127 | 39.150943 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/mount.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/mount.h
*/
| 1,629 | 44.277778 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/mman.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/mman.h -- memory-mapped files for Windows
*/
#ifndef SYS_MMAN_H
#define SYS_MMAN_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define PROT_NONE 0x0
#define PROT_READ 0x1
#define PROT_WRITE 0x2
#define PROT_EXEC 0x4
#define MAP_SHARED 0x1
#define MAP_PRIVATE 0x2
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_NORESERVE 0x04000
#define MS_ASYNC 1
#define MS_SYNC 4
#define MS_INVALIDATE 2
#define MAP_FAILED ((void *)(-1))
void *mmap(void *addr, size_t len, int prot, int flags,
int fd, os_off_t offset);
int munmap(void *addr, size_t len);
int msync(void *addr, size_t len, int flags);
int mprotect(void *addr, size_t len, int prot);
#ifdef __cplusplus
}
#endif
#endif /* SYS_MMAN_H */
| 2,357 | 30.026316 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/resource.h | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/resource.h
*/
| 1,627 | 44.222222 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/sys/wait.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/wait.h
*/
| 1,628 | 44.25 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/windows/include/linux/limits.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/limits.h -- fake header file
*/
/*
* XXX - The only purpose of this empty file is to avoid preprocessor
* errors when including a Linux-specific header file that has no equivalent
* on Windows. With this cheap trick, we don't need a lot of preprocessor
* conditionals in all the source code files.
*
* In the future, this will be addressed in some other way.
*/
| 1,986 | 43.155556 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/chunk_dss.c | #define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
return (NULL);
#endif
}
dss_prec_t
chunk_dss_prec_get(void)
{
dss_prec_t ret;
if (have_dss == false)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
if (have_dss == false)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false);
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a huge allocation request as a negative increment.
*/
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
size_t gap_size, cpad_size;
void *cpad, *dss_next;
intptr_t incr;
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
do {
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
chunksize_mask;
/*
* Compute how much chunk-aligned pad space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
cpad = (void *)((uintptr_t)dss_max + gap_size);
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
alignment);
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
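			/*
			 * Illustrative numbers: with 4 MiB chunks and
			 * chunk-sized alignment, a break sitting 1 MiB into a
			 * chunk gives gap_size == 3 MiB; ret then lands on the
			 * next chunk boundary, cpad_size == 0, and incr below
			 * is simply gap_size + size.
			 */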
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(&base_pool, cpad, cpad_size);
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
return (ret);
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
bool
chunk_in_dss(void *chunk)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_boot(void)
{
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
}
void
chunk_dss_prefork(void)
{
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
| 4,272 | 20.365 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/util.c | #define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define JEMALLOC_UTIL_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
/******************************************************************************/
/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*/
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
* je_malloc_message(...) throughout the code.
*/
void
malloc_write(const char *s)
{
if (je_malloc_message != NULL)
je_malloc_message(NULL, s);
else
wrtmessage(NULL, s);
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(err, buf, buflen));
#endif
}
uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
unsigned b;
bool neg;
const char *p, *ns;
p = nptr;
if (base < 0 || base == 1 || base > 36) {
ns = p;
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
p++;
break;
case '-':
neg = true;
/* Fall through. */
case '+':
p++;
/* Fall through. */
default:
goto label_prefix;
}
}
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
* " -x").
*/
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
if (b == 0)
b = 8;
if (b == 8)
p++;
break;
case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
if (b == 0)
b = 16;
if (b == 16)
p += 2;
break;
default:
break;
}
break;
default:
p++;
ret = 0;
goto label_return;
}
}
if (b == 0)
b = 10;
/* Convert. */
ret = 0;
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
uintmax_t pret = ret;
ret *= b;
ret += digit;
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
ret = UINTMAX_MAX;
goto label_return;
}
p++;
}
if (neg)
ret = -ret;
if (p == ns) {
/* No conversion performed. */
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
} else
*endptr = (char *)p;
}
return (ret);
}
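/*
 * Examples (illustrative): malloc_strtoumax("0x1f", NULL, 0) == 31 and
 * malloc_strtoumax("0755", NULL, 0) == 493, since with base == 0 the
 * "0x"/"0" prefixes select base 16/8. On overflow the result is
 * UINTMAX_MAX and errno is set to ERANGE.
 */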
static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
{
unsigned i;
i = U2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
s[i] = digits[x & 0xf];
x >>= 4;
} while (x > 0);
break;
} default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
assert(base >= 2 && base <= 36);
do {
i--;
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return (&s[i]);
}
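/*
 * Example (illustrative): with a U2S_BUFSIZE buffer, u2s(255, 16, false,
 * buf, &slen) writes "ff" at the tail of buf, sets slen to 2 and returns a
 * pointer to the first digit, so callers must use the returned pointer
 * rather than buf itself.
 */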
static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p)
{
bool neg;
if ((neg = (x < 0)))
x = -x;
s = u2s(x, 10, false, s, slen_p);
if (neg)
sign = '-';
switch (sign) {
case '-':
if (neg == false)
break;
/* Fall through. */
case ' ':
case '+':
s--;
(*slen_p)++;
*s = sign;
break;
default: not_reached();
}
return (s);
}
static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
{
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
return (s);
}
static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
{
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return (s);
}
int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) \
str[i] = (c); \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = ((slen) <= size - i) ? (slen) : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += (slen); \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = ((width) == -1) ? 0 : (((slen) < (size_t)(width)) ? \
(size_t)(width) - (slen) : 0); \
if ((left_justify) == false && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if ((left_justify) && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
f++;
/* Flags. */
while (true) {
switch (*f) {
case '#':
assert(alt_form == false);
alt_form = true;
break;
case '-':
assert(left_justify == false);
left_justify = true;
break;
case ' ':
assert(plus_space == false);
plus_space = true;
break;
case '+':
assert(plus_plus == false);
plus_plus = true;
break;
default: goto label_width;
}
f++;
}
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
f++;
if (width < 0) {
left_justify = true;
width = -width;
}
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
break;
} default:
break;
}
/* Width/precision separator. */
if (*f == '.')
f++;
else
goto label_length;
/* Precision. */
switch (*f) {
case '*':
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
prec = (int)uprec;
break;
}
default: break;
}
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
if (*f == 'l') {
len = 'q';
f++;
} else
len = 'l';
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
f++;
break;
default: break;
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
unsigned char val;
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
val = va_arg(ap, int);
buf[0] = val;
buf[1] = '\0';
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
if (s) {
slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
} else {
APPEND_S("(null)", 6);
}
f++;
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} default: not_reached();
}
break;
} default: {
APPEND_C(*f);
f++;
break;
}}
}
label_out:
if (i < size)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
}
JEMALLOC_ATTR(format(printf, 3, 4))
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
va_start(ap, format);
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return (ret);
}
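/*
 * Usage note (illustrative): as with snprintf(3), the return value is the
 * length the formatted output would have had, so truncation can be detected
 * by checking whether the return value is >= the buffer size.
 */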
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
cbopaque = NULL;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
| 14,080 | 20.49771 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/stats.c | #define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET_ARRAY(n, v, t, c) do { \
size_t mib[8]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t) * (c); \
xmallctlnametomib(n, mib, &miblen); \
mib[1] = p; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET(n, v, t) CTL_P_GET_ARRAY(n, v, t, 1)
#define CTL_PI_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PIJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
mib[6] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
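/*
 * In the CTL_P*_GET macros above, the numeric components of a name such as
 * "pool.%u.stats.arenas.0.bins.0.nruns" are patched through the mib after
 * xmallctlnametomib(): mib[1] selects the pool, mib[4] the arena index (i)
 * and mib[6] the size-class index (j).
 */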
/******************************************************************************/
/* Data. */
bool opt_stats_print = false;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i, bool bins, bool large);
/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page;
bool config_tcache;
unsigned nbins, j, gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_P_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
" newruns reruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns curruns\n");
}
CTL_P_GET("pool.0.arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
uint64_t nruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
if (nruns == 0) {
if (gap_start == UINT_MAX)
gap_start = j;
} else {
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
size_t curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u..%u]\n", gap_start,
j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
}
CTL_PJ_GET("pool.%u.arenas.bin.0.size", ®_size, size_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.nregs", &nregs, uint32_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.run_size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.allocated",
&allocated, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nmalloc",
&nmalloc, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.ndalloc",
&ndalloc, uint64_t);
if (config_tcache) {
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nrequests",
&nrequests, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nfills",
&nfills, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nflushes",
&nflushes, uint64_t);
}
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nrequests,
nfills, nflushes, nruns, reruns, curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nruns, reruns,
curruns);
}
}
}
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
gap_start, j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
}
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page, nlruns, j;
ssize_t gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
" curruns\n");
CTL_P_GET("pool.0.arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.ndalloc", &ndalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nrequests", &nrequests,
uint64_t);
if (nrequests == 0) {
if (gap_start == -1)
gap_start = j;
} else {
CTL_PJ_GET("pool.%u.arenas.lrun.0.size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
j - gap_start);
gap_start = -1;
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
run_size, run_size / page, nmalloc, ndalloc,
nrequests, curruns);
}
}
if (gap_start != -1)
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i, bool bins, bool large)
{
unsigned nthreads;
const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_PI_GET("pool.%u.stats.arenas.0.dss", &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
CTL_PI_GET("pool.%u.stats.arenas.0.pactive", &pactive, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.pdirty", &pdirty, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.npurge", &npurge, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nmadvise", &nmadvise, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.purged", &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
" %"PRIu64" madvise%s, %"PRIu64" purged\n",
pactive, pdirty, npurge, npurge == 1 ? "" : "s",
nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc nrequests\n");
CTL_PI_GET("pool.%u.stats.arenas.0.small.allocated", &small_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.large.allocated", &large_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.allocated", &huge_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_PI_GET("pool.%u.stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, p, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, p, i);
}
void
stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
unsigned p = pool->pool_id;
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
 */
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
}
}
}
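	/*
	 * For example (illustrative), opts == "ab" suppresses both the
	 * per-arena breakdown and the per-bin tables.
	 */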
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_P_GET("pool.0.arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
CTL_P_GET("pool.0.arenas.quantum", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_P_GET("pool.0.arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
CTL_P_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
== 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"Average profile sample interval: %"PRIu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"PRIu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
(ZU(1) << sv), sv);
}
if (config_stats) {
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
CTL_P_GET("pool.0.stats.cactive", &cactive, size_t *);
CTL_P_GET("pool.0.stats.allocated", &allocated, size_t);
CTL_P_GET("pool.0.stats.active", &active, size_t);
CTL_P_GET("pool.0.stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, mapped: %zu\n",
allocated, active, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_P_GET("pool.0.stats.chunks.total", &chunks_total, uint64_t);
CTL_P_GET("pool.0.stats.chunks.high", &chunks_high, size_t);
CTL_P_GET("pool.0.stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
if (merged) {
unsigned narenas;
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i, ninitialized;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
if (ninitialized > 1 || unmerged == false) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
p, narenas, bins, large);
}
}
}
if (unmerged) {
unsigned narenas;
/* Print stats for each arena. */
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = 0; i < narenas; i++) {
if (initialized[i]) {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, p, i, bins, large);
}
}
}
}
}
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
| 18,137 | 31.216696 | 82 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/vector.c | #define JEMALLOC_VECTOR_C_
#include "jemalloc/internal/jemalloc_internal.h"
/* Return the smallest power of two strictly greater than n. */
static inline unsigned
ceil_p2(unsigned n)
{
	/* __builtin_clz() is undefined for an argument of 0. */
	if (n == 0)
		return 1;
	return 1 << (32 - __builtin_clz(n));
}
/* Calculate how big should be the vector list array. */
static inline unsigned
get_vec_part_len(unsigned n)
{
return MAX(ceil_p2(n), VECTOR_MIN_PART_SIZE);
}
/*
 * Find the vector list element in which the index should be stored;
 * if no such list exists, return a pointer to the place in memory where
 * it should be allocated.
*/
static vec_list_t **
find_vec_list(vector_t *vector, int *index)
{
vec_list_t **vec_list;
for (vec_list = &vector->list;
*vec_list != NULL; vec_list = &(*vec_list)->next) {
if (*index < (*vec_list)->length)
break;
*index -= (*vec_list)->length;
}
return vec_list;
}
/* Return a value from vector at index. */
void *
vec_get(vector_t *vector, int index)
{
vec_list_t *vec_list = *find_vec_list(vector, &index);
return (vec_list == NULL) ? NULL : vec_list->data[index];
}
/* Set a value to vector at index. */
void
vec_set(vector_t *vector, int index, void *val)
{
vec_list_t **vec_list = find_vec_list(vector, &index);
/*
* There's no array to put the value in,
* which means a new one has to be allocated.
*/
if (*vec_list == NULL) {
int vec_part_len = get_vec_part_len(index);
*vec_list = base_malloc_fn(sizeof(vec_list_t) +
sizeof(void *) * vec_part_len);
if (*vec_list == NULL)
return;
(*vec_list)->next = NULL;
(*vec_list)->length = vec_part_len;
}
(*vec_list)->data[index] = val;
}
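/*
 * Usage sketch (illustrative, not from the original source): assumes a
 * vector_t whose list pointer starts out NULL is a valid empty vector
 * (find_vec_list() only inspects vector->list) and that the
 * base_malloc_fn/base_free_fn hooks are initialized. vec_delete() is
 * defined below. Guarded by "#if 0" so it is not compiled.
 */
#if 0
static void
vector_usage_example(void)
{
	vector_t v;
	int x = 42;

	v.list = NULL;			/* empty vector: no list parts yet */
	vec_set(&v, 1000, &x);		/* allocates a 1024-slot part */
	assert(vec_get(&v, 1000) == &x);
	vec_delete(&v);			/* frees every allocated part */
}
#endif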
/* Free all the memory in the container. */
void
vec_delete(vector_t *vector)
{
vec_list_t *vec_list_next, *vec_list = vector->list;
while (vec_list != NULL) {
vec_list_next = vec_list->next;
base_free_fn(vec_list);
vec_list = vec_list_next;
}
} | 1,845 | 21.512195 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/zone.c | #include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/*
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr, config_prof));
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
return (je_malloc(size));
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
return (je_calloc(num, size));
}
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
return (ret);
}
static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
return (ret);
}
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
free(ptr);
}
#endif
static void *
zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
not_reached();
return (NULL);
}
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
if (size == 0)
size = 1;
return (s2u(size));
}
static void
zone_force_lock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_prefork();
}
static void
zone_force_unlock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_postfork_parent();
}
JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
zone.pressure_relief = NULL;
#endif
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
if (malloc_default_purgeable_zone != NULL)
purgeable_zone = malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
do {
default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
* at the location of the specified zone. Unregistering the
* default zone thus makes the last registered one the default.
* On OSX < 10.6, unregistering shifts all registered zones.
* The first registered zone then becomes the default.
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
* owns the default zone allocated pointers. We thus unregister/
* re-register it in order to ensure it's always after the
* default zone. On OSX < 10.6, there is no purgeable zone, so
* this does nothing. On OSX >= 10.6, unregistering replaces the
		 * purgeable zone with the last registered zone above, i.e. the
* default zone. Registering it again then puts it at the end,
* obviously after the default zone.
*/
if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
} while (malloc_default_zone() != &zone);
}
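/*
 * Illustrative sketch, not part of the original source: the promotion
 * loop above in isolation. On OS X >= 10.6, unregistering the default
 * zone moves the most recently registered zone (ours) into its slot;
 * re-registering the old default appends it to the end. Repeating until
 * malloc_default_zone() == target makes the custom zone the one that
 * malloc(3) dispatches to. The purgeable-zone reordering handled above
 * is omitted here.
 */
static void
zone_promote_sketch(malloc_zone_t *target)
{
	malloc_zone_t *dz;

	while ((dz = malloc_default_zone()) != target) {
		malloc_zone_unregister(dz);	/* target slides forward. */
		malloc_zone_register(dz);	/* Old default goes last. */
	}
}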
| 7,677 | 26.92 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/bitmap.c | #define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t bits2groups(size_t nbits);
/******************************************************************************/
static size_t
bits2groups(size_t nbits)
{
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
!!(nbits & BITMAP_GROUP_NBITS_MASK));
}
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
unsigned i;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
/*
* Compute the number of groups necessary to store nbits bits, and
* progressively work upward through the levels until reaching a level
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = bits2groups(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = bits2groups(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
binfo->nlevels = i;
binfo->nbits = nbits;
}
size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface, so the bitmap starts out with all 1 bits, except for
* trailing unused bits (if any). Note that each group uses bit 0 to
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
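/*
 * Illustrative sketch, not part of the original source: the level
 * geometry above worked for a concrete case, assuming 64-bit groups
 * (LG_BITMAP_GROUP_NBITS == 6, LG_SIZEOF_BITMAP == 3). For nbits ==
 * 1000, bits2groups(1000) == 16 level-0 groups and bits2groups(16) ==
 * 1 level-1 group, so nlevels == 2 and the bitmap occupies 17 groups.
 */
static void
bitmap_geometry_sketch(void)
{
	bitmap_info_t binfo;

	bitmap_info_init(&binfo, 1000);
	assert(binfo.nlevels == 2);
	assert(bitmap_size(1000) == 17 * sizeof(bitmap_t));
}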
| 2,516 | 26.659341 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/ckh.c | /*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key))
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
}
return (SIZE_T_MAX);
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
ckh->hash(key, hashes);
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
}
}
return (true);
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
data = *argdata;
while (true) {
/*
* Choose a random item within the bucket to evict. This is
* critical to correct function, because without (eventually)
* evicting all items within a bucket during iteration, it
* would be possible to get stuck in an infinite loop if there
* were an item for which both hashes indicated the same
* bucket.
*/
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
* we are guaranteed to eventually escape this bucket
* during iteration, assuming pseudo-random item
* selection (true randomness would make infinite
* looping a remote possibility). The reason we can
* never get trapped forever is that there are two
* cases:
*
* 1) This bucket == argbucket, so we will quickly
* detect an eviction cycle and terminate.
* 2) An item was evicted to this bucket from another,
* which means that at least one item in this bucket
* has hashes that indicate distinct buckets.
*/
}
/* Check for a cycle. */
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
}
bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
}
}
JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
ckh->count = 0;
for (i = nins = 0; nins < count; i++) {
if (aTab[i].key != NULL) {
key = aTab[i].key;
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
}
nins++;
}
}
return (false);
}
static bool
ckh_grow(ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
#endif
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table will have to be doubled more than once in order to create a
* usable table.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
return (ret);
}
static void
ckh_shrink(ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table rebuild will fail.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
* prevent this or future operations from proceeding.
*/
return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
return;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
ckh->nshrinkfails++;
#endif
}
bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
ckh->ngrows = 0;
ckh->nshrinks = 0;
ckh->nshrinkfails = 0;
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
	 * Find the minimum power of 2 that is large enough to fit minitems
	 * entries. We are using (2+,2) cuckoo hashing, which has an expected
	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
	 * factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++)
; /* Do nothing. */
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
return (ret);
}
void
ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
(unsigned long long)ckh->nrelocs);
#endif
idalloc(ckh->tab);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
return (ckh->count);
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL)
*key = (void *)ckh->tab[i].key;
if (data != NULL)
*data = (void *)ckh->tab[i].data;
*tabind = i + 1;
return (false);
}
}
return (true);
}
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
bool ret;
assert(ckh != NULL);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
ckh->ninserts++;
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
goto label_return;
}
}
ret = false;
label_return:
return (ret);
}
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
}
return (false);
}
return (true);
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
return (false);
}
return (true);
}
void
ckh_string_hash(const void *key, size_t r_hash[2])
{
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
ckh_string_keycomp(const void *k1, const void *k2)
{
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
union {
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));
u.v = key;
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{
return ((k1 == k2) ? true : false);
}
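/*
 * Illustrative sketch, not part of the original source: minimal use of
 * the cuckoo hash with the string helpers above. Note the inverted
 * return convention throughout this file: ckh_new()/ckh_insert()/
 * ckh_search() return false on success and true on failure.
 */
static bool
ckh_usage_sketch(void)
{
	ckh_t ckh;
	void *data;

	if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
		return (true);	/* OOM. */
	if (ckh_insert(&ckh, "key", "value")) {
		ckh_delete(&ckh);
		return (true);
	}
	assert(ckh_search(&ckh, "key", NULL, &data) == false);
	assert(strcmp((char *)data, "value") == 0);
	ckh_delete(&ckh);
	return (false);
}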
| 13,888 | 23.625887 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/extent.c | #define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
size_t a_size = a->size;
size_t b_size = b->size;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
extent_ad_comp)
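/*
 * Illustrative sketch, not part of the original source: the size/address
 * ordering above makes the generated extent_tree_szad_nsearch() a
 * best-fit search. Seeding the key with the requested size and a NULL
 * address finds the smallest extent of at least that size (lowest
 * address among equal sizes), which is how chunk_recycle() uses it.
 */
static extent_node_t *
extent_best_fit_sketch(extent_tree_t *chunks_szad, size_t size)
{
	extent_node_t key;

	key.addr = NULL;
	key.size = size;
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}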
| 973 | 23.35 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/rtree.c | #define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc,
pool_t *pool)
{
rtree_t *ret;
unsigned bits_per_level, bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
if (bits > bits_in_leaf) {
height = 1 + (bits - bits_in_leaf) / bits_per_level;
if ((height-1) * bits_per_level + bits_in_leaf != bits)
height++;
} else {
height = 1;
}
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
ret = (rtree_t*)alloc(pool, offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
ret->alloc = alloc;
ret->dalloc = dalloc;
ret->pool = pool;
if (malloc_mutex_init(&ret->mutex)) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
ret->height = height;
if (height > 1) {
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
ret->level2bits[0] = (bits - bits_in_leaf) %
bits_per_level;
} else
ret->level2bits[0] = bits_per_level;
for (i = 1; i < height-1; i++)
ret->level2bits[i] = bits_per_level;
ret->level2bits[height-1] = bits_in_leaf;
} else
ret->level2bits[0] = bits;
ret->root = (void**)alloc(pool, sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
return (ret);
}
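/*
 * Illustrative sketch, not part of the original source: a worked case of
 * the height computation above, assuming a 64-byte RTREE_NODESIZE on a
 * 64-bit system (the real node size may differ). Then bits_per_level ==
 * 3 (8 void * slots) and bits_in_leaf == 6 (64 uint8_t slots). For
 * bits == 20, height starts at 1 + (20 - 6) / 3 == 5; since 4 * 3 + 6
 * == 18 != 20, height becomes 6, and level 0 absorbs the remainder:
 * level2bits == {2, 3, 3, 3, 3, 6}, which sums to the full 20 bits.
 */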
static void
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
{
if (level < rtree->height - 1) {
size_t nchildren, i;
nchildren = ZU(1) << rtree->level2bits[level];
for (i = 0; i < nchildren; i++) {
void **child = (void **)node[i];
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
}
if (rtree->dalloc)
rtree->dalloc(rtree->pool, node);
}
void
rtree_delete(rtree_t *rtree)
{
rtree_delete_subtree(rtree, rtree->root, 0);
malloc_mutex_destroy(&rtree->mutex);
if (rtree->dalloc)
rtree->dalloc(rtree->pool, rtree);
}
void
rtree_prefork(rtree_t *rtree)
{
malloc_mutex_prefork(&rtree->mutex);
}
void
rtree_postfork_parent(rtree_t *rtree)
{
malloc_mutex_postfork_parent(&rtree->mutex);
}
void
rtree_postfork_child(rtree_t *rtree)
{
malloc_mutex_postfork_child(&rtree->mutex);
}
| 2,549 | 21.767857 | 81 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/huge.c | #define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
void *
huge_malloc(arena_t *arena, size_t size, bool zero)
{
return (huge_palloc(arena, size, chunksize, zero));
}
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
bool is_zeroed;
pool_t *pool;
/* Allocate one or more contiguous chunks for this request. */
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (NULL);
}
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = choose_arena(arena);
if (arena == NULL)
return (NULL);
pool = arena->pool;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc(pool);
if (node == NULL)
return (NULL);
ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
if (ret == NULL) {
base_node_dalloc(pool, node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
node->arena = arena;
malloc_mutex_lock(&pool->huge_mtx);
extent_tree_ad_insert(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
return (ret);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static bool
huge_ralloc_no_move_expand(pool_t *pool, char *ptr, size_t oldsize, size_t size, bool zero) {
size_t csize;
void *expand_addr;
size_t expand_size;
extent_node_t *node, key;
arena_t *arena;
bool is_zeroed;
void *ret;
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (true);
}
expand_addr = ptr + oldsize;
expand_size = csize - oldsize;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Find the current arena. */
arena = node->arena;
malloc_mutex_unlock(&pool->huge_mtx);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
&is_zeroed);
if (ret == NULL)
return (true);
assert(ret == expand_addr);
malloc_mutex_lock(&pool->huge_mtx);
/* Update the size of the huge allocation. */
node->size = csize;
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && !zero) {
if (unlikely(opt_junk))
memset(expand_addr, 0xa5, expand_size);
else if (unlikely(opt_zero) && !is_zeroed)
memset(expand_addr, 0, expand_size);
}
return (false);
}
bool
huge_ralloc_no_move(pool_t *pool, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
/* Both allocations must be huge to avoid a move. */
if (oldsize <= arena_maxclass)
return (true);
assert(CHUNK_CEILING(oldsize) == oldsize);
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
return (false);
}
/* Overflow. */
if (CHUNK_CEILING(size) == 0)
return (true);
/* Shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
extent_node_t *node, key;
void *excess_addr;
size_t excess_size;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Update the size of the huge allocation. */
node->size = CHUNK_CEILING(size);
malloc_mutex_unlock(&pool->huge_mtx);
excess_addr = (char *)node->addr + CHUNK_CEILING(size);
excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);
/* Zap the excess chunks. */
huge_dalloc_junk(excess_addr, excess_size);
arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);
return (false);
}
/* Attempt to expand the allocation in-place. */
if (huge_ralloc_no_move_expand(pool, ptr, oldsize, size + extra, zero)) {
if (extra == 0)
return (true);
/* Try again, this time without extra. */
return (huge_ralloc_no_move_expand(pool, ptr, oldsize, size, zero));
}
return (false);
}
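/*
 * Illustrative sketch, not part of the original source: the in-place
 * window above in concrete numbers, assuming 4 MiB chunks. For oldsize
 * == 8 MiB, any request with CHUNK_CEILING(size) <= 8 MiB <=
 * CHUNK_CEILING(size + extra) succeeds without moving data; e.g. size
 * == 5 MiB, extra == 0 gives CHUNK_CEILING(5 MiB) == 8 MiB, so the
 * size class is unchanged and huge_ralloc_no_move() returns false.
 */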
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
if (huge_ralloc_no_move(arena->pool, ptr, oldsize, size, extra, zero) == false)
return (ptr);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(arena, size + extra, alignment, zero);
else
ret = huge_malloc(arena, size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(arena, size, alignment, zero);
else
ret = huge_malloc(arena, size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
return (ret);
}
void
huge_dalloc(pool_t *pool, void *ptr)
{
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
huge_dalloc_junk(node->addr, node->size);
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
base_node_dalloc(pool, node);
}
size_t
huge_salloc(const void *ptr)
{
size_t ret = 0;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != 0)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
size_t
huge_pool_salloc(pool_t *pool, const void *ptr)
{
size_t ret = 0;
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
return (ret);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret = NULL;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->prof_ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;
size_t i;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
node->prof_ctx = ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (node != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
}
/*
* Called at each pool opening.
*/
bool
huge_boot(pool_t *pool)
{
if (malloc_mutex_init(&pool->huge_mtx))
return (true);
return (false);
}
/*
* Called only at pool creation.
*/
bool
huge_init(pool_t *pool)
{
if (huge_boot(pool))
return (true);
/* Initialize chunks data. */
extent_tree_ad_new(&pool->huge);
return (false);
}
void
huge_prefork(pool_t *pool)
{
malloc_mutex_prefork(&pool->huge_mtx);
}
void
huge_postfork_parent(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->huge_mtx);
}
void
huge_postfork_child(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->huge_mtx);
}
| 9,358 | 21.021176 | 93 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/tcache.c | #define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#define ARR_INITIALIZER JEMALLOC_ARG_CONCAT({0})
malloc_tsd_data(, tcache, tsd_tcache_t, TSD_TCACHE_INITIALIZER)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
size_t tcache_maxclass;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
{
return (arena_salloc(ptr, false));
}
void
tcache_event_hard(tcache_t *tcache)
{
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
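/*
 * Illustrative sketch, not part of the original source: the GC flush
 * arithmetic above in numbers. With ncached == 20 and low_water == 8,
 * the pass keeps ncached - low_water + (low_water >> 2) == 14 objects,
 * i.e. it releases 6 of the 8 objects that sat unused below the low
 * water mark -- the "(ceiling) 3/4" mentioned above.
 */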
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < NBINS);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
if (config_prof && arena == tcache->arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
arena_mapp_get(chunk, pageind);
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr,
&arena_bin_info[binind], true);
}
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
} else {
/*
* This object was allocated via a different
* arena bin than the one that is currently
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &tcache->arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
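/*
 * Illustrative sketch, not part of the original source: the deferred
 * pass above in action. Flushing {A0, B0, A1}, where the A and B
 * objects belong to arenas A and B: pass one locks A's bin, frees A0
 * and A1, and stashes B0 at avail[0] (ndeferred == 1); pass two locks
 * B's bin and frees B0.
 */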
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
UNUSED bool idump;
if (config_prof)
idump = false;
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
tcache->arena = arena;
}
void
tcache_arena_dissociate(tcache_t *tcache)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
tcache_stats_merge(tcache, tcache->arena);
malloc_mutex_unlock(&tcache->arena->lock);
}
}
tcache_t *
tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
if (tcache == NULL) {
if (create == false) {
/*
* Creating a tcache here would cause
* allocation as a side effect of free().
* Ordinarily that would be okay since
* tcache_create() failure is a soft failure
* that doesn't propagate. However, if TLS
* data are freed via free() as in glibc,
* subtle corruption could result from setting
* a TLS variable after its backing memory is
* freed.
*/
return (NULL);
}
if (tcache_enabled_get() == false) {
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
return (tcache_create(choose_arena(&dummy)));
}
if (tcache == TCACHE_STATE_PURGATORY) {
/*
* Make a note that an allocator function was called
* after tcache_thread_cleanup() was called.
*/
tsd_tcache_t *tsd = tcache_tsd_get();
tcache = TCACHE_STATE_REINCARNATED;
tsd->seqno[pool->pool_id] = pool->seqno;
tsd->tcaches[pool->pool_id] = tcache;
return (NULL);
}
if (tcache == TCACHE_STATE_REINCARNATED)
return (NULL);
not_reached();
return (NULL);
}
tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
unsigned i;
tsd_tcache_t *tsd = tcache_tsd_get();
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
*
* That this works relies on the same logic as in ipalloc(), but we
* cannot directly call ipalloc() here due to tcache bootstrapping
* issues.
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
if (size <= SMALL_MAXCLASS)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
tsd->seqno[arena->pool->pool_id] = arena->pool->seqno;
tsd->tcaches[arena->pool->pool_id] = tcache;
return (tcache);
}
void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
size_t tcache_size;
tcache_arena_dissociate(tcache);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
arena_dalloc_large(arena, chunk, tcache);
} else
idalloct(tcache, false);
}
bool
tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len)
{
if (len == UINT_MAX)
return (true);
assert(len < POOLS_MAX);
	/* Round the new length up to the next power of 2... */
size_t npools = 1ULL << (32 - __builtin_clz(len + 1));
	/* ... but not less than POOLS_MIN. */
if (npools < POOLS_MIN)
npools = POOLS_MIN;
unsigned *tseqno = base_malloc_fn(npools * sizeof (unsigned));
if (tseqno == NULL)
return (true);
if (tsd->seqno != NULL)
memcpy(tseqno, tsd->seqno, tsd->npools * sizeof (unsigned));
memset(&tseqno[tsd->npools], 0, (npools - tsd->npools) * sizeof (unsigned));
tcache_t **tcaches = base_malloc_fn(npools * sizeof (tcache_t *));
if (tcaches == NULL) {
base_free_fn(tseqno);
return (true);
}
if (tsd->tcaches != NULL)
memcpy(tcaches, tsd->tcaches, tsd->npools * sizeof (tcache_t *));
memset(&tcaches[tsd->npools], 0, (npools - tsd->npools) * sizeof (tcache_t *));
base_free_fn(tsd->seqno);
tsd->seqno = tseqno;
base_free_fn(tsd->tcaches);
tsd->tcaches = tcaches;
tsd->npools = npools;
return (false);
}
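/*
 * Illustrative sketch, not part of the original source: the rounding
 * expression above. For len == 5, __builtin_clz(6) == 29, so npools ==
 * 1 << (32 - 29) == 8: the smallest power of two strictly greater than
 * len + 1, subsequently clamped from below by POOLS_MIN.
 */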
void
tcache_thread_cleanup(void *arg)
{
int i;
tsd_tcache_t *tsd_array = arg;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < tsd_array->npools; ++i) {
tcache_t *tcache = tsd_array->tcaches[i];
if (tcache != NULL) {
if (tcache == TCACHE_STATE_DISABLED) {
/* Do nothing. */
} else if (tcache == TCACHE_STATE_REINCARNATED) {
/*
* Another destructor called an allocator function after this
* destructor was called. Reset tcache to
* TCACHE_STATE_PURGATORY in order to receive another callback.
*/
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
} else if (tcache == TCACHE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to TCACHE_STATE_PURGATORY so that other destructors wouldn't
* cause re-creation of the tcache. This time, do nothing, so
* that the destructor will not be called again.
*/
			} else {
assert(tcache != TCACHE_STATE_PURGATORY);
if (pools[i] != NULL && tsd_array->seqno[i] == pools[i]->seqno)
tcache_destroy(tcache);
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
}
}
}
base_free_fn(tsd_array->seqno);
base_free_fn(tsd_array->tcaches);
tsd_array->npools = 0;
malloc_mutex_unlock(&pools_lock);
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
bool
tcache_boot0(void)
{
unsigned i;
	/* The array is already initialized; nothing to do. */
if (tcache_bin_info != NULL)
return (false);
/*
* If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1ULL << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1ULL << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1ULL << opt_lg_tcache_max);
nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(&base_pool,
nhbins * sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
return (false);
}
bool
tcache_boot1(void)
{
if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
return (true);
return (false);
}
| 15,882 | 26.057922 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/chunk.c | #define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void chunk_dalloc_core(pool_t *pool, void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
bool zeroed;
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning NULL.
*/
return (NULL);
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
key.addr = new_addr;
key.size = alloc_size;
malloc_mutex_lock(&pool->chunks_mtx);
node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL || (new_addr && node->addr != new_addr)) {
malloc_mutex_unlock(&pool->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
zeroed = node->zeroed;
if (zeroed)
*zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&pool->chunks_mtx);
node = base_node_alloc(pool);
if (node == NULL) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
malloc_mutex_lock(&pool->chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&pool->chunks_mtx);
if (node != NULL)
base_node_dalloc(pool, node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
return (ret);
}
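/*
 * Illustrative sketch, not part of the original source: the split
 * arithmetic above in numbers, assuming 4 MiB chunks. Recycling a
 * 1-chunk, chunk-aligned request from a cached extent at addr ==
 * 0x600000 with size == 12 MiB: leadsize == 0x800000 - 0x600000 ==
 * 2 MiB, ret == 0x800000, trailsize == 12 MiB - 2 MiB - 4 MiB == 6 MiB.
 * The 2 MiB lead and 6 MiB trail are reinserted as smaller extents.
 */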
/*
* If the caller specifies (*zero == false), it is still possible to receive
* zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
static void *
chunk_alloc_core(pool_t *pool, void *new_addr, size_t size, size_t alignment,
bool base, bool *zero, dss_prec_t dss_prec)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL
&& (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* All strategies for allocation failed. */
return (NULL);
}
static bool
chunk_register(pool_t *pool, void *chunk, size_t size, bool base)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
if (config_ivsalloc && base == false) {
if (rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 1))
return (true);
}
if (config_stats || config_prof) {
bool gdump;
malloc_mutex_lock(&pool->chunks_mtx);
if (config_stats)
pool->stats_chunks.nchunks += (size / chunksize);
pool->stats_chunks.curchunks += (size / chunksize);
if (pool->stats_chunks.curchunks > pool->stats_chunks.highchunks) {
pool->stats_chunks.highchunks =
pool->stats_chunks.curchunks;
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
malloc_mutex_unlock(&pool->chunks_mtx);
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
return (false);
}
void *
chunk_alloc_base(pool_t *pool, size_t size)
{
void *ret;
bool zero;
zero = false;
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
ret = chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, NULL, size,
chunksize, false, &zero);
} else {
ret = chunk_alloc_core(pool, NULL, size, chunksize, true, &zero,
chunk_dss_prec_get());
}
if (ret == NULL)
return (NULL);
if (chunk_register(pool, ret, size, true)) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
return (ret);
}
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero)
{
void *ret;
ret = chunk_alloc(new_addr, size, alignment, zero,
arena->ind, arena->pool);
if (ret != NULL && chunk_register(arena->pool, ret, size, false)) {
chunk_dalloc(ret, size, arena->ind, arena->pool);
ret = NULL;
}
return (ret);
}
/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind, pool_t *pool)
{
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
return (chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, new_addr, size,
alignment, false, zero));
} else {
malloc_rwlock_rdlock(&pool->arenas_lock);
dss_prec_t dss_prec = pool->arenas[arena_ind]->dss_prec;
malloc_rwlock_unlock(&pool->arenas_lock);
return (chunk_alloc_core(pool, new_addr, size, alignment,
false, zero, dss_prec));
}
}
void
chunk_record(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size, bool zeroed)
{
bool unzeroed, file_mapped;
extent_node_t *xnode, *node, *prev, *xprev, key;
file_mapped = pool_is_file_mapped(pool);
unzeroed = pages_purge(chunk, size, file_mapped);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
	 * If pages_purge() reports that the pages were zeroed
	 * as a side effect of purging, this assignment is safe.
*/
if (zeroed == false && unzeroed == false) {
zeroed = true;
}
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc(pool);
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&pool->chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
node->zeroed = (node->zeroed && zeroed);
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = zeroed;
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
label_return:
malloc_mutex_unlock(&pool->chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dalloc(pool, xnode);
if (xprev != NULL)
base_node_dalloc(pool, xprev);
}
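/*
 * dss chunks can never be unmapped, so they are always recorded for
 * reuse; mmap chunks are recorded only when chunk_dalloc_mmap() declines
 * to unmap them (i.e. when config_munmap is disabled).
 */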
void
chunk_unmap(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (have_dss && chunk_in_dss(chunk))
chunk_record(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss, chunk, size, false);
else if (chunk_dalloc_mmap(chunk, size))
chunk_record(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap, chunk, size, false);
}
static void
chunk_dalloc_core(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&pool->chunks_mtx);
assert(pool->stats_chunks.curchunks >= (size / chunksize));
pool->stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&pool->chunks_mtx);
}
chunk_unmap(pool, chunk, size);
}
/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
chunk_dalloc_core(pool, chunk, size);
return (false);
}
bool
chunk_global_boot()
{
if (have_dss && chunk_dss_boot())
return (true);
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
return (false);
}
/*
* Called at each pool opening.
*/
bool
chunk_boot(pool_t *pool)
{
if (config_stats || config_prof) {
if (malloc_mutex_init(&pool->chunks_mtx))
return (true);
}
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
if (malloc_mutex_init(&rtree->mutex))
return (true);
}
return (false);
}
/*
* Called only at pool creation.
*/
bool
chunk_init(pool_t *pool)
{
if (chunk_boot(pool))
return (true);
if (config_stats || config_prof)
memset(&pool->stats_chunks, 0, sizeof(chunk_stats_t));
extent_tree_szad_new(&pool->chunks_szad_mmap);
extent_tree_ad_new(&pool->chunks_ad_mmap);
extent_tree_szad_new(&pool->chunks_szad_dss);
extent_tree_ad_new(&pool->chunks_ad_dss);
if (config_ivsalloc) {
pool->chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL, pool);
if (pool->chunks_rtree == NULL)
return (true);
}
return (false);
}
void
chunk_prefork0(pool_t *pool)
{
if (config_ivsalloc)
rtree_prefork(pool->chunks_rtree);
}
void
chunk_prefork1(pool_t *pool)
{
malloc_mutex_prefork(&pool->chunks_mtx);
}
void
chunk_postfork_parent0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_parent(pool->chunks_rtree);
}
void
chunk_postfork_parent1(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->chunks_mtx);
}
void
chunk_postfork_child0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_child(pool->chunks_rtree);
}
void
chunk_postfork_child1(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->chunks_mtx);
}
| 13,794 | 26.371032 | 93 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/chunk_mmap.c | #define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
bool *zero);
/******************************************************************************/
static void *
pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
abort();
}
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}
static void
pages_unmap(void *addr, size_t size)
{
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}
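/*
 * Trim an over-sized mapping down to the aligned size-byte region that
 * starts leadsize bytes in. On Windows a region cannot be partially
 * released, so the whole mapping is dropped and re-created at the target
 * address (which can fail); on POSIX the leading and trailing excess are
 * simply munmap()ed.
 */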
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}
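/*
 * Tell the kernel that the contents of [addr, addr+length) may be
 * discarded. The return value reports whether the pages may still
 * contain non-zero data afterwards: MADV_DONTNEED zeroes anonymous
 * mappings, MADV_FREE does not, and file-backed or Windows mappings are
 * conservatively treated as unzeroed.
 */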
bool
pages_purge(void *addr, size_t length, bool file_mapped)
{
bool unzeroed;
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || file_mapped || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#else
/* Last resort no-op. */
unzeroed = true;
#endif
return (unzeroed);
}
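/*
 * Slow path: map size + alignment - PAGE bytes, the smallest
 * over-allocation guaranteed to contain an aligned size-byte sub-range
 * (mmap returns page-aligned addresses), then trim the excess. For
 * example, with the typical 4 MiB chunk size and alignment and 4 KiB
 * pages, 8 MiB - 4 KiB is mapped and at most two fragments are unmapped.
 */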
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
void *ret, *pages;
size_t alloc_size, leadsize;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
return (ret);
}
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in one or two calls to
* pages_unmap().
*
* Optimistically try mapping precisely the right amount before falling
* back to the slow method, with the expectation that the optimistic
* approach works most of the time.
*/
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
}
assert(ret != NULL);
*zero = true;
return (ret);
}
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
pages_unmap(chunk, size);
return (config_munmap == false);
}
| 5,078 | 22.733645 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/quarantine.c | #define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
* quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
/* Data. */
malloc_tsd_data(, quarantine, quarantine_t *, NULL)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
quarantine->curobjs = 0;
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
quarantine_tsd_set(&quarantine);
return (quarantine);
}
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(quarantine);
return (quarantine);
}
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
quarantine->lg_maxobjs)) {
/* objs ring buffer data are contiguous. */
memcpy(ret->objs, &quarantine->objs[quarantine->first],
quarantine->curobjs * sizeof(quarantine_obj_t));
} else {
/* objs ring buffer data wrap around. */
size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
quarantine->first;
size_t ncopy_b = quarantine->curobjs - ncopy_a;
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
* sizeof(quarantine_obj_t));
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloc(quarantine);
return (ret);
}
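/*
 * The objs array is a power-of-two ring buffer, so advancing an index
 * wraps with a mask rather than a modulo, e.g. with lg_maxobjs == 2,
 * (3 + 1) & 3 == 0.
 */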
static void
quarantine_drain_one(quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(quarantine);
}
void
quarantine(void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* Make a note that quarantine() was called after
* quarantine_cleanup() was called.
*/
quarantine = QUARANTINE_STATE_REINCARNATED;
quarantine_tsd_set(&quarantine);
}
idalloc(ptr);
return;
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
*/
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
quarantine = quarantine_grow(quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
if (quarantine->curbytes + usize <= opt_quarantine) {
size_t offset = (quarantine->first + quarantine->curobjs) &
((ZU(1) << quarantine->lg_maxobjs) - 1);
quarantine_obj_t *obj = &quarantine->objs[offset];
obj->ptr = ptr;
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
if (config_fill && opt_junk) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
if ((config_valgrind == false || in_valgrind == false)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
/*
* Another destructor deallocated memory after this destructor
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
* in order to receive another callback.
*/
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to QUARANTINE_STATE_PURGATORY so that other destructors
* wouldn't cause re-creation of the quarantine. This time, do
* nothing, so that the destructor will not be called again.
*/
} else if (quarantine != NULL) {
quarantine_drain(quarantine, 0);
idalloc(quarantine);
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
}
}
bool
quarantine_boot(void)
{
cassert(config_fill);
if (quarantine_tsd_boot())
return (true);
return (false);
}
| 5,792 | 27.965 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/mutex.c | #define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_once(void)
{
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
isthreaded = true;
}
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
{
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
pthread_once(&once_control, pthread_create_once);
return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
static void *
base_calloc_wrapper(size_t number, size_t size)
{
return base_calloc(&base_pool, number, size);
}
/* XXX We need somewhere to allocate mutexes from during early initialization */
#define BOOTSTRAP_POOL_SIZE 4096
#define BP_MASK 0xfffffffffffffff0UL
static char bootstrap_pool[BOOTSTRAP_POOL_SIZE] __attribute__((aligned (16)));
static char *bpp = bootstrap_pool;
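/*
 * Bump allocator over the static pool: (number * size + 0xf) & BP_MASK
 * rounds the request up to a 16-byte multiple (e.g. 40 -> 48). Nothing
 * is ever freed, and because the pool lives in zero-initialized static
 * storage, callers get the zeroed memory a calloc() callback is expected
 * to return.
 */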
static void *
bootstrap_calloc(size_t number, size_t size)
{
size_t my_size = ((number * size) + 0xf) & BP_MASK;
bpp += my_size;
if ((bpp - bootstrap_pool) > BOOTSTRAP_POOL_SIZE) {
return NULL;
}
return (void *)(bpp - my_size);
}
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
base_calloc_wrapper) != 0)
return (true);
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
malloc_mutex_prefork(malloc_mutex_t *mutex)
{
malloc_mutex_lock(mutex);
}
void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
malloc_mutex_unlock(mutex);
}
bool
mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return (false);
}
void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_mutex_unlock(mutex);
#else
if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
void
malloc_rwlock_prefork(malloc_rwlock_t *rwlock)
{
malloc_rwlock_wrlock(rwlock);
}
void
malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock)
{
malloc_rwlock_unlock(rwlock);
}
void
malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_rwlock_unlock(rwlock);
#else
if (malloc_rwlock_init(rwlock)) {
malloc_printf("<jemalloc>: Error re-initializing rwlock in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
| 4,488 | 21.445 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/src/pool.c | #define JEMALLOC_POOL_C_
#include "jemalloc/internal/jemalloc_internal.h"
malloc_mutex_t pool_base_lock;
malloc_mutex_t pools_lock;
/*
* Initialize runtime state of the pool.
* Called both at pool creation and each pool opening.
*/
bool
pool_boot(pool_t *pool, unsigned pool_id)
{
pool->pool_id = pool_id;
if (malloc_mutex_init(&pool->memory_range_mtx))
return (true);
/*
* Rwlock initialization must be deferred if we are
* creating the base pool in the JEMALLOC_LAZY_LOCK case.
* This is safe because the lock won't be used until
* isthreaded has been set.
*/
if ((isthreaded || (pool != &base_pool))
&& malloc_rwlock_init(&pool->arenas_lock))
return (true);
return (false);
}
/*
* Initialize runtime state of the pool.
* Called at each pool opening.
*/
bool
pool_runtime_init(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_boot(pool))
return (true);
if (chunk_boot(pool))
return (true);
if (huge_boot(pool))
return (true);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(pool->arenas,
sizeof(arena_t) * pool->narenas_total);
for (size_t i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
if (arena_boot(arena))
return (true);
}
}
return (false);
}
/*
* Initialize pool and create its base arena.
* Called only at pool creation.
*/
bool
pool_new(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_init(pool))
return (true);
if (chunk_init(pool))
return (true);
if (huge_init(pool))
return (true);
if (pools_shared_data_create())
return (true);
pool->stats_cactive = 0;
pool->ctl_stats_active = 0;
pool->ctl_stats_allocated = 0;
pool->ctl_stats_mapped = 0;
pool->narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (pool->narenas_auto > chunksize / sizeof(arena_t *)) {
pool->narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
pool->narenas_auto);
}
pool->narenas_total = pool->narenas_auto;
/* Allocate and initialize arenas. */
pool->arenas = (arena_t **)base_calloc(pool, sizeof(arena_t *),
pool->narenas_total);
if (pool->arenas == NULL)
return (true);
arenas_extend(pool, 0);
return (false);
}
/* Release the arenas associated with a pool. */
void
pool_destroy(pool_t *pool)
{
size_t i, j;
for (i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
//arena_purge_all(arena); /* XXX */
for (j = 0; j < NBINS; j++)
malloc_mutex_destroy(&arena->bins[j].lock);
malloc_mutex_destroy(&arena->lock);
}
}
/*
* Set 'pool_id' to an incorrect value so that the pool cannot be used
* after being deleted.
*/
pool->pool_id = UINT_MAX;
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
malloc_mutex_destroy(&rtree->mutex);
}
malloc_mutex_destroy(&pool->memory_range_mtx);
malloc_mutex_destroy(&pool->base_mtx);
malloc_mutex_destroy(&pool->base_node_mtx);
malloc_mutex_destroy(&pool->chunks_mtx);
malloc_mutex_destroy(&pool->huge_mtx);
malloc_rwlock_destroy(&pool->arenas_lock);
}
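/*
 * Fork hooks: take the global pool locks before fork() and release them
 * in both the parent and the child afterwards, so a forked child never
 * inherits a mutex that some other thread held mid-operation.
 */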
void pool_prefork()
{
malloc_mutex_prefork(&pools_lock);
malloc_mutex_prefork(&pool_base_lock);
}
void pool_postfork_parent()
{
malloc_mutex_postfork_parent(&pools_lock);
malloc_mutex_postfork_parent(&pool_base_lock);
}
void pool_postfork_child()
{
malloc_mutex_postfork_child(&pools_lock);
malloc_mutex_postfork_child(&pool_base_lock);
}
| 3,723 | 21.166667 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/rb.c | #include "test/jemalloc_test.h"
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
a_type *rbp_bh_t; \
for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
rbp_bh_t != &(a_rbt)->rbt_nil; \
rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
(r_height)++; \
} \
} \
} while (0)
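/*
 * The macro above counts the black nodes along the leftmost root-to-nil
 * path; the red-black invariant guarantees that every root-to-nil path
 * crosses the same number of black nodes, so this is the tree's black
 * height.
 */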
typedef struct node_s node_t;
struct node_s {
#define NODE_MAGIC 0x9823af7e
uint32_t magic;
rb_node(node_t) link;
uint64_t key;
};
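/*
 * (a > b) - (a < b) yields -1/0/+1 without the overflow risk of
 * subtracting uint64_t keys, and ties are broken by node address so that
 * duplicate keys still get a strict total order.
 */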
static int
node_cmp(node_t *a, node_t *b) {
int ret;
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
ret = (a->key > b->key) - (a->key < b->key);
if (ret == 0) {
/*
* Duplicates are not allowed in the tree, so force an
* arbitrary ordering for non-identical items with equal keys.
*/
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
}
return (ret);
}
typedef rb_tree(node_t) tree_t;
rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
TEST_BEGIN(test_rb_empty)
{
tree_t tree;
node_t key;
tree_new(&tree);
assert_ptr_null(tree_first(&tree), "Unexpected node");
assert_ptr_null(tree_last(&tree), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
}
TEST_END
static unsigned
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
node_t *nil)
{
unsigned ret = 0;
node_t *left_node = rbtn_left_get(node_t, link, node);
node_t *right_node = rbtn_right_get(node_t, link, node);
if (rbtn_red_get(node_t, link, node) == false)
black_depth++;
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
assert_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
assert_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
}
if (node == nil)
return (ret);
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
if (left_node != nil)
ret += tree_recurse(left_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
/* Right subtree. */
if (right_node != nil)
ret += tree_recurse(right_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
return (ret);
}
static node_t *
tree_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *i = (unsigned *)data;
node_t *search_node;
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Test rb_search(). */
search_node = tree_search(tree, node);
assert_ptr_eq(search_node, node,
"tree_search() returned unexpected node");
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_nsearch() returned unexpected node");
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_psearch() returned unexpected node");
(*i)++;
return (NULL);
}
static unsigned
tree_iterate(tree_t *tree)
{
unsigned i;
i = 0;
tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
static unsigned
tree_iterate_reverse(tree_t *tree)
{
unsigned i;
i = 0;
tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
static void
node_remove(tree_t *tree, node_t *node, unsigned nnodes)
{
node_t *search_node;
unsigned black_height, imbalances;
tree_remove(tree, node);
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
if (search_node != NULL) {
assert_u64_ge(search_node->key, node->key,
"Key ordering error");
}
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
if (search_node != NULL) {
assert_u64_le(search_node->key, node->key,
"Key ordering error");
}
node->magic = 0;
rbtn_black_height(node_t, link, tree, black_height);
imbalances = tree_recurse(tree->rbt_root, black_height, 0,
&(tree->rbt_nil));
assert_u_eq(imbalances, 0, "Tree is unbalanced");
assert_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
"Unexpected node iteration count");
}
static node_t *
remove_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_next(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
static node_t *
remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_prev(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
TEST_BEGIN(test_rb_random)
{
#define NNODES 25
#define NBAGS 250
#define SEED 42
sfmt_t *sfmt;
uint64_t bag[NNODES];
tree_t tree;
node_t nodes[NNODES];
unsigned i, j, k, black_height, imbalances;
sfmt = init_gen_rand(SEED);
for (i = 0; i < NBAGS; i++) {
switch (i) {
case 0:
/* Insert in order. */
for (j = 0; j < NNODES; j++)
bag[j] = j;
break;
case 1:
/* Insert in reverse order. */
for (j = 0; j < NNODES; j++)
bag[j] = NNODES - j - 1;
break;
default:
for (j = 0; j < NNODES; j++)
bag[j] = gen_rand64_range(sfmt, NNODES);
}
for (j = 1; j <= NNODES; j++) {
/* Initialize tree and nodes. */
tree_new(&tree);
tree.rbt_nil.magic = 0;
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
nodes[k].key = bag[k];
}
/* Insert nodes. */
for (k = 0; k < j; k++) {
tree_insert(&tree, &nodes[k]);
rbtn_black_height(node_t, link, &tree,
black_height);
imbalances = tree_recurse(tree.rbt_root,
black_height, 0, &(tree.rbt_nil));
assert_u_eq(imbalances, 0,
"Tree is unbalanced");
assert_u_eq(tree_iterate(&tree), k+1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
assert_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_last(&tree),
"Tree should not be empty");
tree_next(&tree, &nodes[k]);
tree_prev(&tree, &nodes[k]);
}
/* Remove nodes. */
switch (i % 4) {
case 0:
for (k = 0; k < j; k++)
node_remove(&tree, &nodes[k], j - k);
break;
case 1:
for (k = j; k > 0; k--)
node_remove(&tree, &nodes[k-1], k);
break;
case 2: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_iter(&tree, start,
remove_iterate_cb, (void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 3: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_reverse_iter(&tree, start,
remove_reverse_iterate_cb,
(void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} default:
not_reached();
}
}
}
fini_gen_rand(sfmt);
#undef NNODES
#undef NBAGS
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rb_empty,
test_rb_random));
}
| 7,430 | 21.248503 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/util.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_pow2_ceil)
{
unsigned i, pow2;
size_t x;
assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
for (i = 0; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
"Unexpected result");
}
for (i = 2; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
"Unexpected result");
}
for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
"Unexpected result");
}
for (pow2 = 1; pow2 < 25; pow2++) {
for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
"Unexpected result, x=%zu", x);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax_no_endptr)
{
int err;
set_errno(0);
assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
err = get_errno();
assert_d_eq(err, 0, "Unexpected failure");
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax)
{
struct test_s {
const char *input;
const char *expected_remainder;
int base;
int expected_errno;
const char *expected_errno_name;
uintmax_t expected_x;
};
#define ERR(e) e, #e
#define KUMAX(x) ((uintmax_t)x##ULL)
struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
{"", "", 0, ERR(EINVAL), UINTMAX_MAX},
{"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
{"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
{"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
{"42", "", 0, ERR(0), KUMAX(42)},
{"+42", "", 0, ERR(0), KUMAX(42)},
{"-42", "", 0, ERR(0), KUMAX(-42)},
{"042", "", 0, ERR(0), KUMAX(042)},
{"+042", "", 0, ERR(0), KUMAX(042)},
{"-042", "", 0, ERR(0), KUMAX(-042)},
{"0x42", "", 0, ERR(0), KUMAX(0x42)},
{"+0x42", "", 0, ERR(0), KUMAX(0x42)},
{"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
{"0", "", 0, ERR(0), KUMAX(0)},
{"1", "", 0, ERR(0), KUMAX(1)},
{"42", "", 0, ERR(0), KUMAX(42)},
{" 42", "", 0, ERR(0), KUMAX(42)},
{"42 ", " ", 0, ERR(0), KUMAX(42)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"42x", "x", 0, ERR(0), KUMAX(42)},
{"07", "", 0, ERR(0), KUMAX(7)},
{"010", "", 0, ERR(0), KUMAX(8)},
{"08", "8", 0, ERR(0), KUMAX(0)},
{"0_", "_", 0, ERR(0), KUMAX(0)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"0X", "X", 0, ERR(0), KUMAX(0)},
{"0xg", "xg", 0, ERR(0), KUMAX(0)},
{"0XA", "", 0, ERR(0), KUMAX(10)},
{"010", "", 10, ERR(0), KUMAX(10)},
{"0x3", "x3", 10, ERR(0), KUMAX(0)},
{"12", "2", 2, ERR(0), KUMAX(1)},
{"78", "8", 8, ERR(0), KUMAX(7)},
{"9a", "a", 10, ERR(0), KUMAX(9)},
{"9A", "A", 10, ERR(0), KUMAX(9)},
{"fg", "g", 16, ERR(0), KUMAX(15)},
{"FG", "G", 16, ERR(0), KUMAX(15)},
{"0xfg", "g", 16, ERR(0), KUMAX(15)},
{"0XFG", "G", 16, ERR(0), KUMAX(15)},
{"z_", "_", 36, ERR(0), KUMAX(35)},
{"Z_", "_", 36, ERR(0), KUMAX(35)}
};
#undef ERR
#undef KUMAX
unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
struct test_s *test = &tests[i];
int err;
uintmax_t result;
char *remainder;
set_errno(0);
result = malloc_strtoumax(test->input, &remainder, test->base);
err = get_errno();
assert_d_eq(err, test->expected_errno,
"Expected errno %s for \"%s\", base %d",
test->expected_errno_name, test->input, test->base);
assert_str_eq(remainder, test->expected_remainder,
"Unexpected remainder for \"%s\", base %d",
test->input, test->base);
if (err == 0) {
assert_ju_eq(result, test->expected_x,
"Unexpected result for \"%s\", base %d",
test->input, test->base);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_snprintf_truncated)
{
#define BUFLEN 15
char buf[BUFLEN];
int result;
size_t len;
#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
assert_d_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
for (len = 1; len < BUFLEN; len++) {
TEST("012346789", "012346789");
TEST("a0123b", "a%sb", "0123");
TEST("a01234567", "a%s%s", "0123", "4567");
TEST("a0123 ", "a%-6s", "0123");
TEST("a 0123", "a%6s", "0123");
TEST("a 012", "a%6.3s", "0123");
TEST("a 012", "a%*.*s", 6, 3, "0123");
TEST("a 123b", "a% db", 123);
TEST("a123b", "a%-db", 123);
TEST("a-123b", "a%-db", -123);
TEST("a+123b", "a%+db", 123);
}
#undef BUFLEN
#undef TEST
}
TEST_END
TEST_BEGIN(test_malloc_snprintf)
{
#define BUFLEN 128
char buf[BUFLEN];
int result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0)
TEST("hello", "hello");
TEST("50%, 100%", "50%%, %d%%", 100);
TEST("a0123b", "a%sb", "0123");
TEST("a 0123b", "a%5sb", "0123");
TEST("a 0123b", "a%*sb", 5, "0123");
TEST("a0123 b", "a%-5sb", "0123");
TEST("a0123b", "a%*sb", -1, "0123");
TEST("a0123 b", "a%*sb", -5, "0123");
TEST("a0123 b", "a%-*sb", -5, "0123");
TEST("a012b", "a%.3sb", "0123");
TEST("a012b", "a%.*sb", 3, "0123");
TEST("a0123b", "a%.*sb", -3, "0123");
TEST("a 012b", "a%5.3sb", "0123");
TEST("a 012b", "a%5.*sb", 3, "0123");
TEST("a 012b", "a%*.3sb", 5, "0123");
TEST("a 012b", "a%*.*sb", 5, 3, "0123");
TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
TEST("_abcd_", "_%x_", 0xabcd);
TEST("_0xabcd_", "_%#x_", 0xabcd);
TEST("_1234_", "_%o_", 01234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_1234_", "_%d_", 1234);
TEST("_ 1234_", "_% d_", 1234);
TEST("_+1234_", "_%+d_", 1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_-1234_", "_% d_", -1234);
TEST("_-1234_", "_%+d_", -1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_1234_", "_%d_", 1234);
TEST("_-1234_", "_%i_", -1234);
TEST("_1234_", "_%i_", 1234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_0x1234abc_", "_%#x_", 0x1234abc);
TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
TEST("_c_", "_%c_", 'c');
TEST("_string_", "_%s_", "string");
TEST("_0x42_", "_%p_", ((void *)0x42));
TEST("_-1234_", "_%ld_", ((long)-1234));
TEST("_1234_", "_%ld_", ((long)1234));
TEST("_-1234_", "_%li_", ((long)-1234));
TEST("_1234_", "_%li_", ((long)1234));
TEST("_01234_", "_%#lo_", ((long)01234));
TEST("_1234_", "_%lu_", ((long)1234));
TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
TEST("_-1234_", "_%lld_", ((long long)-1234));
TEST("_1234_", "_%lld_", ((long long)1234));
TEST("_-1234_", "_%lli_", ((long long)-1234));
TEST("_1234_", "_%lli_", ((long long)1234));
TEST("_01234_", "_%#llo_", ((long long)01234));
TEST("_1234_", "_%llu_", ((long long)1234));
TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
/* turn off ICC warnings on invalid format string conversion */
#pragma warning (push)
#pragma warning (disable: 269)
#endif
TEST("_-1234_", "_%qd_", ((long long)-1234));
TEST("_1234_", "_%qd_", ((long long)1234));
TEST("_-1234_", "_%qi_", ((long long)-1234));
TEST("_1234_", "_%qi_", ((long long)1234));
TEST("_01234_", "_%#qo_", ((long long)01234));
TEST("_1234_", "_%qu_", ((long long)1234));
TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
TEST("_1234_", "_%jd_", ((intmax_t)1234));
TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
TEST("_1234_", "_%ji_", ((intmax_t)1234));
TEST("_01234_", "_%#jo_", ((intmax_t)01234));
TEST("_1234_", "_%ju_", ((intmax_t)1234));
TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
TEST("_1234_", "_%zd_", ((ssize_t)1234));
TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
TEST("_1234_", "_%zi_", ((ssize_t)1234));
TEST("_01234_", "_%#zo_", ((ssize_t)01234));
TEST("_1234_", "_%zu_", ((ssize_t)1234));
TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
#undef BUFLEN
}
TEST_END
int
main(void)
{
return (test(
test_pow2_ceil,
test_malloc_strtoumax_no_endptr,
test_malloc_strtoumax,
test_malloc_snprintf_truncated,
test_malloc_snprintf));
}
| 8,905 | 28.2 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/stats.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_stats_summary)
{
size_t *cactive;
size_t sz, allocated, active, mapped;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(cactive);
assert_d_eq(mallctl("pool.0.stats.cactive", &cactive, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.allocated", &allocated, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.active", &active, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.mapped", &mapped, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(active, *cactive,
"active should be no larger than cactive");
assert_zu_le(allocated, active,
"allocated should be no larger than active");
assert_zu_le(active, mapped,
"active should be no larger than mapped");
}
}
TEST_END
TEST_BEGIN(test_stats_chunks)
{
size_t current, high;
uint64_t total;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.current", ¤t, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.chunks.total", &total, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.high", &high, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(current, high,
"current should be no larger than high");
assert_u64_le((uint64_t)high, total,
"high should be no larger than total");
}
}
TEST_END
TEST_BEGIN(test_stats_huge)
{
void *p;
uint64_t epoch;
size_t allocated;
uint64_t nmalloc, ndalloc, nrequests;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
p = mallocx(arena_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_le(nmalloc, nrequests,
"nmalloc should no larger than nrequests");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_summary)
{
unsigned arena;
void *little, *large;
uint64_t epoch;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
size_t mapped;
uint64_t npurge, nmadvise, purged;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx(arena_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure");
assert_d_eq(mallctl("pool.0.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.purged", &purged, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
if (config_stats) {
assert_u64_gt(npurge, 0,
"At least one purge should have occurred");
assert_u64_le(nmadvise, purged,
"nmadvise should be no greater than purged");
}
dallocx(little, 0);
dallocx(large, 0);
}
TEST_END
void *
thd_start(void *arg)
{
return (NULL);
}
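/*
 * Creating and joining a throwaway thread flips isthreaded under
 * JEMALLOC_LAZY_LOCK, forcing the allocator onto its locked paths so the
 * tcache statistics below are actually exercised.
 */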
static void
no_lazy_lock(void)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_BEGIN(test_stats_arenas_small)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be no greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_large)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_bins)
{
unsigned arena;
void *p;
size_t sz, allocated, curruns;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nruns, nreruns;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(arena_bin_info[0].reg_size, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nfills", &nfills, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nruns", &nruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
if (config_tcache) {
assert_u64_gt(nfills, 0,
"At least one fill should have occurred");
assert_u64_gt(nflushes, 0,
"At least one flush should have occurred");
}
assert_u64_gt(nruns, 0,
"At least one run should have been allocated");
assert_zu_gt(curruns, 0,
"At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_lruns)
{
unsigned arena;
void *p;
uint64_t epoch, nmalloc, ndalloc, nrequests;
size_t curruns, sz;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
assert_zu_gt(curruns, 0,
"At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
int
main(void)
{
return (test(
test_stats_summary,
test_stats_chunks,
test_stats_huge,
test_stats_arenas_summary,
test_stats_arenas_small,
test_stats_arenas_large,
test_stats_arenas_bins,
test_stats_arenas_lruns));
}
| 12,318 | 30.997403 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/bitmap.c | #include "test/jemalloc_test.h"
#if (LG_BITMAP_MAXBITS > 12)
# define MAXBITS 4500
#else
# define MAXBITS (1U << LG_BITMAP_MAXBITS)
#endif
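/*
 * Cap the largest bitmap exercised so the exhaustive per-size loops below
 * remain fast when LG_BITMAP_MAXBITS is large.
 */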
TEST_BEGIN(test_bitmap_size)
{
size_t i, prev_size;
prev_size = 0;
for (i = 1; i <= MAXBITS; i++) {
size_t size = bitmap_size(i);
assert_true(size >= prev_size,
"Bitmap size is smaller than expected");
prev_size = size;
}
}
TEST_END
TEST_BEGIN(test_bitmap_init)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) {
assert_false(bitmap_get(bitmap, &binfo, j),
"Bit should be unset");
}
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_set)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_unset)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
for (j = 0; j < i; j++)
bitmap_unset(bitmap, &binfo, j);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_sfu)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
ssize_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
for (j = 0; j < i; j++) {
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after "
"previous first unset bit");
}
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
/*
* Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits.
*/
for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should the bit previously "
"unset");
bitmap_unset(bitmap, &binfo, j);
}
assert_false(bitmap_get(bitmap, &binfo, 0),
"Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and
* verify that bitmap_sfu() looks past them.
*/
for (j = 1; j < i; j++) {
bitmap_set(bitmap, &binfo, j - 1);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after the "
"bit previously set");
bitmap_unset(bitmap, &binfo, j);
}
assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
"First unset bit should be the last bit");
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
int
main(void)
{
return (test(
test_bitmap_size,
test_bitmap_init,
test_bitmap_set,
test_bitmap_unset,
test_bitmap_sfu));
}
| 3,614 | 20.777108 | 57 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/junk.c | #include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf =
"abort:false,junk:true,zero:false,redzone:true,quarantine:0";
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;
static void *most_recently_junked;
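/*
 * The *_intercept hooks below wrap the real junk-fill routines: they
 * verify the 0x5a fill where it is guaranteed and record the pointer so
 * tests can assert exactly which region was junked last.
 */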
static void
arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
{
size_t i;
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
most_recently_junked = ptr;
}
static void
arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
{
size_t i;
arena_dalloc_junk_large_orig(ptr, usize);
for (i = 0; i < usize; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
most_recently_junked = ptr;
}
static void
huge_dalloc_junk_intercept(void *ptr, size_t usize)
{
huge_dalloc_junk_orig(ptr, usize);
/*
* The conditions under which junk filling actually occurs are nuanced
* enough that it doesn't make sense to duplicate the decision logic in
* test code, so don't actually check that the region is junk-filled.
*/
most_recently_junked = ptr;
}
static void
test_junk(size_t sz_min, size_t sz_max)
{
char *s;
size_t sz_prev, sz, i;
arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
huge_dalloc_junk_orig = huge_dalloc_junk;
huge_dalloc_junk = huge_dalloc_junk_intercept;
sz_prev = 0;
s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
assert_c_eq(s[i], 0xa5,
"Newly allocated byte %zu/%zu isn't junk-filled",
i, sz);
s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
void *junked = (void *)s;
s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
assert_ptr_eq(most_recently_junked, junked,
"Expected region of size %zu to be junk-filled",
sz);
}
}
dallocx(s, 0);
assert_ptr_eq(most_recently_junked, (void *)s,
"Expected region of size %zu to be junk-filled", sz);
arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
huge_dalloc_junk = huge_dalloc_junk_orig;
}
TEST_BEGIN(test_junk_small)
{
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
}
TEST_END
TEST_BEGIN(test_junk_large)
{
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, arena_maxclass);
}
TEST_END
TEST_BEGIN(test_junk_huge)
{
test_skip_if(!config_fill);
test_junk(arena_maxclass+1, chunksize*2);
}
TEST_END
arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
static void *most_recently_trimmed;
static void
arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
{
arena_ralloc_junk_large_orig(ptr, old_usize, usize);
assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
most_recently_trimmed = ptr;
}
TEST_BEGIN(test_junk_large_ralloc_shrink)
{
void *p1, *p2;
p1 = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
p2 = rallocx(p1, arena_maxclass-PAGE, 0);
assert_ptr_eq(p1, p2, "Unexpected move during shrink");
arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
assert_ptr_eq(most_recently_trimmed, p1,
"Expected trimmed portion of region to be junk-filled");
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_junk_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_junk_small,
test_junk_large,
test_junk_huge,
test_junk_large_ralloc_shrink,
test_junk_redzone));
}
| 5,541 | 24.190909 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/ckh.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_new_delete)
{
ckh_t ckh;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_count_insert_search_remove)
{
ckh_t ckh;
const char *strs[] = {
"a string",
"A string",
"a string.",
"A string."
};
const char *missing = "A string not in the hash table.";
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
ckh_insert(&ckh, strs[i], strs[i]);
assert_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
}
/* Search. */
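	/* The low bits of i cycle kp/vp through every NULL/non-NULL combination. */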
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_search(&ckh, strs[i], kp, vp),
"Unexpected ckh_search() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
}
assert_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
/* Remove. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_remove(&ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
assert_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
sizeof(strs)/sizeof(const char *) - i - 1,
ckh_count(&ckh));
}
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_insert_iter_remove)
{
#define NITEMS ZU(1000)
ckh_t ckh;
void **p[NITEMS];
void *q, *r;
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NITEMS; i++) {
size_t j;
for (j = i; j < NITEMS; j++) {
assert_false(ckh_insert(&ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
assert_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
}
assert_zu_eq(ckh_count(&ckh), NITEMS,
"ckh_count() should return %zu, but it returned %zu",
NITEMS, ckh_count(&ckh));
for (j = i + 1; j < NITEMS; j++) {
assert_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
{
bool seen[NITEMS];
size_t tabind;
memset(seen, 0, sizeof(seen));
for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
false;) {
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
for (k = 0; k < NITEMS; k++) {
if (p[k] == q) {
assert_false(seen[k],
"Item %zu already seen", k);
seen[k] = true;
break;
}
}
}
for (j = 0; j < i + 1; j++)
assert_true(seen[j], "Item %zu not seen", j);
for (; j < NITEMS; j++)
assert_false(seen[j], "Item %zu seen", j);
}
}
for (i = 0; i < NITEMS; i++) {
assert_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[i], q, "Key pointer mismatch");
assert_ptr_eq(p[i], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
ckh_delete(&ckh);
#undef NITEMS
}
TEST_END
int
main(void)
{
return (test(
test_new_delete,
test_count_insert_search_remove,
test_insert_iter_remove));
}
| 5,301 | 24.613527 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/math.c | #include "test/jemalloc_test.h"
#define MAX_REL_ERR 1.0e-9
#define MAX_ABS_ERR 1.0e-9
#include <float.h>
#ifndef INFINITY
#define INFINITY (DBL_MAX + DBL_MAX)
#endif
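/*
 * Approximate equality: accept a small absolute error (for results near
 * zero) or, failing that, a small relative error.
 */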
static bool
double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
{
double rel_err;
if (fabs(a - b) < max_abs_err)
return (true);
rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
return (rel_err < max_rel_err);
}
static uint64_t
factorial(unsigned x)
{
uint64_t ret = 1;
unsigned i;
for (i = 2; i <= x; i++)
ret *= (uint64_t)i;
return (ret);
}
TEST_BEGIN(test_ln_gamma_factorial)
{
unsigned x;
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
for (x = 1; x <= 21; x++) {
assert_true(double_eq_rel(exp(ln_gamma(x)),
(double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect factorial result for x=%u", x);
}
}
TEST_END
/* Expected ln_gamma([0.0..100.0] increment=0.25). */
static const double ln_gamma_misc_expected[] = {
INFINITY,
1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
-0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
359.13420536957539753
};
TEST_BEGIN(test_ln_gamma_misc)
{
unsigned i;
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
double x = (double)i * 0.25;
assert_true(double_eq_rel(ln_gamma(x),
ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect ln_gamma result for i=%u", i);
}
}
TEST_END
/* Expected pt_norm([0.01..0.99] increment=0.01). */
static const double pt_norm_expected[] = {
-INFINITY,
-2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
-1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
-1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
-1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
-1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
-0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
-0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
-0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
-0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
-0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
-0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
-0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
-0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
-0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
-0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
-0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
-0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
1.88079360815125041, 2.05374891063182208, 2.32634787404084076
};
TEST_BEGIN(test_pt_norm)
{
unsigned i;
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
double p = (double)i * 0.01;
assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_norm result for i=%u", i);
}
}
TEST_END
/*
* Expected pt_chi2(p=[0.01..0.99] increment=0.07,
* df={0.1, 1.1, 10.1, 100.1, 1000.1}).
*/
static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
static const double pt_chi2_expected[] = {
1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
2.606673548632508, 4.602913725294877, 5.646152813924212,
6.488971315540869, 7.249823275816285, 7.977314231410841,
8.700354939944047, 9.441728024225892, 10.224338321374127,
11.076435368801061, 12.039320937038386, 13.183878752697167,
14.657791935084575, 16.885728216339373, 23.361991680031817,
70.14844087392152, 80.92379498849355, 85.53325420085891,
88.94433120715347, 91.83732712857017, 94.46719943606301,
96.96896479994635, 99.43412843510363, 101.94074719829733,
104.57228644307247, 107.43900093448734, 110.71844673417287,
114.76616819871325, 120.57422505959563, 135.92318818757556,
899.0072447849649, 937.9271278858220, 953.8117189560207,
965.3079371501154, 974.8974061207954, 983.4936235182347,
991.5691170518946, 999.4334123954690, 1007.3391826856553,
1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
1046.4872561869577, 1063.5717461999654, 1107.0741966053859
};
TEST_BEGIN(test_pt_chi2)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
double df = pt_chi2_df[i];
double ln_gamma_df = ln_gamma(df * 0.5);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_chi2 result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
/*
 * Expected pt_gamma(p=[0.01..0.99] increment=0.07,
* shape=[0.5..3.0] increment=0.5).
*/
static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
static const double pt_gamma_expected[] = {
7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
4.7230515633946677, 5.6417477865306020, 8.4059469148854635
};
TEST_BEGIN(test_pt_gamma_shape)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
double shape = pt_gamma_shape[i];
double ln_gamma_shape = ln_gamma(shape);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
MAX_ABS_ERR),
"Incorrect pt_gamma result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
TEST_BEGIN(test_pt_gamma_scale)
{
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
assert_true(double_eq_rel(
pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
MAX_ABS_ERR),
"Scale should be trivially equivalent to external multiplication");
}
TEST_END
int
main(void)
{
return (test(
test_ln_gamma_factorial,
test_ln_gamma_misc,
test_pt_norm,
test_pt_chi2,
test_pt_gamma_shape,
test_pt_gamma_scale));
}
| 18,448 | 45.706329 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/ql.c | #include "test/jemalloc_test.h"
/* Number of ring entries, in [2..26]. */
#define NENTRIES 9
typedef struct list_s list_t;
typedef ql_head(list_t) list_head_t;
struct list_s {
ql_elm(list_t) link;
char id;
};
static void
test_empty_list(list_head_t *head)
{
list_t *t;
unsigned i;
assert_ptr_null(ql_first(head), "Unexpected element for empty list");
assert_ptr_null(ql_last(head, link),
"Unexpected element for empty list");
i = 0;
ql_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
i = 0;
ql_reverse_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
}
TEST_BEGIN(test_ql_empty)
{
list_head_t head;
ql_new(&head);
test_empty_list(&head);
}
TEST_END
static void
init_entries(list_t *entries, unsigned nentries)
{
unsigned i;
for (i = 0; i < nentries; i++) {
entries[i].id = 'a' + i;
ql_elm_new(&entries[i], link);
}
}
static void
test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
{
list_t *t;
unsigned i;
assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
"Element id mismatch");
i = 0;
ql_foreach(t, head, link) {
assert_c_eq(t->id, entries[i].id, "Element id mismatch");
i++;
}
i = 0;
ql_reverse_foreach(t, head, link) {
assert_c_eq(t->id, entries[nentries-i-1].id,
"Element id mismatch");
i++;
}
for (i = 0; i < nentries-1; i++) {
t = ql_next(head, &entries[i], link);
assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
}
assert_ptr_null(ql_next(head, &entries[nentries-1], link),
"Unexpected element");
assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
for (i = 1; i < nentries; i++) {
t = ql_prev(head, &entries[i], link);
assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
}
}
TEST_BEGIN(test_ql_tail_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_tail_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, entries, NENTRIES-i);
ql_tail_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_head_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_head_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, &entries[i], NENTRIES-i);
ql_head_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_insert)
{
list_head_t head;
list_t entries[8];
list_t *a, *b, *c, *d, *e, *f, *g, *h;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
a = &entries[0];
b = &entries[1];
c = &entries[2];
d = &entries[3];
e = &entries[4];
f = &entries[5];
g = &entries[6];
h = &entries[7];
/*
* ql_remove(), ql_before_insert(), and ql_after_insert() are used
* internally by other macros that are already tested, so there's no
* need to test them completely. However, insertion/deletion from the
* middle of lists is not otherwise tested; do so here.
*/
ql_tail_insert(&head, f, link);
ql_before_insert(&head, f, b, link);
ql_before_insert(&head, f, c, link);
ql_after_insert(f, h, link);
ql_after_insert(f, g, link);
ql_before_insert(&head, b, a, link);
ql_after_insert(c, d, link);
ql_before_insert(&head, f, e, link);
test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
}
TEST_END
int
main(void)
{
return (test(
test_ql_empty,
test_ql_tail_insert,
test_ql_tail_remove,
test_ql_head_insert,
test_ql_head_remove,
test_ql_insert));
}
| 4,483 | 20.352381 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/mallctl.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_mallctl_errors)
{
uint64_t epoch;
size_t sz;
assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
"mallctl() should return ENOENT for non-existent names");
assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_errors)
{
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
"mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END
TEST_BEGIN(test_mallctlbymib_errors)
{
uint64_t epoch;
size_t sz;
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
"attempt to write read-only value");
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctl_read_write)
{
uint64_t old_epoch, new_epoch;
size_t sz = sizeof(old_epoch);
/* Blind. */
assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_short_mib)
{
size_t mib[6];
size_t miblen;
void *mem;
pool_t *pool;
unsigned npools;
size_t sz = sizeof(npools);
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne((void*)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
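	/*
	 * Pass a deliberately short miblen and plant a sentinel in mib[5] to
	 * verify that mallctlnametomib() honors the length limit.
	 */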
miblen = 5;
mib[5] = 42;
assert_d_eq(mallctlnametomib("pool.1.arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_zu_eq(miblen, 5, "Unexpected mib output length");
assert_zu_eq(mib[5], 42,
"mallctlnametomib() wrote past the end of the input mib");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_mallctl_config)
{
#define TEST_MALLCTL_CONFIG(config) do { \
bool oldval; \
size_t sz = sizeof(oldval); \
assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_CONFIG(debug);
TEST_MALLCTL_CONFIG(fill);
TEST_MALLCTL_CONFIG(lazy_lock);
TEST_MALLCTL_CONFIG(munmap);
TEST_MALLCTL_CONFIG(prof);
TEST_MALLCTL_CONFIG(prof_libgcc);
TEST_MALLCTL_CONFIG(prof_libunwind);
TEST_MALLCTL_CONFIG(stats);
TEST_MALLCTL_CONFIG(tcache);
TEST_MALLCTL_CONFIG(tls);
TEST_MALLCTL_CONFIG(utrace);
TEST_MALLCTL_CONFIG(valgrind);
TEST_MALLCTL_CONFIG(xmalloc);
#undef TEST_MALLCTL_CONFIG
}
TEST_END
TEST_BEGIN(test_mallctl_opt)
{
bool config_always = true;
#define TEST_MALLCTL_OPT(t, opt, config) do { \
t oldval; \
size_t sz = sizeof(oldval); \
int expected = config_##config ? 0 : ENOENT; \
int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
assert_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(size_t, lg_chunk, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(size_t, narenas, always);
TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
TEST_MALLCTL_OPT(bool, junk, fill);
TEST_MALLCTL_OPT(size_t, quarantine, fill);
TEST_MALLCTL_OPT(bool, redzone, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
TEST_MALLCTL_OPT(bool, utrace, utrace);
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, tcache);
TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
TEST_MALLCTL_OPT(bool, prof, prof);
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
TEST_MALLCTL_OPT(bool, prof_active, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
TEST_MALLCTL_OPT(bool, prof_accum, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
TEST_MALLCTL_OPT(bool, prof_gdump, prof);
TEST_MALLCTL_OPT(bool, prof_final, prof);
TEST_MALLCTL_OPT(bool, prof_leak, prof);
#undef TEST_MALLCTL_OPT
}
TEST_END
/*
* create a couple of pools and check their size
* using mib feature
*/
TEST_BEGIN(test_mallctl_with_multiple_pools)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools;
int i;
size_t sz = sizeof(npools);
size_t mib[4], miblen;
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
		pools[i] = je_pool_create(mem + (i*POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 1, 1);
		assert_ptr_ne((void*)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
miblen = 4;
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
	/*
	 * This loop deliberately does not use the local pools variable.
	 * Moreover, we omit pool[0].
	 */
for (i = 1; i <= NPOOLS; ++i) {
unsigned narenas;
mib[1] = i;
sz = sizeof(narenas);
assert_d_eq(mallctlbymib(mib, miblen, &narenas, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
}
for (i = 0; i < NPOOLS; ++i) {
		je_pool_delete(pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_manpage_example)
{
unsigned nbins, i;
size_t mib[6];
size_t len, miblen;
len = sizeof(nbins);
assert_d_eq(mallctl("pool.0.arenas.nbins", &nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 6;
assert_d_eq(mallctlnametomib("pool.0.arenas.bin.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[4] = i;
len = sizeof(bin_size);
assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
}
TEST_END
TEST_BEGIN(test_thread_arena)
{
unsigned arena_old, arena_new, narenas;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
arena_new = narenas - 1;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
arena_new = 0;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
}
TEST_END
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
unsigned npools;
size_t sz = sizeof(unsigned);
size_t mib[5];
size_t miblen = 5;
void *mem;
pool_t *pool;
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
	assert_ptr_ne((void*)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
assert_d_eq(mallctl("pool.1.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("pool.1.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("pool.1.arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[3] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_arena_i_dss)
{
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
size_t mib[5];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("pool.0.arena.0.dss", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
mib[3] = narenas_total_get(pools[0]);
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
}
TEST_END
TEST_BEGIN(test_arenas_initialized)
{
unsigned narenas;
size_t sz = sizeof(narenas);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
{
VARIABLE_ARRAY(bool, initialized, narenas);
sz = narenas * sizeof(bool);
assert_d_eq(mallctl("pool.0.arenas.initialized", initialized, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
}
}
TEST_END
TEST_BEGIN(test_arenas_constants)
{
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas."#name, &(name), &sz, NULL, 0), 0, \
"Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
#undef TEST_ARENAS_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_bin_constants)
{
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.bin.0."#name, &(name), &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
#undef TEST_ARENAS_BIN_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_lrun_constants)
{
#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.lrun.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
#undef TEST_ARENAS_LRUN_CONSTANT
}
TEST_END
/*
* create a couple of pools and extend their arenas
*/
TEST_BEGIN(test_arenas_extend)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools, narenas_before, arena, narenas_after;
int i;
size_t mib_narenas[4],
mib_extend[4],
	    miblen = sizeof(mib_narenas)/sizeof(size_t),
sz = sizeof(unsigned);
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
pools[i] = je_pool_create(mem + (i*POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 0, 1);
assert_ptr_ne((void *)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib_narenas, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlnametomib("pool.0.arenas.extend", mib_extend, &miblen), 0,
"Unexpected mallctlnametomib() failure");
	/*
	 * This loop deliberately does not use the local pools variable.
	 * Moreover, we omit pool[0].
	 */
for (i = 1; i <= NPOOLS; ++i) {
mib_narenas[1] = i;
mib_extend[1] = i;
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_before, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_extend, miblen, &arena, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_after, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
for (i = 0; i < NPOOLS; ++i) {
		je_pool_delete(pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_stats_arenas)
{
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.stats.arenas.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
} while (0)
TEST_STATS_ARENAS(const char *, dss);
TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(size_t, pactive);
TEST_STATS_ARENAS(size_t, pdirty);
#undef TEST_STATS_ARENAS
}
TEST_END
/*
 * Each arena allocates 32 kilobytes of CTL metadata, and since only
 * 12 megabytes are available, the arena count has to be hard-limited to a
 * known value; otherwise the tests might run out of memory on systems with
 * a high CPU count.
 */
#define NARENAS_IN_POOL 64
int
main(void)
{
opt_narenas = NARENAS_IN_POOL;
return (test(
test_mallctl_errors,
test_mallctlnametomib_errors,
test_mallctlbymib_errors,
test_mallctl_read_write,
test_mallctlnametomib_short_mib,
test_mallctl_config,
test_mallctl_opt,
test_mallctl_with_multiple_pools,
test_manpage_example,
test_thread_arena,
test_arena_i_purge,
test_arena_i_dss,
test_arenas_initialized,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,
test_arenas_extend,
test_stats_arenas));
}
| 16,249 | 28.98155 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/rtree.c | #include "test/jemalloc_test.h"
void *
rtree_malloc(pool_t *pool, size_t size)
{
return imalloc(size);
}
void
rtree_free(pool_t *pool, void *ptr)
{
	idalloc(ptr);
}
TEST_BEGIN(test_rtree_get_empty)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
assert_u_eq(rtree_get(rtree, 0), 0,
"rtree_get() should return NULL for empty tree");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
rtree_set(rtree, 0, 1);
assert_u_eq(rtree_get(rtree, 0), 1,
"rtree_get() should return previously set value");
rtree_set(rtree, ~((uintptr_t)0), 1);
assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
"rtree_get() should return previously set value");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_bits)
{
unsigned i, j, k;
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
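		/*
		 * Only the high i bits of a key are significant, so these keys
		 * must all map to the same (leftmost) leaf.
		 */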
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
rtree_set(rtree, keys[j], 1);
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
assert_u_eq(rtree_get(rtree, keys[k]), 1,
"rtree_get() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_u_eq(rtree_get(rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
rtree_set(rtree, keys[j], 0);
}
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_random)
{
unsigned i;
sfmt_t *sfmt;
#define NSET 100
#define SEED 42
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
uintptr_t keys[NSET];
unsigned j;
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
rtree_set(rtree, keys[j], 1);
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
rtree_set(rtree, keys[j], 0);
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
rtree_delete(rtree);
}
fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rtree_get_empty,
test_rtree_extrema,
test_rtree_bits,
test_rtree_random));
}
| 3,032 | 22.152672 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/hash.c | /*
* This file is based on code that is part of SMHasher
* (https://code.google.com/p/smhasher/), and is subject to the MIT license
* (http://www.opensource.org/licenses/mit-license.php). Both email addresses
* associated with the source code's revision history belong to Austin Appleby,
* and the revision history ranges from 2010 to 2012. Therefore the copyright
* and license are here taken to be:
*
* Copyright (c) 2010-2012 Austin Appleby
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test/jemalloc_test.h"
typedef enum {
hash_variant_x86_32,
hash_variant_x86_128,
hash_variant_x64_128
} hash_variant_t;
static size_t
hash_variant_bits(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return (32);
case hash_variant_x86_128: return (128);
case hash_variant_x64_128: return (128);
default: not_reached();
}
}
static const char *
hash_variant_string(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return ("hash_x86_32");
case hash_variant_x86_128: return ("hash_x86_128");
case hash_variant_x64_128: return ("hash_x64_128");
default: not_reached();
}
}
static void
hash_variant_verify(hash_variant_t variant)
{
const size_t hashbytes = hash_variant_bits(variant) / 8;
uint8_t key[256];
VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
memset(key, 0, sizeof(key));
memset(hashes, 0, sizeof(hashes));
memset(final, 0, sizeof(final));
/*
	 * Hash keys of the form {}, {0}, {0,1}, ..., {0,1,...,254}; a key of
	 * length i is hashed with 256-i as the seed.
*/
for (i = 0; i < 256; i++) {
key[i] = (uint8_t)i;
switch (variant) {
case hash_variant_x86_32: {
uint32_t out;
out = hash_x86_32(key, i, 256-i);
memcpy(&hashes[i*hashbytes], &out, hashbytes);
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} default: not_reached();
}
}
/* Hash the result array. */
switch (variant) {
case hash_variant_x86_32: {
uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
memcpy(final, &out, sizeof(out));
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} default: not_reached();
}
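	/* The first four bytes of the final hash form the verification value. */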
computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
(final[3] << 24);
switch (variant) {
#ifdef JEMALLOC_BIG_ENDIAN
case hash_variant_x86_32: expected = 0x6213303eU; break;
case hash_variant_x86_128: expected = 0x266820caU; break;
case hash_variant_x64_128: expected = 0xcc622b6fU; break;
#else
case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
case hash_variant_x86_128: expected = 0xb3ece62aU; break;
case hash_variant_x64_128: expected = 0x6384ba69U; break;
#endif
default: not_reached();
}
assert_u32_eq(computed, expected,
"Hash mismatch for %s(): expected %#x but got %#x",
hash_variant_string(variant), expected, computed);
}
TEST_BEGIN(test_hash_x86_32)
{
hash_variant_verify(hash_variant_x86_32);
}
TEST_END
TEST_BEGIN(test_hash_x86_128)
{
hash_variant_verify(hash_variant_x86_128);
}
TEST_END
TEST_BEGIN(test_hash_x64_128)
{
hash_variant_verify(hash_variant_x64_128);
}
TEST_END
int
main(void)
{
return (test(
test_hash_x86_32,
test_hash_x86_128,
test_hash_x64_128));
}
| 4,746 | 26.598837 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/prof_accum.h | #include "test/jemalloc_test.h"
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
#define alloc_n_proto(n) \
void *alloc_##n(unsigned bits);
alloc_n_proto(0)
alloc_n_proto(1)
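/*
 * alloc_0() and alloc_1() recurse into each other according to the bits of
 * their argument, so every bit pattern produces a distinct backtrace for
 * the profiler to accumulate.
 */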
#define alloc_n_gen(n) \
void * \
alloc_##n(unsigned bits) \
{ \
void *p; \
\
if (bits == 0) \
p = mallocx(1, 0); \
else { \
switch (bits & 0x1U) { \
case 0: \
p = (alloc_0(bits >> 1)); \
break; \
case 1: \
p = (alloc_1(bits >> 1)); \
break; \
default: not_reached(); \
} \
} \
/* Intentionally sabotage tail call optimization. */ \
assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
return (p); \
}
| 794 | 21.083333 | 59 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/quarantine.c | #include "test/jemalloc_test.h"
#define QUARANTINE_SIZE 8192
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#ifdef JEMALLOC_FILL
const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
STRINGIFY(QUARANTINE_SIZE);
#endif
void
quarantine_clear(void)
{
void *p;
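	/*
	 * Allocating and freeing a region twice the quarantine's capacity
	 * drains any previously quarantined regions.
	 */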
p = mallocx(QUARANTINE_SIZE*2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
}
TEST_BEGIN(test_quarantine)
{
#define SZ ZU(256)
#define NQUARANTINED (QUARANTINE_SIZE/SZ)
void *quarantined[NQUARANTINED+1];
size_t i, j;
test_skip_if(!config_fill);
assert_zu_eq(nallocx(SZ, 0), SZ,
"SZ=%zu does not precisely equal a size class", SZ);
quarantine_clear();
/*
* Allocate enough regions to completely fill the quarantine, plus one
* more. The last iteration occurs with a completely full quarantine,
* but no regions should be drained from the quarantine until the last
* deallocation occurs. Therefore no region recycling should occur
* until after this loop completes.
*/
for (i = 0; i < NQUARANTINED+1; i++) {
void *p = mallocx(SZ, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
quarantined[i] = p;
dallocx(p, 0);
for (j = 0; j < i; j++) {
assert_ptr_ne(p, quarantined[j],
"Quarantined region recycled too early; "
"i=%zu, j=%zu", i, j);
}
}
#undef NQUARANTINED
#undef SZ
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_quarantine_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_quarantine,
test_quarantine_redzone));
}
| 2,583 | 22.706422 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/unit/pool.h | #include "test/jemalloc_test.h"
#define TEST_POOL_SIZE (16L * 1024L * 1024L)
#define TEST_TOO_SMALL_POOL_SIZE (2L * 1024L * 1024L)
#define TEST_VALUE 123456
#define TEST_MALLOC_FREE_LOOPS 2
#define TEST_MALLOC_SIZE 1024
#define TEST_ALLOCS_SIZE (TEST_POOL_SIZE / 8)
#define TEST_BUFFOR_CMP_SIZE (4L * 1024L * 1024L)
static char mem_pool[TEST_POOL_SIZE];
static char mem_extend_ok[TEST_POOL_SIZE];
static void* allocs[TEST_ALLOCS_SIZE];
static int custom_allocs;
TEST_BEGIN(test_pool_create_errors) {
pool_t *pool;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, 0, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for size 0");
pool = pool_create(NULL, TEST_POOL_SIZE, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for input addr NULL");
}
TEST_END
TEST_BEGIN(test_pool_create) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
assert_ptr_eq(pool, mem_pool, "pool_create() should return addr with valid input");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_malloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_malloc(pool, sizeof(int));
assert_ptr_not_null(test, "pool_malloc should return valid ptr");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_free) {
pool_t *pool;
int i, j, s = 0, prev_s = 0;
int allocs = TEST_POOL_SIZE/TEST_MALLOC_SIZE;
void *arr[allocs];
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
for (i = 0; i < TEST_MALLOC_FREE_LOOPS; ++i) {
for (j = 0; j < allocs; ++j) {
arr[j] = pool_malloc(pool, TEST_MALLOC_SIZE);
if (arr[j] != NULL) {
s++;
}
}
for (j = 0; j < allocs; ++j) {
if (arr[j] != NULL) {
pool_free(pool, arr[j]);
}
}
if (prev_s != 0) {
assert_x_eq(s, prev_s,
"pool_free() should record back used chunks");
}
prev_s = s;
s = 0;
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_calloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
int *test = pool_calloc(pool, 1, sizeof(int));
assert_ptr_not_null(test, "pool_calloc should return valid ptr");
assert_x_eq(*test, 0, "pool_calloc should return zeroed memory");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_realloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_ralloc(pool, NULL, sizeof(int));
assert_ptr_not_null(test, "pool_ralloc with NULL addr should return valid ptr");
int *test2 = pool_ralloc(pool, test, sizeof(int)*2);
assert_ptr_not_null(test, "pool_ralloc should return valid ptr");
test2[0] = TEST_VALUE;
test2[1] = TEST_VALUE;
	assert_x_eq(test2[1], TEST_VALUE, "ptr should be usable");
pool_free(pool, test2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_aligned_alloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_aligned_alloc(pool, 1024, 1024);
assert_ptr_not_null(test, "pool_aligned_alloc should return valid ptr");
assert_x_eq(((uintptr_t)(test) & 1023), 0, "ptr should be aligned");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_aligned_alloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_aligned_alloc() should return pointer to memory from pool");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_reuse_pool) {
pool_t *pool;
size_t pool_num = 0;
custom_allocs = 0;
/* create and destroy pool multiple times */
for (; pool_num<100; ++pool_num) {
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
if (pool == NULL) {
break;
}
void *prev = NULL;
size_t i = 0;
/* allocate memory from pool */
for (; i<100; ++i) {
void **next = pool_malloc(pool, sizeof (void *));
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
prev = next;
}
/* free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
}
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_check_memory) {
pool_t *pool;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
size_t object_size;
size_t size_allocated;
size_t i;
size_t j;
	for (object_size = 8; object_size <= TEST_BUFFER_CMP_SIZE; object_size *= 2) {
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
size_allocated = 0;
memset(allocs, 0, TEST_ALLOCS_SIZE * sizeof(void *));
for (i = 0; i < TEST_ALLOCS_SIZE;++i) {
allocs[i] = pool_malloc(pool, object_size);
if (allocs[i] == NULL) {
/* out of memory in pool */
break;
}
assert_lu_gt((uintptr_t)allocs[i], (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)allocs[i], (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
size_allocated += object_size;
/* fill each allocation with a unique value */
memset(allocs[i], (char)i, object_size);
}
assert_ptr_not_null(allocs[0], "pool_malloc should return valid ptr");
assert_lu_lt(i + 1, TEST_ALLOCS_SIZE, "All memory should be used");
		/* check for unexpected modifications of the prepared data */
for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) {
char *buffer = allocs[i];
for (j = 0; j < object_size; ++j)
if (buffer[j] != (char)i) {
assert_true(0, "Content of data object was modified unexpectedly"
" for object size: %zu, id: %zu", object_size, j);
break;
}
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
}
TEST_END
TEST_BEGIN(test_pool_use_all_memory) {
pool_t *pool;
size_t size = 0;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
void *prev = NULL;
for (;;) {
void **next = pool_malloc(pool, sizeof (void *));
if (next == NULL) {
/* Out of memory in pool, test end */
break;
}
size += sizeof (void *);
assert_ptr_not_null(next, "pool_malloc should return valid ptr");
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
assert_x_eq((uintptr_t)(*next), (uintptr_t)(prev), "ptr should be usable");
prev = next;
}
assert_lu_gt(size, 0, "Can not alloc any memory from pool");
/* Free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_errors) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_TOO_SMALL_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_TOO_SMALL_POOL_SIZE, 0);
assert_zu_eq(usable_size, 0, "pool_extend() should return 0"
" when provided with memory size smaller then chunksize");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_after_out_of_memory) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	/* use all the memory from the pool and from the base allocator */
while (pool_malloc(pool, sizeof (void *)));
pool->base_next_addr = pool->base_past_addr;
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * print_jemalloc_messages -- custom print function for jemalloc
*/
static void
print_jemalloc_messages(void* ignore, const char *s)
{
}
TEST_BEGIN(test_pool_check_extend) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
pool_malloc(pool, 100);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
pool_malloc(pool, 100);
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_out_of_range) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
void *usable_addr = (void *)CHUNK_CEILING((uintptr_t)mem_extend_ok);
size_t usable_size = (TEST_POOL_SIZE - (uintptr_t)(usable_addr -
(void *)mem_extend_ok)) & ~chunksize_mask;
chunk_record(pool,
&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
usable_addr, usable_size, 0);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() return error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_overlap) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
pool_t *pool2;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
/* create another pool in the same memory region */
pool2 = pool_create(mem_extend_ok, TEST_POOL_SIZE, 0, 1);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
assert_d_ne(je_pool_check(pool2), 1, "je_pool_check() not return error");
pool_delete(pool2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
#define POOL_TEST_CASES\
test_pool_create_errors, \
test_pool_create, \
test_pool_malloc, \
test_pool_free, \
test_pool_calloc, \
test_pool_realloc, \
test_pool_aligned_alloc, \
test_pool_reuse_pool, \
test_pool_check_memory, \
test_pool_use_all_memory, \
test_pool_extend_errors, \
test_pool_extend, \
test_pool_extend_after_out_of_memory, \
test_pool_check_extend, \
test_pool_check_memory_out_of_range, \
test_pool_check_memory_overlap
| 13,511 | 27.267782 | 84 | h |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/src/SFMT.c | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT.c
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software, see LICENSE.txt
*/
#define SFMT_C_
#include "test/jemalloc_test.h"
#include "test/SFMT-params.h"
#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(ONLY64) && !defined(BIG_ENDIAN64)
#if defined(__GNUC__)
#error "-DONLY64 must be specified with -DBIG_ENDIAN64"
#endif
#undef ONLY64
#endif
/*------------------------------------------------------
128-bit SIMD data type for Altivec, SSE2 or standard C
------------------------------------------------------*/
#if defined(HAVE_ALTIVEC)
/** 128-bit data structure */
union W128_T {
vector unsigned int s;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#elif defined(HAVE_SSE2)
/** 128-bit data structure */
union W128_T {
__m128i si;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#else
/** 128-bit data structure */
struct W128_T {
uint32_t u[4];
};
/** 128-bit data type */
typedef struct W128_T w128_t;
#endif
struct sfmt_s {
/** the 128-bit internal state array */
w128_t sfmt[N];
/** index counter to the 32-bit internal state array */
int idx;
/** a flag: it is 0 if and only if the internal state is not yet
* initialized. */
int initialized;
};
/*--------------------------------------
FILE GLOBAL VARIABLES
internal state, index counter and flag
--------------------------------------*/
/** a parity check vector which certifies the period of 2^{MEXP} */
static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
/*----------------
STATIC FUNCTIONS
----------------*/
JEMALLOC_INLINE_C int idxof(int i);
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
#endif
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
static void period_certification(sfmt_t *ctx);
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
JEMALLOC_INLINE_C void swap(w128_t *array, int size);
#endif
#if defined(HAVE_ALTIVEC)
#include "test/SFMT-alti.h"
#elif defined(HAVE_SSE2)
#include "test/SFMT-sse2.h"
#endif
/**
 * This function simulates a 64-bit index of LITTLE ENDIAN
 * in a BIG ENDIAN machine.
*/
#ifdef ONLY64
JEMALLOC_INLINE_C int idxof(int i) {
return i ^ 1;
}
#else
JEMALLOC_INLINE_C int idxof(int i) {
return i;
}
#endif
/**
 * This function simulates a SIMD 128-bit right shift in standard C.
* The 128-bit integer given in in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
/**
 * This function simulates a SIMD 128-bit left shift in standard C.
* The 128-bit integer given in in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
#ifdef ONLY64
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
#endif
/**
* This function represents the recursion formula.
* @param r output
* @param a a 128-bit part of the internal state array
* @param b a 128-bit part of the internal state array
* @param c a 128-bit part of the internal state array
* @param d a 128-bit part of the internal state array
*/
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
^ (d->u[3] << SL1);
}
#else
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
^ (d->u[3] << SL1);
}
#endif
#endif
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
int i;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
for (; i < N; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
* @param array an 128-bit array to be filled by pseudorandom numbers.
* @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < N; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < size - N; i++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (j = 0; j < 2 * N - size; j++) {
ctx->sfmt[j] = array[j + size - N];
}
for (; i < size; i++, j++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
ctx->sfmt[j] = array[i];
}
}
#endif
#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
int i;
uint32_t x, y;
for (i = 0; i < size; i++) {
x = array[i].u[0];
y = array[i].u[2];
array[i].u[0] = array[i].u[1];
array[i].u[2] = array[i].u[3];
array[i].u[1] = x;
array[i].u[3] = y;
}
}
#endif
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func1(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1664525UL;
}
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func2(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
}
/**
 * This function certifies the period of 2^{MEXP}
*/
static void period_certification(sfmt_t *ctx) {
int inner = 0;
int i, j;
uint32_t work;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
for (i = 0; i < 4; i++)
inner ^= psfmt32[idxof(i)] & parity[i];
for (i = 16; i > 0; i >>= 1)
inner ^= inner >> i;
inner &= 1;
/* check OK */
if (inner == 1) {
return;
}
/* check NG, and modification */
for (i = 0; i < 4; i++) {
work = 1;
for (j = 0; j < 32; j++) {
if ((work & parity[i]) != 0) {
psfmt32[idxof(i)] ^= work;
return;
}
work = work << 1;
}
}
}
/*----------------
PUBLIC FUNCTIONS
----------------*/
/**
* This function returns the identification string.
* The string shows the word size, the Mersenne exponent,
* and all parameters of this generator.
*/
const char *get_idstring(void) {
return IDSTR;
}
/**
* This function returns the minimum size of array used for \b
* fill_array32() function.
* @return minimum size of array used for fill_array32() function.
*/
int get_min_array_size32(void) {
return N32;
}
/**
* This function returns the minimum size of array used for \b
* fill_array64() function.
* @return minimum size of array used for fill_array64() function.
*/
int get_min_array_size64(void) {
return N64;
}
#ifndef ONLY64
/**
 * This function generates and returns a 32-bit pseudorandom number.
* init_gen_rand or init_by_array must be called before this function.
* @return 32-bit pseudorandom number
*/
uint32_t gen_rand32(sfmt_t *ctx) {
uint32_t r;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
assert(ctx->initialized);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
r = psfmt32[ctx->idx++];
return r;
}
/* Generate a random integer in [0..limit). */
uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
uint32_t ret, above;
above = 0xffffffffU - (0xffffffffU % limit);
while (1) {
ret = gen_rand32(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
#endif
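/*
 * Illustrative sketch (not part of the original file): drawing bounded
 * 32-bit values from a generator initialized with init_gen_rand() or
 * init_by_array().
 */
#if 0
static uint32_t
example_roll_die(sfmt_t *ctx)
{
	/* Uniform in [0..6) thanks to the rejection loop above. */
	return gen_rand32_range(ctx, 6);
}
#endif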
/**
 * This function generates and returns a 64-bit pseudorandom number.
* init_gen_rand or init_by_array must be called before this function.
* The function gen_rand64 should not be called after gen_rand32,
* unless an initialization is again executed.
* @return 64-bit pseudorandom number
*/
uint64_t gen_rand64(sfmt_t *ctx) {
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
uint32_t r1, r2;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
#else
uint64_t r;
uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
#endif
assert(ctx->initialized);
assert(ctx->idx % 2 == 0);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
r1 = psfmt32[ctx->idx];
r2 = psfmt32[ctx->idx + 1];
ctx->idx += 2;
return ((uint64_t)r2 << 32) | r1;
#else
r = psfmt64[ctx->idx / 2];
ctx->idx += 2;
return r;
#endif
}
/* Generate a random integer in [0..limit). */
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
uint64_t ret, above;
above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
while (1) {
ret = gen_rand64(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
#ifndef ONLY64
/**
* This function generates pseudorandom 32-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 624 and a
* multiple of four. The generation by this function is much faster
* than the following gen_rand function.
*
* For initialization, init_gen_rand or init_by_array must be called
* before the first call of this function. This function can not be
* used after calling gen_rand function, without initialization.
*
* @param array an array where pseudorandom 32-bit integers are filled
* by this function. The pointer to the array must be \b "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 32-bit pseudorandom integers to be
* generated. size must be a multiple of 4, and greater than or equal
* to (MEXP / 128 + 1) * 4.
*
* @note \b memalign or \b posix_memalign is available to get aligned
* memory. Mac OSX doesn't have these functions, but \b malloc of OSX
* returns the pointer to the aligned memory block.
*/
void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 4 == 0);
assert(size >= N32);
gen_rand_array(ctx, (w128_t *)array, size / 4);
ctx->idx = N32;
}
#endif
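/*
 * Illustrative sketch (not part of the original file): bulk generation into
 * a 16-byte-aligned buffer, as required by the SIMD variants. The element
 * count must be a multiple of 4 and at least get_min_array_size32(), and the
 * call must happen before any gen_rand32()/gen_rand64() calls.
 */
#if 0
static void
example_fill_array32(sfmt_t *ctx)
{
	void *buf;
	int size = get_min_array_size32();
	if (posix_memalign(&buf, 16, (size_t)size * sizeof(uint32_t)) == 0) {
		fill_array32(ctx, (uint32_t *)buf, size);
		free(buf);
	}
}
#endif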
/**
* This function generates pseudorandom 64-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 312 and a
* multiple of two. The generation by this function is much faster
* than the following gen_rand function.
*
* For initialization, init_gen_rand or init_by_array must be called
* before the first call of this function. This function can not be
* used after calling gen_rand function, without initialization.
*
* @param array an array where pseudorandom 64-bit integers are filled
* by this function. The pointer to the array must be "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 64-bit pseudorandom integers to be
* generated. size must be a multiple of 2, and greater than or equal
* to (MEXP / 128 + 1) * 2
*
* @note \b memalign or \b posix_memalign is available to get aligned
* memory. Mac OSX doesn't have these functions, but \b malloc of OSX
* returns the pointer to the aligned memory block.
*/
void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 2 == 0);
assert(size >= N64);
gen_rand_array(ctx, (w128_t *)array, size / 2);
ctx->idx = N32;
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
swap((w128_t *)array, size /2);
#endif
}
/**
* This function initializes the internal state array with a 32-bit
* integer seed.
*
* @param seed a 32-bit integer used as the seed.
*/
sfmt_t *init_gen_rand(uint32_t seed) {
void *p;
sfmt_t *ctx;
int i;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
psfmt32[idxof(0)] = seed;
for (i = 1; i < N32; i++) {
psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
^ (psfmt32[idxof(i - 1)] >> 30))
+ i;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
/**
* This function initializes the internal state array,
* with an array of 32-bit integers used as the seeds
* @param init_key the array of 32-bit integers, used as a seed.
* @param key_length the length of init_key.
*/
sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
void *p;
sfmt_t *ctx;
int i, j, count;
uint32_t r;
int lag;
int mid;
int size = N * 4;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
if (size >= 623) {
lag = 11;
} else if (size >= 68) {
lag = 7;
} else if (size >= 39) {
lag = 5;
} else {
lag = 3;
}
mid = (size - lag) / 2;
memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
if (key_length + 1 > N32) {
count = key_length + 1;
} else {
count = N32;
}
r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
^ psfmt32[idxof(N32 - 1)]);
psfmt32[idxof(mid)] += r;
r += key_length;
psfmt32[idxof(mid + lag)] += r;
psfmt32[idxof(0)] = r;
count--;
for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += init_key[j] + i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (; j < count; j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (j = 0; j < N32; j++) {
r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] ^= r;
r -= i;
psfmt32[idxof((i + mid + lag) % N32)] ^= r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
void fini_gen_rand(sfmt_t *ctx) {
assert(ctx != NULL);
ctx->initialized = 0;
free(ctx);
}
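/*
 * Illustrative sketch (not part of the original file): complete generator
 * lifecycle using the public functions defined above.
 */
#if 0
static void
example_sfmt_lifecycle(void)
{
	sfmt_t *ctx = init_gen_rand(4357U); /* arbitrary 32-bit seed */
	if (ctx != NULL) {
		uint32_t r = gen_rand32(ctx);
		(void)r;
		fini_gen_rand(ctx);
	}
}
#endif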
| 20,765 | 27.841667 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/src/test.c | #include "test/jemalloc_test.h"
static unsigned test_count = 0;
static test_status_t test_counts[test_status_count] = {0, 0, 0};
static test_status_t test_status = test_status_pass;
static const char * test_name = "";
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_skip(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_skip;
}
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_fail(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_fail;
}
static const char *
test_status_string(test_status_t test_status)
{
switch (test_status) {
case test_status_pass: return "pass";
case test_status_skip: return "skip";
case test_status_fail: return "fail";
default: not_reached();
}
}
void
p_test_init(const char *name)
{
test_count++;
test_status = test_status_pass;
test_name = name;
}
void
p_test_fini(void)
{
test_counts[test_status]++;
malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
test_status_t
p_test(test_t *t, ...)
{
test_status_t ret;
va_list ap;
/*
* Make sure initialization occurs prior to running tests. Tests are
* special because they may use internal facilities prior to triggering
* initialization as a side effect of calling into the public API. This
* is a final safety that works even if jemalloc_constructor() doesn't
* run, as for MSVC builds.
*/
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
return (test_status_fail);
}
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
test_status_t
p_test_not_init(test_t *t, ...)
{
test_status_t ret;
va_list ap;
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
void
p_test_fail(const char *prefix, const char *message)
{
malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
test_status = test_status_fail;
}
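/*
 * Illustrative sketch (not part of the original file): how a test file uses
 * this harness; see the integration tests later in this dump (e.g.
 * mallocx.c) for real instances.
 */
#if 0
TEST_BEGIN(example_test)
{
	assert_d_eq(1 + 1, 2, "arithmetic should hold");
}
TEST_END
int
main(void)
{
	return (test(example_test));
}
#endif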
| 2,920 | 20.798507 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/allocm.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
int r;
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = 0;
r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) |
ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"nallocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
rsz = 0;
r = allocm(&ps[i], &rsz, sz,
ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"allocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocm()/allocm() rsize mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
sallocm(ps[i], &rsz, 0);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocm(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,719 | 24.185185 | 63 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/allocated.c | #include "test/jemalloc_test.h"
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
void *p;
uint64_t a0, a1, d0, d1;
uint64_t *ap0, *ap1, *dp0, *dp1;
size_t sz, usize;
sz = sizeof(a0);
if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(ap0);
if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*ap0, a0,
"\"thread.allocatedp\" should provide a pointer to internal "
"storage");
sz = sizeof(d0);
if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(dp0);
if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*dp0, d0,
"\"thread.deallocatedp\" should provide a pointer to internal "
"storage");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1);
mallctl("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1);
mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value");
assert_ptr_eq(ap0, ap1,
"Pointer returned by \"thread.allocatedp\" should not change");
usize = malloc_usable_size(p);
assert_u64_le(a0 + usize, a1,
"Allocated memory counter should increase by at least the amount "
"explicitly allocated");
free(p);
sz = sizeof(d1);
mallctl("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1);
mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value");
assert_ptr_eq(dp0, dp1,
"Pointer returned by \"thread.deallocatedp\" should not change");
assert_u64_le(d0 + usize, d1,
"Deallocated memory counter should increase by at least the amount "
"explicitly deallocated");
return (NULL);
label_ENOENT:
assert_false(config_stats,
"ENOENT should only be returned if stats are disabled");
test_skip("\"thread.allocated\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,989 | 22.730159 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/mallocx.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0);
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_ptr_not_null(ps[i],
"mallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocx(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,387 | 23.367347 | 62 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/rallocm.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_same_size)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS,
"Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_extra_no_move)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE),
ALLOCM_SUCCESS, "Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_no_move_fail)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE),
ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = p;
assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = p;
assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j], szs[j-1]);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
int
main(void)
{
return (test(
test_same_size,
test_extra_no_move,
test_no_move_fail,
test_grow_and_shrink));
}
| 2,637 | 22.553571 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/rallocx.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
szs[0] = sallocx(p, 0);
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = rallocx(p, szs[j-1]+1, 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
szs[j] = sallocx(q, 0);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = rallocx(p, szs[j-1], 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j], szs[j-1]);
tsz = sallocx(q, 0);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
dallocx(p, 0);
#undef MAXSZ
#undef NSZS
#undef NCYCLES
}
TEST_END
static bool
validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
{
bool ret = false;
const uint8_t *buf = (const uint8_t *)p;
size_t i;
for (i = 0; i < len; i++) {
uint8_t b = buf[offset+i];
if (b != c) {
test_fail("Allocation at %p contains %#x rather than "
"%#x at offset %zu", p, b, c, offset+i);
ret = true;
}
}
return (ret);
}
TEST_BEGIN(test_zero)
{
void *p, *q;
size_t psz, qsz, i, j;
size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
#define FILL_BYTE 0xaaU
#define RANGE 2048
for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
size_t start_size = start_sizes[i];
p = mallocx(start_size, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
psz = sallocx(p, 0);
assert_false(validate_fill(p, 0, 0, psz),
"Expected zeroed memory");
memset(p, FILL_BYTE, psz);
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
for (j = 1; j < RANGE; j++) {
q = rallocx(p, start_size+j, MALLOCX_ZERO);
assert_ptr_not_null(q, "Unexpected rallocx() error");
qsz = sallocx(q, 0);
if (q != p || qsz != psz) {
assert_false(validate_fill(q, FILL_BYTE, 0,
psz), "Expected filled memory");
assert_false(validate_fill(q, 0, psz, qsz-psz),
"Expected zeroed memory");
}
if (psz != qsz) {
memset((void *)((uintptr_t)q+psz), FILL_BYTE,
qsz-psz);
psz = qsz;
}
p = q;
}
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
dallocx(p, 0);
}
#undef FILL_BYTE
}
TEST_END
TEST_BEGIN(test_align)
{
void *p, *q;
size_t align;
#define MAX_ALIGN (ZU(1) << 25)
align = ZU(1);
p = mallocx(1, MALLOCX_ALIGN(align));
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
q = rallocx(p, 1, MALLOCX_ALIGN(align));
assert_ptr_not_null(q,
"Unexpected rallocx() error for align=%zu", align);
assert_ptr_null(
(void *)((uintptr_t)q & (align-1)),
"%p inadequately aligned for align=%zu",
q, align);
p = q;
}
dallocx(p, 0);
#undef MAX_ALIGN
}
TEST_END
TEST_BEGIN(test_lg_align_and_zero)
{
void *p, *q;
size_t lg_align, sz;
#define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22)
lg_align = ZU(0);
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q,
"Unexpected rallocx() error for lg_align=%zu", lg_align);
assert_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
"%p inadequately aligned for lg_align=%zu",
q, lg_align);
sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz),
"Expected zeroed memory");
} else {
assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
"Expected zeroed memory");
assert_false(validate_fill(
(void *)((uintptr_t)q+sz-MAX_VALIDATE),
0, 0, MAX_VALIDATE), "Expected zeroed memory");
}
p = q;
}
dallocx(p, 0);
#undef MAX_VALIDATE
#undef MAX_LG_ALIGN
}
TEST_END
int
main(void)
{
return (test(
test_grow_and_shrink,
test_zero,
test_align,
test_lg_align_and_zero));
}
| 4,365 | 22.6 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/thread_tcache_enabled.c | #include "test/jemalloc_test.h"
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
size_t sz;
bool e0, e1;
sz = sizeof(bool);
if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
if (err == ENOENT) {
assert_false(config_tcache,
"ENOENT should only be returned if tcache is "
"disabled");
}
goto label_ENOENT;
}
if (e0) {
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
}
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
return (NULL);
label_ENOENT:
test_skip("\"thread.tcache.enabled\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,535 | 21.245614 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/chunk.c | #include "test/jemalloc_test.h"
chunk_alloc_t *old_alloc;
chunk_dalloc_t *old_dalloc;
bool
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
return (old_dalloc(chunk, size, arena_ind, pool));
}
void *
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind, pool_t *pool)
{
return (old_alloc(new_addr, size, alignment, zero, arena_ind, pool));
}
TEST_BEGIN(test_chunk)
{
void *p;
chunk_alloc_t *new_alloc;
chunk_dalloc_t *new_dalloc;
size_t old_size, new_size;
new_alloc = chunk_alloc;
new_dalloc = chunk_dalloc;
old_size = sizeof(chunk_alloc_t *);
new_size = sizeof(chunk_alloc_t *);
assert_d_eq(mallctl("pool.0.arena.0.chunk.alloc", &old_alloc,
&old_size, &new_alloc, new_size), 0,
"Unexpected alloc error");
assert_ptr_ne(old_alloc, new_alloc,
"Unexpected alloc error");
assert_d_eq(mallctl("pool.0.arena.0.chunk.dalloc", &old_dalloc, &old_size,
&new_dalloc, new_size), 0, "Unexpected dalloc error");
assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
p = mallocx(42, 0);
assert_ptr_ne(p, NULL, "Unexpected alloc error");
free(p);
assert_d_eq(mallctl("pool.0.arena.0.chunk.alloc", NULL,
NULL, &old_alloc, old_size), 0,
"Unexpected alloc error");
assert_d_eq(mallctl("pool.0.arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
old_size), 0, "Unexpected dalloc error");
}
TEST_END
int
main(void)
{
return (test(test_chunk));
}
| 1,469 | 23.5 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/aligned_alloc.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
size_t alignment;
void *p;
alignment = 0;
set_errno(0);
p = aligned_alloc(alignment, 1);
assert_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu", alignment);
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
set_errno(0);
p = aligned_alloc(alignment + 1, 1);
assert_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu",
alignment + 1);
}
}
TEST_END
TEST_BEGIN(test_oom_errors)
{
size_t alignment, size;
void *p;
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x8000000000000000);
size = UINT64_C(0x8000000000000000);
#else
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
size = UINT64_C(0xc000000000000001);
#else
alignment = 0x40000000LU;
size = 0xc0000001LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
size = UINT64_C(0xfffffffffffffff0);
#else
size = 0xfffffff0LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(&p, %zu, %zu)",
alignment, size);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t alignment, size, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
ps[i] = aligned_alloc(alignment, size);
if (ps[i] == NULL) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
test_fail(
"Error for alignment=%zu, "
"size=%zu (%#zx): %s",
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
free(ps[i]);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
}
| 2,760 | 20.912698 | 60 | c |
null | NearPMSW-main/nearpm/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/jemalloc/test/integration/posix_memalign.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
size_t alignment;
void *p;
for (alignment = 0; alignment < sizeof(void *); alignment++) {
assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
"Expected error for invalid alignment %zu",
alignment);
}
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
"Expected error for invalid alignment %zu",
alignment + 1);
}
}
TEST_END
TEST_BEGIN(test_oom_errors)
{
size_t alignment, size;
void *p;
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x8000000000000000);
size = UINT64_C(0x8000000000000000);
#else
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
size = UINT64_C(0xc000000000000001);
#else
alignment = 0x40000000LU;
size = 0xc0000001LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
size = UINT64_C(0xfffffffffffffff0);
#else
size = 0xfffffff0LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t alignment, size, total;
unsigned i;
int err;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
err = posix_memalign(&ps[i],
alignment, size);
if (err) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
test_fail(
"Error for alignment=%zu, "
"size=%zu (%#zx): %s",
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
free(ps[i]);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
}
| 2,603 | 20.7 | 63 | c |