/*
 * Dataset columns: repo | file | code | file_length | avg_line_length |
 * max_line_length | extension_type. Each row below is one source file.
 */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_unnamespace.h */
#undef a0calloc
#undef a0free
#undef a0malloc
#undef arena_alloc_junk_small
#undef arena_bin_index
#undef arena_bin_info
#undef arena_boot
#undef arena_chunk_alloc_huge
#undef arena_chunk_dalloc_huge
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_locked
#undef arena_dalloc_small
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_malloc
#undef arena_malloc_large
#undef arena_malloc_small
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbits_unzeroed_set
#undef arena_mapbitsp_get
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_mapelm_to_pageind
#undef arena_mapp_get
#undef arena_maxclass
#undef arena_new
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_ctx_get
#undef arena_prof_ctx_set
#undef arena_prof_promoted
#undef arena_ptr_small_binind_get
#undef arena_purge_all
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_redzone_corruption
#undef arena_run_regind
#undef arena_runs_avail_tree_iter
#undef arena_salloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arenas
#undef pools
#undef arenas_booted
#undef arenas_cleanup
#undef arenas_extend
#undef arenas_initialized
#undef arenas_lock
#undef arenas_tls
#undef arenas_tsd
#undef arenas_tsd_boot
#undef arenas_tsd_cleanup_wrapper
#undef arenas_tsd_get
#undef arenas_tsd_get_wrapper
#undef arenas_tsd_init_head
#undef arenas_tsd_set
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef base_alloc
#undef base_boot
#undef base_calloc
#undef base_free_fn
#undef base_malloc_fn
#undef base_node_alloc
#undef base_node_dalloc
#undef base_pool
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_info_ngroups
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bt_init
#undef buferror
#undef choose_arena
#undef choose_arena_hard
#undef chunk_alloc_arena
#undef chunk_alloc_base
#undef chunk_alloc_default
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_global_boot
#undef chunk_boot
#undef chunk_dalloc_default
#undef chunk_dalloc_mmap
#undef chunk_dss_boot
#undef chunk_dss_postfork_child
#undef chunk_dss_postfork_parent
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_dss_prefork
#undef chunk_in_dss
#undef chunk_npages
#undef chunk_postfork_child
#undef chunk_postfork_parent
#undef chunk_prefork
#undef chunk_unmap
#undef chunk_record
#undef chunks_mtx
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_bucket_search
#undef ckh_count
#undef ckh_delete
#undef ckh_evict_reloc_insert
#undef ckh_insert
#undef ckh_isearch
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_rebuild
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ckh_try_bucket_insert
#undef ckh_try_insert
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef dss_prec_names
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szad_first
#undef extent_tree_szad_insert
#undef extent_tree_szad_iter
#undef extent_tree_szad_iter_recurse
#undef extent_tree_szad_iter_start
#undef extent_tree_szad_last
#undef extent_tree_szad_new
#undef extent_tree_szad_next
#undef extent_tree_szad_nsearch
#undef extent_tree_szad_prev
#undef extent_tree_szad_psearch
#undef extent_tree_szad_remove
#undef extent_tree_szad_reverse_iter
#undef extent_tree_szad_reverse_iter_recurse
#undef extent_tree_szad_reverse_iter_start
#undef extent_tree_szad_search
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_allocated
#undef huge_boot
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_ndalloc
#undef huge_nmalloc
#undef huge_palloc
#undef huge_postfork_child
#undef huge_postfork_parent
#undef huge_prefork
#undef huge_prof_ctx_get
#undef huge_prof_ctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef icalloc
#undef icalloct
#undef idalloc
#undef idalloct
#undef imalloc
#undef imalloct
#undef in_valgrind
#undef ipalloc
#undef ipalloct
#undef iqalloc
#undef iqalloct
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef lg_floor
#undef malloc_cprintf
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_rwlock_init
#undef malloc_rwlock_postfork_child
#undef malloc_rwlock_postfork_parent
#undef malloc_rwlock_prefork
#undef malloc_rwlock_rdlock
#undef malloc_rwlock_wrlock
#undef malloc_rwlock_unlock
#undef malloc_rwlock_destroy
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef mb_write
#undef mutex_boot
#undef narenas_auto
#undef narenas_total
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef npools
#undef npools_cnt
#undef opt_abort
#undef opt_dss
#undef opt_junk
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_purge
#undef pools_shared_data_initialized
#undef pow2_ceil
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_ctx_get
#undef prof_ctx_set
#undef prof_dump_open
#undef prof_free
#undef prof_gdump
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_record_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork
#undef prof_realloc
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tdata_booted
#undef prof_tdata_cleanup
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_initialized
#undef prof_tdata_tls
#undef prof_tdata_tsd
#undef prof_tdata_tsd_boot
#undef prof_tdata_tsd_cleanup_wrapper
#undef prof_tdata_tsd_get
#undef prof_tdata_tsd_get_wrapper
#undef prof_tdata_tsd_init_head
#undef prof_tdata_tsd_set
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_boot
#undef quarantine_booted
#undef quarantine_cleanup
#undef quarantine_init
#undef quarantine_tls
#undef quarantine_tsd
#undef quarantine_tsd_boot
#undef quarantine_tsd_cleanup_wrapper
#undef quarantine_tsd_get
#undef quarantine_tsd_get_wrapper
#undef quarantine_tsd_init_head
#undef quarantine_tsd_set
#undef register_zone
#undef rtree_delete
#undef rtree_get
#undef rtree_get_locked
#undef rtree_new
#undef rtree_postfork_child
#undef rtree_postfork_parent
#undef rtree_prefork
#undef rtree_set
#undef s2u
#undef sa2u
#undef set_errno
#undef small_bin2size
#undef small_bin2size_compute
#undef small_bin2size_lookup
#undef small_bin2size_tab
#undef small_s2u
#undef small_s2u_compute
#undef small_s2u_lookup
#undef small_size2bin
#undef small_size2bin_compute
#undef small_size2bin_lookup
#undef small_size2bin_tab
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_chunks
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_associate
#undef tcache_arena_dissociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot0
#undef tcache_boot1
#undef tcache_booted
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_destroy
#undef tcache_enabled_booted
#undef tcache_enabled_get
#undef tcache_enabled_initialized
#undef tcache_enabled_set
#undef tcache_enabled_tls
#undef tcache_enabled_tsd
#undef tcache_enabled_tsd_boot
#undef tcache_enabled_tsd_cleanup_wrapper
#undef tcache_enabled_tsd_get
#undef tcache_enabled_tsd_get_wrapper
#undef tcache_enabled_tsd_init_head
#undef tcache_enabled_tsd_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_initialized
#undef tcache_maxclass
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcache_thread_cleanup
#undef tcache_tls
#undef tcache_tsd
#undef tcache_tsd_boot
#undef tcache_tsd_cleanup_wrapper
#undef tcache_tsd_get
#undef tcache_tsd_get_wrapper
#undef tcache_tsd_init_head
#undef tcache_tsd_set
#undef thread_allocated_booted
#undef thread_allocated_initialized
#undef thread_allocated_tls
#undef thread_allocated_tsd
#undef thread_allocated_tsd_boot
#undef thread_allocated_tsd_cleanup_wrapper
#undef thread_allocated_tsd_get
#undef thread_allocated_tsd_get_wrapper
#undef thread_allocated_tsd_init_head
#undef thread_allocated_tsd_set
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef pool_new
#undef pool_destroy
#undef pools_lock
#undef pool_base_lock
#undef pool_prefork
#undef pool_postfork_parent
#undef pool_postfork_child
#undef pool_alloc
#undef vec_get
#undef vec_set
#undef vec_delete
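/*
 * Note (an assumption, based on jemalloc's usual layout): the #undef
 * list above reverses the name mangling applied by the matching
 * private_namespace.h, which #defines each internal jemalloc symbol to
 * a prefixed variant so it does not leak into the public namespace.
 */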
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/err.h */
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* err.h - error and warning messages
*/
#ifndef ERR_H
#define ERR_H 1
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
/*
* err - windows implementation of unix err function
*/
__declspec(noreturn) static void
err(int eval, const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
vfprintf(stderr, fmt, vl);
va_end(vl);
exit(eval);
}
/*
* warn - windows implementation of unix warn function
*/
static void
warn(const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
fprintf(stderr, "Warning: ");
vfprintf(stderr, fmt, vl);
va_end(vl);
}
#endif /* ERR_H */
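/*
 * A minimal usage sketch (not part of the original header; the function
 * and its "path" argument are hypothetical). Note that unlike BSD
 * err(3), this implementation does not append strerror(errno).
 */
#if 0
static void
open_or_die(const char *path)
{
	FILE *f = fopen(path, "rb");
	if (f == NULL)
		err(2, "cannot open %s\n", path);	/* prints, then exit(2) */
	warn("opened %s\n", path);	/* prints "Warning: ..." and returns */
	fclose(f);
}
#endif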
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sched.h */
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sched.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/win_mmap.h */
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
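/*
 * A minimal sketch of the rounding macros (hypothetical values, using
 * the typical 64KB Windows allocation granularity):
 *
 *	roundup(100000, 65536)   == 131072
 *	rounddown(100000, 65536) == 65536
 *	roundup(65536, 65536)    == 65536	(already aligned)
 */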
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry;
HANDLE FileHandle;
HANDLE FileMappingHandle;
void *BaseAddress;
void *EndAddress;
DWORD Access;
os_off_t Offset;
size_t FileLen;
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
extern SRWLOCK FileMappingQLock;
extern SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/platform.h */
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
* This is included to each source file using "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non-Unicode characters */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
 * The inline keyword is available only in C++; in C, VC++ provides
 * __inline instead.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
* missing definitions
*/
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ);
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
__inline int
sigemptyset(sigset_t *set)
{
*set = 0;
return 0;
}
__inline int
sigfillset(sigset_t *set)
{
*set = ~0;
return 0;
}
__inline int
sigaddset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set |= (1ULL << (signum - 1));
return 0;
}
__inline int
sigdelset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set &= ~(1ULL << (signum - 1));
return 0;
}
__inline int
sigismember(const sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
return ((*set & (1ULL << (signum - 1))) ? 1 : 0);
}
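/*
 * A minimal sketch of the sigset_t emulation above (SIGINT comes from
 * <signal.h>, included earlier in this header):
 */
#if 0
static void
sigset_demo(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);	/* sets bit (SIGINT - 1) */
	if (sigismember(&set, SIGINT) != 1)
		abort();
	sigdelset(&set, SIGINT);	/* clears the same bit */
}
#endif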
/* sched.h */
/*
* sched_yield -- yield the processor
*/
__inline int
sched_yield(void)
{
SwitchToThread();
return 0; /* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
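/*
 * A minimal sketch (hypothetical function names): MSVC_CONSTR() plants a
 * pointer to the function in the ".CRT$XCU" section, so the CRT invokes
 * it before main(); MSVC_DESTR() registers its function via atexit()
 * from such a constructor.
 */
#if 0
MSVC_CONSTR(mylib_init)
void mylib_init(void) { /* runs before main() */ }

MSVC_DESTR(mylib_fini)
void mylib_fini(void) { /* runs at process exit */ }
#endif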
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/libgen.h */
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake libgen.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/endian.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
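/*
 * A minimal sketch (little-endian host assumed, as noted above):
 *
 *	htole32(0x11223344) == 0x11223344	(identity)
 *	htobe32(0x11223344) == 0x44332211	(_byteswap_ulong)
 *	be16toh(0x1234)     == 0x3412		(_byteswap_ushort)
 */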
#endif /* ENDIAN_H */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/features.h */
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake features.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/unistd.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* unistd.h -- compatibility layer for POSIX operating system API
*/
#ifndef UNISTD_H
#define UNISTD_H 1
#include <stdio.h>
#define _SC_PAGESIZE 0
#define _SC_NPROCESSORS_ONLN 1
#define R_OK 04
#define W_OK 02
#define X_OK 00 /* execute permission doesn't exist on Windows */
#define F_OK 00
/*
* sysconf -- get configuration information at run time
*/
static __inline long
sysconf(int p)
{
SYSTEM_INFO si;
int ret = 0;
switch (p) {
case _SC_PAGESIZE:
GetSystemInfo(&si);
return si.dwPageSize;
case _SC_NPROCESSORS_ONLN:
for (int i = 0; i < GetActiveProcessorGroupCount(); i++) {
ret += GetActiveProcessorCount(i);
}
return ret;
default:
return 0;
}
}
#define getpid _getpid
/*
* pread -- read from a file descriptor at given offset
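 * (emulated by saving and restoring the file position with _lseeki64,
 * so it is not atomic with respect to other users of the same fd)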
*/
static ssize_t
pread(int fd, void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _read(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
/*
* pwrite -- write to a file descriptor at given offset
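 * (same seek-and-restore emulation as pread() above; not atomic)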
*/
static ssize_t
pwrite(int fd, const void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _write(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
#define S_ISBLK(x) 0 /* block devices don't exist on Windows */
/*
* basename -- parse pathname and return filename component
*/
static char *
basename(char *path)
{
char fname[_MAX_FNAME];
char ext[_MAX_EXT];
_splitpath(path, NULL, NULL, fname, ext);
sprintf(path, "%s%s", fname, ext);
return path;
}
/*
* dirname -- parse pathname and return directory component
*/
static char *
dirname(char *path)
{
if (path == NULL)
return ".";
size_t len = strlen(path);
if (len == 0)
return ".";
char *end = path + len;
/* strip trailing forward slashes and backslashes */
while ((--end) > path) {
if (*end != '\\' && *end != '/') {
*(end + 1) = '\0';
break;
}
}
/* strip basename */
while ((--end) > path) {
if (*end == '\\' || *end == '/') {
*end = '\0';
break;
}
}
if (end != path) {
return path;
/* handle edge cases */
} else if (*end == '\\' || *end == '/') {
*(end + 1) = '\0';
} else {
*end++ = '.';
*end = '\0';
}
return path;
}
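/*
 * A minimal sketch of the two path helpers above (hypothetical inputs;
 * note that both functions modify their argument in place):
 *
 *	char p1[] = "C:\\temp\\pool.obj";
 *	basename(p1);	-> "pool.obj"
 *
 *	char p2[] = "C:\\temp\\pool.obj";
 *	dirname(p2);	-> "C:\\temp"
 */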
#endif /* UNISTD_H */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/strings.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake strings.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/dirent.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake dirent.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/uio.h */
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/uio.h -- definition of iovec structure
*/
#ifndef SYS_UIO_H
#define SYS_UIO_H 1
#include <pmemcompat.h>
#ifdef __cplusplus
extern "C" {
#endif
ssize_t writev(int fd, const struct iovec *iov, int iovcnt);
#ifdef __cplusplus
}
#endif
#endif /* SYS_UIO_H */
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/file.h */
/*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/statvfs.h */
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake statvfs.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/param.h */
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8 /* bits per byte */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif /* SYS_PARAM_H */
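/*
 * A minimal sketch of the bit macros above (hypothetical two-byte
 * bitmap):
 */
#if 0
static void
bitmap_demo(void)
{
	unsigned char b[2] = { 0, 0 };

	setbit(b, 9);		/* b[1] |= 1 << 1 */
	(void)isset(b, 9);	/* nonzero */
	(void)isclr(b, 8);	/* nonzero: bit 8 is still clear */
	/* howmany(10, BPB) == 2: ten bits need two bytes */
}
#endif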
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mount.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/mount.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mman.h */
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/mman.h -- memory-mapped files for Windows
*/
#ifndef SYS_MMAN_H
#define SYS_MMAN_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define PROT_NONE 0x0
#define PROT_READ 0x1
#define PROT_WRITE 0x2
#define PROT_EXEC 0x4
#define MAP_SHARED 0x1
#define MAP_PRIVATE 0x2
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_NORESERVE 0x04000
#define MS_ASYNC 1
#define MS_SYNC 4
#define MS_INVALIDATE 2
#define MAP_FAILED ((void *)(-1))
void *mmap(void *addr, size_t len, int prot, int flags,
int fd, os_off_t offset);
int munmap(void *addr, size_t len);
int msync(void *addr, size_t len, int flags);
int mprotect(void *addr, size_t len, int prot);
#ifdef __cplusplus
}
#endif
#endif /* SYS_MMAN_H */
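/*
 * A minimal usage sketch (hypothetical fd and len; the prototypes above
 * mirror POSIX mmap(2), implemented over Win32 file mappings):
 */
#if 0
static int
map_demo(int fd, size_t len)
{
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);
	if (addr == MAP_FAILED)
		return (-1);
	msync(addr, len, MS_SYNC);	/* flush changes to the file */
	return (munmap(addr, len));
}
#endif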
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/resource.h */
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/resource.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/wait.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/wait.h
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/linux/limits.h */
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/limits.h -- fake header file
*/
/*
* XXX - The only purpose of this empty file is to avoid preprocessor
* errors when including a Linux-specific header file that has no equivalent
* on Windows. With this cheap trick, we don't need a lot of preprocessor
* conditionals in all the source code files.
*
* In the future, this will be addressed in some other way.
*/
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk_dss.c */
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
return (NULL);
#endif
}
dss_prec_t
chunk_dss_prec_get(void)
{
dss_prec_t ret;
if (have_dss == false)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
if (have_dss == false)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false);
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a huge allocation request as a negative increment.
*/
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
size_t gap_size, cpad_size;
void *cpad, *dss_next;
intptr_t incr;
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
do {
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
chunksize_mask;
/*
* Compute how much chunk-aligned pad space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
cpad = (void *)((uintptr_t)dss_max + gap_size);
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
alignment);
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(&base_pool, cpad, cpad_size);
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
return (ret);
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
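/*
 * A worked example of the alignment arithmetic above (hypothetical
 * addresses; assume a 4MB chunksize, 4MB alignment, and that sbrk(0)
 * returned dss_max == 0x1234000):
 *
 *	gap_size  = (0x400000 - (0x1234000 & 0x3fffff)) & 0x3fffff
 *	          = (0x400000 - 0x234000) & 0x3fffff = 0x1cc000
 *	cpad      = 0x1234000 + 0x1cc000 = 0x1400000
 *	ret       = ALIGNMENT_CEILING(0x1234000, 0x400000) = 0x1400000
 *	cpad_size = 0
 *
 * With alignment == chunksize, cpad_size is always 0; the chunk_unmap()
 * recycling path only matters for larger alignment requests.
 */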
bool
chunk_in_dss(void *chunk)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_boot(void)
{
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
}
void
chunk_dss_prefork(void)
{
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
| 4,272 | 20.365 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/util.c | #define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define JEMALLOC_UTIL_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
/******************************************************************************/
/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*/
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
* je_malloc_message(...) throughout the code.
*/
void
malloc_write(const char *s)
{
if (je_malloc_message != NULL)
je_malloc_message(NULL, s);
else
wrtmessage(NULL, s);
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(err, buf, buflen));
#endif
}
uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
unsigned b;
bool neg;
const char *p, *ns;
p = nptr;
if (base < 0 || base == 1 || base > 36) {
ns = p;
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
p++;
break;
case '-':
neg = true;
/* Fall through. */
case '+':
p++;
/* Fall through. */
default:
goto label_prefix;
}
}
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
* " -x").
*/
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
if (b == 0)
b = 8;
if (b == 8)
p++;
break;
case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
if (b == 0)
b = 16;
if (b == 16)
p += 2;
break;
default:
break;
}
break;
default:
p++;
ret = 0;
goto label_return;
}
}
if (b == 0)
b = 10;
/* Convert. */
ret = 0;
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
uintmax_t pret = ret;
ret *= b;
ret += digit;
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
ret = UINTMAX_MAX;
goto label_return;
}
p++;
}
if (neg)
ret = -ret;
if (p == ns) {
/* No conversion performed. */
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}
label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
} else
*endptr = (char *)p;
}
return (ret);
}
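/*
* Illustrative sketch (not part of the original source) of the base
* detection above; the JEMALLOC_EXAMPLES guard is hypothetical and never
* defined by the build.
*/
#ifdef JEMALLOC_EXAMPLES
static void
malloc_strtoumax_example(void)
{
char *end;
/* A "0x" prefix selects base 16 when base is 0. */
assert(malloc_strtoumax("0x1f", &end, 0) == 31);
/* A leading "0" followed by an octal digit selects base 8. */
assert(malloc_strtoumax("017", &end, 0) == 15);
/* No digits consumed: EINVAL is set and endptr points back at nptr. */
assert(malloc_strtoumax(" -x", &end, 0) == UINTMAX_MAX);
}
#endif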
static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
{
unsigned i;
i = U2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
s[i] = digits[x & 0xf];
x >>= 4;
} while (x > 0);
break;
} default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
assert(base >= 2 && base <= 36);
do {
i--;
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return (&s[i]);
}
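/*
* Call-convention note (clarifying comment, not in the original): u2s()
* fills the caller's buffer backward from its end and returns a pointer
* into it, e.g. u2s(255, 16, false, buf, &slen) with char buf[U2S_BUFSIZE]
* returns &buf[U2S_BUFSIZE - 3] pointing at "ff" and sets slen = 2. This
* is what lets d2s()/o2s()/x2s() prepend signs and prefixes by simply
* decrementing the returned pointer.
*/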
static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p)
{
bool neg;
if ((neg = (x < 0)))
x = -x;
s = u2s(x, 10, false, s, slen_p);
if (neg)
sign = '-';
switch (sign) {
case '-':
if (neg == false)
break;
/* Fall through. */
case ' ':
case '+':
s--;
(*slen_p)++;
*s = sign;
break;
default: not_reached();
}
return (s);
}
static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
{
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
return (s);
}
static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
{
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return (s);
}
int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) \
str[i] = (c); \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = ((slen) <= size - i) ? (slen) : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += (slen); \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = ((width) == -1) ? 0 : (((slen) < (size_t)(width)) ? \
(size_t)(width) - (slen) : 0); \
if ((left_justify) == false && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if ((left_justify) && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: \
not_reached(); \
val = 0; \
} \
} while (0)
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
f++;
/* Flags. */
while (true) {
switch (*f) {
case '#':
assert(alt_form == false);
alt_form = true;
break;
case '-':
assert(left_justify == false);
left_justify = true;
break;
case ' ':
assert(plus_space == false);
plus_space = true;
break;
case '+':
assert(plus_plus == false);
plus_plus = true;
break;
default: goto label_width;
}
f++;
}
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
f++;
if (width < 0) {
left_justify = true;
width = -width;
}
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
break;
} default:
break;
}
/* Width/precision separator. */
if (*f == '.')
f++;
else
goto label_length;
/* Precision. */
switch (*f) {
case '*':
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
prec = (int)uprec;
break;
}
default: break;
}
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
if (*f == 'l') {
len = 'q';
f++;
} else
len = 'l';
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
f++;
break;
default: break;
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
unsigned char val;
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
val = va_arg(ap, int);
buf[0] = val;
buf[1] = '\0';
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
if (s) {
slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
} else {
APPEND_S("(null)", 6);
}
f++;
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} default: not_reached();
}
break;
} default: {
APPEND_C(*f);
f++;
break;
}}
}
label_out:
if (i < size)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
}
JEMALLOC_ATTR(format(printf, 3, 4))
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
va_start(ap, format);
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return (ret);
}
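/*
* Usage sketch (hypothetical, guarded out of the build): malloc_snprintf()
* mirrors C99 snprintf(), including returning the untruncated length.
*/
#ifdef JEMALLOC_EXAMPLES
static void
malloc_snprintf_example(void)
{
char buf[8];
int n;
/* "7 of 10" fits; n == 7. */
n = malloc_snprintf(buf, sizeof(buf), "%zu of %d", (size_t)7, 10);
assert(n == 7);
/* Truncated output is still NUL-terminated; n is the full length. */
n = malloc_snprintf(buf, sizeof(buf), "%s", "hello, world");
assert(n == 12 && buf[sizeof(buf) - 1] == '\0');
}
#endif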
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
cbopaque = NULL;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
| 14,080 | 20.49771 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/stats.c | #define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET_ARRAY(n, v, t, c) do { \
size_t mib[8]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t) * (c); \
xmallctlnametomib(n, mib, &miblen); \
mib[1] = p; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_P_GET(n, v, t) CTL_P_GET_ARRAY(n, v, t, 1)
#define CTL_PI_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_PIJ_GET(n, v, t) do { \
size_t mib[8]; \
char buf[256]; \
snprintf(buf, sizeof(buf), n, p); \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(buf, mib, &miblen); \
mib[1] = p; \
mib[4] = i; \
mib[6] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
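/*
* How these macros resolve names (explanatory comment, not in the
* original): CTL_PI_GET() and friends first substitute the pool id into
* the name via snprintf(), translate the name to a MIB ("management
* information base") path with xmallctlnametomib(), and then patch the
* variable components in by index (mib[1] = pool, mib[4] = arena,
* mib[6] = bin/lrun) before the by-MIB lookup; the literal "0" components
* in the name strings are placeholders for exactly those slots. With
* p == 3 and i == 2, CTL_PI_GET("pool.%u.stats.arenas.0.pactive", &v,
* size_t) therefore reads pool.3.stats.arenas.2.pactive.
*/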
/******************************************************************************/
/* Data. */
bool opt_stats_print = false;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned p, unsigned i, bool bins, bool large);
/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page;
bool config_tcache;
unsigned nbins, j, gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_P_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
" newruns reruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns curruns\n");
}
CTL_P_GET("pool.0.arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
uint64_t nruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
if (nruns == 0) {
if (gap_start == UINT_MAX)
gap_start = j;
} else {
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
size_t curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u..%u]\n", gap_start,
j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
}
CTL_PJ_GET("pool.%u.arenas.bin.0.size", ®_size, size_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.nregs", &nregs, uint32_t);
CTL_PJ_GET("pool.%u.arenas.bin.0.run_size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.allocated",
&allocated, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nmalloc",
&nmalloc, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.ndalloc",
&ndalloc, uint64_t);
if (config_tcache) {
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nrequests",
&nrequests, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nfills",
&nfills, uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nflushes",
&nflushes, uint64_t);
}
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nrequests,
nfills, nflushes, nruns, reruns, curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nruns, reruns,
curruns);
}
}
}
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
gap_start, j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
}
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i)
{
size_t page, nlruns, j;
ssize_t gap_start;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
" curruns\n");
CTL_P_GET("pool.0.arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.ndalloc", &ndalloc,
uint64_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.nrequests", &nrequests,
uint64_t);
if (nrequests == 0) {
if (gap_start == -1)
gap_start = j;
} else {
CTL_PJ_GET("pool.%u.arenas.lrun.0.size", &run_size, size_t);
CTL_PIJ_GET("pool.%u.stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
j - gap_start);
gap_start = -1;
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu\n",
run_size, run_size / page, nmalloc, ndalloc,
nrequests, curruns);
}
}
if (gap_start != -1)
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned p, unsigned i, bool bins, bool large)
{
unsigned nthreads;
const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_P_GET("pool.0.arenas.page", &page, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_PI_GET("pool.%u.stats.arenas.0.dss", &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
CTL_PI_GET("pool.%u.stats.arenas.0.pactive", &pactive, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.pdirty", &pdirty, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.npurge", &npurge, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.nmadvise", &nmadvise, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.purged", &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
" %"PRIu64" madvise%s, %"PRIu64" purged\n",
pactive, pdirty, npurge, npurge == 1 ? "" : "s",
nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc nrequests\n");
CTL_PI_GET("pool.%u.stats.arenas.0.small.allocated", &small_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.large.allocated", &large_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.allocated", &huge_allocated, size_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_PI_GET("pool.%u.stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated + large_allocated + huge_allocated,
small_nmalloc + large_nmalloc + huge_nmalloc,
small_ndalloc + large_ndalloc + huge_ndalloc,
small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_PI_GET("pool.%u.stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, p, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, p, i);
}
void
stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
unsigned p = pool->pool_id;
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
*/
epoch = 1;
u64sz = sizeof(uint64_t);
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
}
}
}
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_P_GET("pool.0.arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
CTL_P_GET("pool.0.arenas.quantum", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_P_GET("pool.0.arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
CTL_P_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
== 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"Average profile sample interval: %"PRIu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"PRIu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
(ZU(1) << sv), sv);
}
if (config_stats) {
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
CTL_P_GET("pool.0.stats.cactive", &cactive, size_t *);
CTL_P_GET("pool.0.stats.allocated", &allocated, size_t);
CTL_P_GET("pool.0.stats.active", &active, size_t);
CTL_P_GET("pool.0.stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, mapped: %zu\n",
allocated, active, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_P_GET("pool.0.stats.chunks.total", &chunks_total, uint64_t);
CTL_P_GET("pool.0.stats.chunks.high", &chunks_high, size_t);
CTL_P_GET("pool.0.stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
if (merged) {
unsigned narenas;
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i, ninitialized;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
if (ninitialized > 1 || unmerged == false) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
p, narenas, bins, large);
}
}
}
if (unmerged) {
unsigned narenas;
/* Print stats for each arena. */
CTL_P_GET("pool.0.arenas.narenas", &narenas, unsigned);
{
VARIABLE_ARRAY(bool, initialized, narenas);
unsigned i;
CTL_P_GET_ARRAY("pool.0.arenas.initialized",
initialized, bool, narenas);
for (i = 0; i < narenas; i++) {
if (initialized[i]) {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, p, i, bins, large);
}
}
}
}
}
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
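/*
* Usage note (not in the original source): each character in opts turns
* one report section off, so stats_print(pool, NULL, NULL, "gbl")
* suppresses the general settings ('g'), per-bin stats ('b'), and large
* run stats ('l'), leaving only the merged and per-arena summaries.
*/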
| 18,137 | 31.216696 | 82 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/vector.c | #define JEMALLOC_VECTOR_C_
#include "jemalloc/internal/jemalloc_internal.h"
/* Round the value up to the smallest power of two strictly greater than n. */
static inline unsigned
ceil_p2(unsigned n)
{
/* __builtin_clz(0) is undefined, so handle 0 explicitly. */
if (n == 0)
return 1;
return 1 << (32 - __builtin_clz(n));
}
/* Calculate how big the vector list array should be. */
static inline unsigned
get_vec_part_len(unsigned n)
{
return MAX(ceil_p2(n), VECTOR_MIN_PART_SIZE);
}
/*
* Find the vector list element in which the index should be stored;
* if no such list exists, return a pointer to the place in memory where it
* should be allocated.
*/
static vec_list_t **
find_vec_list(vector_t *vector, int *index)
{
vec_list_t **vec_list;
for (vec_list = &vector->list;
*vec_list != NULL; vec_list = &(*vec_list)->next) {
if (*index < (*vec_list)->length)
break;
*index -= (*vec_list)->length;
}
return vec_list;
}
/* Return a value from vector at index. */
void *
vec_get(vector_t *vector, int index)
{
vec_list_t *vec_list = *find_vec_list(vector, &index);
return (vec_list == NULL) ? NULL : vec_list->data[index];
}
/* Set a value to vector at index. */
void
vec_set(vector_t *vector, int index, void *val)
{
vec_list_t **vec_list = find_vec_list(vector, &index);
/*
* There's no array to put the value in,
* which means a new one has to be allocated.
*/
if (*vec_list == NULL) {
int vec_part_len = get_vec_part_len(index);
*vec_list = base_malloc_fn(sizeof(vec_list_t) +
sizeof(void *) * vec_part_len);
if (*vec_list == NULL)
return;
(*vec_list)->next = NULL;
(*vec_list)->length = vec_part_len;
}
(*vec_list)->data[index] = val;
}
/* Free all the memory in the container. */
void
vec_delete(vector_t *vector)
{
vec_list_t *vec_list_next, *vec_list = vector->list;
while (vec_list != NULL) {
vec_list_next = vec_list->next;
base_free_fn(vec_list);
vec_list = vec_list_next;
}
} | 1,845 | 21.512195 | 78 | c |
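/*
* Usage sketch (hypothetical, guarded out of the build). It assumes that a
* zero-initialized vector_t is an empty container (list == NULL), which is
* how find_vec_list() treats a missing list.
*/
#ifdef JEMALLOC_EXAMPLES
static void
vector_example(void)
{
vector_t v = {0};
int x = 42;
vec_set(&v, 100, &x); /* allocates a part that covers index 100 */
assert(vec_get(&v, 100) == &x);
assert(vec_get(&v, 5000) == NULL); /* beyond any allocated part */
vec_delete(&v);
}
#endif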
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/zone.c | #include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/*
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr, config_prof));
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
return (je_malloc(size));
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
return (je_calloc(num, size));
}
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
return (ret);
}
static void
zone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
return (ret);
}
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
free(ptr);
}
#endif
static void *
zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
not_reached();
return (NULL);
}
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
if (size == 0)
size = 1;
return (s2u(size));
}
static void
zone_force_lock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_prefork();
}
static void
zone_force_unlock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_postfork_parent();
}
JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
zone.pressure_relief = NULL;
#endif
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
if (malloc_default_purgeable_zone != NULL)
purgeable_zone = malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
do {
default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
* at the location of the specified zone. Unregistering the
* default zone thus makes the last registered one the default.
* On OSX < 10.6, unregistering shifts all registered zones.
* The first registered zone then becomes the default.
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
* owns the default zone allocated pointers. We thus unregister/
* re-register it in order to ensure it's always after the
* default zone. On OSX < 10.6, there is no purgeable zone, so
* this does nothing. On OSX >= 10.6, unregistering replaces the
* purgeable zone with the last registered zone above, i.e the
* default zone. Registering it again then puts it at the end,
* obviously after the default zone.
*/
if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
} while (malloc_default_zone() != &zone);
}
| 7,677 | 26.92 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/bitmap.c | #define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t bits2groups(size_t nbits);
/******************************************************************************/
static size_t
bits2groups(size_t nbits)
{
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
!!(nbits & BITMAP_GROUP_NBITS_MASK));
}
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
unsigned i;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
/*
* Compute the number of groups necessary to store nbits bits, and
* progressively work upward through the levels until reaching a level
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = bits2groups(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = bits2groups(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
binfo->nlevels = i;
binfo->nbits = nbits;
}
size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface, so the bitmap starts out with all 1 bits, except for
* trailing unused bits (if any). Note that each group uses bit 0 to
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
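/*
* Worked example (illustrative; assumes 64-bit groups, i.e.
* LG_BITMAP_GROUP_NBITS == 6 and LG_SIZEOF_BITMAP == 3): for nbits = 200,
* level 0 needs bits2groups(200) = 4 groups and level 1 needs
* bits2groups(4) = 1 group, so nlevels = 2 and the group offsets are
* {0, 4, 5}. bitmap_info_ngroups() then yields 5 << 3 = 40, i.e.
* bitmap_size(200) is 40 bytes.
*/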
| 2,516 | 26.659341 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/ckh.c | /*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key))
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
}
return (SIZE_T_MAX);
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
ckh->hash(key, hashes);
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
}
}
return (true);
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
data = *argdata;
while (true) {
/*
* Choose a random item within the bucket to evict. This is
* critical to correct function, because without (eventually)
* evicting all items within a bucket during iteration, it
* would be possible to get stuck in an infinite loop if there
* were an item for which both hashes indicated the same
* bucket.
*/
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
* we are guaranteed to eventually escape this bucket
* during iteration, assuming pseudo-random item
* selection (true randomness would make infinite
* looping a remote possibility). The reason we can
* never get trapped forever is that there are two
* cases:
*
* 1) This bucket == argbucket, so we will quickly
* detect an eviction cycle and terminate.
* 2) An item was evicted to this bucket from another,
* which means that at least one item in this bucket
* has hashes that indicate distinct buckets.
*/
}
/* Check for a cycle. */
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
}
bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
}
}
JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
ckh->count = 0;
for (i = nins = 0; nins < count; i++) {
if (aTab[i].key != NULL) {
key = aTab[i].key;
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
}
nins++;
}
}
return (false);
}
static bool
ckh_grow(ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
#endif
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table will have to be doubled more than once in order to create a
* usable table.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
return (ret);
}
static void
ckh_shrink(ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table rebuild will fail.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
* prevent this or future operations from proceeding.
*/
return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
return;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
ckh->nshrinkfails++;
#endif
}
bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
ckh->ngrows = 0;
ckh->nshrinks = 0;
ckh->nshrinkfails = 0;
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
* Find the minimum power of 2 that is large enough to fit aBaseCount
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
* factor that will typically allow 2^aLgMinItems to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++)
; /* Do nothing. */
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
return (ret);
}
void
ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
(unsigned long long)ckh->nrelocs);
#endif
idalloc(ckh->tab);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
return (ckh->count);
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL)
*key = (void *)ckh->tab[i].key;
if (data != NULL)
*data = (void *)ckh->tab[i].data;
*tabind = i + 1;
return (false);
}
}
return (true);
}
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
bool ret;
assert(ckh != NULL);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
ckh->ninserts++;
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
goto label_return;
}
}
ret = false;
label_return:
return (ret);
}
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
}
return (false);
}
return (true);
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
return (false);
}
return (true);
}
void
ckh_string_hash(const void *key, size_t r_hash[2])
{
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
ckh_string_keycomp(const void *k1, const void *k2)
{
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
union {
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));
u.v = key;
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{
return ((k1 == k2) ? true : false);
}
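/*
* Usage sketch (hypothetical, guarded out of the build): a string-keyed
* table built from the helpers above. Note the inverted return
* convention: ckh_new() and ckh_insert() return true on failure, and
* ckh_search() returns false when the key is found.
*/
#ifdef JEMALLOC_EXAMPLES
static void
ckh_example(void)
{
ckh_t ckh;
void *data;
if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
return; /* OOM */
if (ckh_insert(&ckh, "key", "value") == false &&
    ckh_search(&ckh, "key", NULL, &data) == false)
assert(strcmp((char *)data, "value") == 0);
ckh_delete(&ckh);
}
#endif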
| 13,888 | 23.625887 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/extent.c | #define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
size_t a_size = a->size;
size_t b_size = b->size;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
}
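/*
* Ordering note (clarifying comment, not in the original): the szad tree
* sorts primarily by size and secondarily by address, so a best-fit lookup
* finds the lowest-addressed extent among equally sized candidates, e.g.
* (size 4096, addr 0x2000) < (size 4096, addr 0x3000) < (size 8192,
* addr 0x1000).
*/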
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
extent_ad_comp)
| 973 | 23.35 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/rtree.c | #define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc,
pool_t *pool)
{
rtree_t *ret;
unsigned bits_per_level, bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
if (bits > bits_in_leaf) {
height = 1 + (bits - bits_in_leaf) / bits_per_level;
if ((height-1) * bits_per_level + bits_in_leaf != bits)
height++;
} else {
height = 1;
}
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
ret = (rtree_t*)alloc(pool, offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
ret->alloc = alloc;
ret->dalloc = dalloc;
ret->pool = pool;
if (malloc_mutex_init(&ret->mutex)) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
ret->height = height;
if (height > 1) {
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
ret->level2bits[0] = (bits - bits_in_leaf) %
bits_per_level;
} else
ret->level2bits[0] = bits_per_level;
for (i = 1; i < height-1; i++)
ret->level2bits[i] = bits_per_level;
ret->level2bits[height-1] = bits_in_leaf;
} else
ret->level2bits[0] = bits;
ret->root = (void**)alloc(pool, sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
if (dalloc != NULL)
dalloc(pool, ret);
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
return (ret);
}
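/*
 * Illustrative sketch (disabled code): the level-sizing arithmetic from
 * rtree_new() above, extracted for a worked example.  The 64-byte node size
 * is an assumption for illustration only; the real value is RTREE_NODESIZE.
 * On an LP64 target it gives bits_per_level == 3 and bits_in_leaf == 6, so
 * e.g. bits == 20 yields a height of 6.
 */
#if 0
static unsigned
rtree_height_example(unsigned bits)
{
	size_t nodesize = 64;	/* assumed; really RTREE_NODESIZE */
	unsigned bits_per_level = jemalloc_ffs(pow2_ceil(nodesize /
	    sizeof(void *))) - 1;
	unsigned bits_in_leaf = jemalloc_ffs(pow2_ceil(nodesize /
	    sizeof(uint8_t))) - 1;
	unsigned height;

	if (bits > bits_in_leaf) {
		height = 1 + (bits - bits_in_leaf) / bits_per_level;
		if ((height-1) * bits_per_level + bits_in_leaf != bits)
			height++;
	} else
		height = 1;
	return (height);
}
#endif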
static void
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
{
if (level < rtree->height - 1) {
size_t nchildren, i;
nchildren = ZU(1) << rtree->level2bits[level];
for (i = 0; i < nchildren; i++) {
void **child = (void **)node[i];
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
}
if (rtree->dalloc)
rtree->dalloc(rtree->pool, node);
}
void
rtree_delete(rtree_t *rtree)
{
rtree_delete_subtree(rtree, rtree->root, 0);
malloc_mutex_destroy(&rtree->mutex);
if (rtree->dalloc)
rtree->dalloc(rtree->pool, rtree);
}
void
rtree_prefork(rtree_t *rtree)
{
malloc_mutex_prefork(&rtree->mutex);
}
void
rtree_postfork_parent(rtree_t *rtree)
{
malloc_mutex_postfork_parent(&rtree->mutex);
}
void
rtree_postfork_child(rtree_t *rtree)
{
malloc_mutex_postfork_child(&rtree->mutex);
}
| 2,549 | 21.767857 | 81 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/huge.c | #define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
void *
huge_malloc(arena_t *arena, size_t size, bool zero)
{
return (huge_palloc(arena, size, chunksize, zero));
}
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
bool is_zeroed;
pool_t *pool;
/* Allocate one or more contiguous chunks for this request. */
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (NULL);
}
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = choose_arena(arena);
if (arena == NULL)
return (NULL);
pool = arena->pool;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc(pool);
if (node == NULL)
return (NULL);
ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
if (ret == NULL) {
base_node_dalloc(pool, node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
node->arena = arena;
malloc_mutex_lock(&pool->huge_mtx);
extent_tree_ad_insert(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
return (ret);
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static bool
huge_ralloc_no_move_expand(pool_t *pool, char *ptr, size_t oldsize,
    size_t size, bool zero)
{
size_t csize;
void *expand_addr;
size_t expand_size;
extent_node_t *node, key;
arena_t *arena;
bool is_zeroed;
void *ret;
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (true);
}
expand_addr = ptr + oldsize;
expand_size = csize - oldsize;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Find the current arena. */
arena = node->arena;
malloc_mutex_unlock(&pool->huge_mtx);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
&is_zeroed);
if (ret == NULL)
return (true);
assert(ret == expand_addr);
malloc_mutex_lock(&pool->huge_mtx);
/* Update the size of the huge allocation. */
node->size = csize;
malloc_mutex_unlock(&pool->huge_mtx);
if (config_fill && !zero) {
if (unlikely(opt_junk))
memset(expand_addr, 0xa5, expand_size);
else if (unlikely(opt_zero) && !is_zeroed)
memset(expand_addr, 0, expand_size);
}
return (false);
}
bool
huge_ralloc_no_move(pool_t *pool, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
/* Both allocations must be huge to avoid a move. */
if (oldsize <= arena_maxclass)
return (true);
assert(CHUNK_CEILING(oldsize) == oldsize);
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
return (false);
}
/* Overflow. */
if (CHUNK_CEILING(size) == 0)
return (true);
/* Shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(size)) {
extent_node_t *node, key;
void *excess_addr;
size_t excess_size;
malloc_mutex_lock(&pool->huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
/* Update the size of the huge allocation. */
node->size = CHUNK_CEILING(size);
malloc_mutex_unlock(&pool->huge_mtx);
excess_addr = (char *)node->addr + CHUNK_CEILING(size);
excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(size);
/* Zap the excess chunks. */
huge_dalloc_junk(excess_addr, excess_size);
arena_chunk_dalloc_huge(node->arena, excess_addr, excess_size);
return (false);
}
/* Attempt to expand the allocation in-place. */
if (huge_ralloc_no_move_expand(pool, ptr, oldsize, size + extra, zero)) {
if (extra == 0)
return (true);
/* Try again, this time without extra. */
return (huge_ralloc_no_move_expand(pool, ptr, oldsize, size, zero));
}
return (false);
}
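/*
 * Illustrative sketch (disabled code): the in-place condition above with
 * concrete numbers, assuming a hypothetical 4 MB chunksize.  Growing a 5 MB
 * allocation (chunk ceiling 8 MB) to 6 MB keeps the same ceiling and stays
 * put; growing it to 9 MB (ceiling 12 MB) forces a move.
 */
#if 0
static bool
huge_same_size_class_example(size_t oldsize, size_t size, size_t extra)
{
	/* Mirrors the no-move test in huge_ralloc_no_move(). */
	return (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra));
}
#endif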
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
if (huge_ralloc_no_move(arena->pool, ptr, oldsize, size, extra, zero) == false)
return (ptr);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(arena, size + extra, alignment, zero);
else
ret = huge_malloc(arena, size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(arena, size, alignment, zero);
else
ret = huge_malloc(arena, size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
return (ret);
}
void
huge_dalloc(pool_t *pool, void *ptr)
{
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&pool->huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&pool->huge, node);
malloc_mutex_unlock(&pool->huge_mtx);
huge_dalloc_junk(node->addr, node->size);
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
base_node_dalloc(pool, node);
}
size_t
huge_salloc(const void *ptr)
{
size_t ret = 0;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != 0)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
size_t
huge_pool_salloc(pool_t *pool, const void *ptr)
{
size_t ret = 0;
extent_node_t *node, key;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->size;
malloc_mutex_unlock(&pool->huge_mtx);
return (ret);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret = NULL;
size_t i;
extent_node_t *node, key;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
ret = node->prof_ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (ret != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;
size_t i;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < npools; ++i) {
pool_t *pool = pools[i];
if (pool == NULL)
continue;
malloc_mutex_lock(&pool->huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&pool->huge, &key);
if (node != NULL)
node->prof_ctx = ctx;
malloc_mutex_unlock(&pool->huge_mtx);
if (node != NULL)
break;
}
malloc_mutex_unlock(&pools_lock);
}
/*
* Called at each pool opening.
*/
bool
huge_boot(pool_t *pool)
{
if (malloc_mutex_init(&pool->huge_mtx))
return (true);
return (false);
}
/*
* Called only at pool creation.
*/
bool
huge_init(pool_t *pool)
{
if (huge_boot(pool))
return (true);
/* Initialize chunks data. */
extent_tree_ad_new(&pool->huge);
return (false);
}
void
huge_prefork(pool_t *pool)
{
malloc_mutex_prefork(&pool->huge_mtx);
}
void
huge_postfork_parent(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->huge_mtx);
}
void
huge_postfork_child(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->huge_mtx);
}
| 9,358 | 21.021176 | 93 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/tcache.c | #define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#define ARR_INITIALIZER JEMALLOC_ARG_CONCAT({0})
malloc_tsd_data(, tcache, tsd_tcache_t, TSD_TCACHE_INITIALIZER)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
size_t tcache_maxclass;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
{
return (arena_salloc(ptr, false));
}
void
tcache_event_hard(tcache_t *tcache)
{
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
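/*
 * Illustrative sketch (disabled code): how lg_fill_div, tuned by the GC
 * above, scales refill batches.  With a hypothetical ncached_max of 200,
 * lg_fill_div values 1, 2 and 3 yield batches of 100, 50 and 25 objects;
 * the guard in tcache_event_hard() keeps the batch size at least 1.
 */
#if 0
static unsigned
tcache_fill_count_example(unsigned ncached_max, unsigned lg_fill_div)
{
	return (ncached_max >> lg_fill_div);
}
#endif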
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < NBINS);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
if (config_prof && arena == tcache->arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
arena_mapp_get(chunk, pageind);
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr,
&arena_bin_info[binind], true);
}
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
} else {
/*
* This object was allocated via a different
* arena bin than the one that is currently
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &tcache->arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
UNUSED bool idump;
if (config_prof)
idump = false;
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
}
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
tcache->arena = arena;
}
void
tcache_arena_dissociate(tcache_t *tcache)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
tcache_stats_merge(tcache, tcache->arena);
malloc_mutex_unlock(&tcache->arena->lock);
}
}
tcache_t *
tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create)
{
arena_t dummy;
DUMMY_ARENA_INITIALIZE(dummy, pool);
if (tcache == NULL) {
if (create == false) {
/*
* Creating a tcache here would cause
* allocation as a side effect of free().
* Ordinarily that would be okay since
* tcache_create() failure is a soft failure
* that doesn't propagate. However, if TLS
* data are freed via free() as in glibc,
* subtle corruption could result from setting
* a TLS variable after its backing memory is
* freed.
*/
return (NULL);
}
if (tcache_enabled_get() == false) {
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
return (tcache_create(choose_arena(&dummy)));
}
if (tcache == TCACHE_STATE_PURGATORY) {
/*
* Make a note that an allocator function was called
* after tcache_thread_cleanup() was called.
*/
tsd_tcache_t *tsd = tcache_tsd_get();
tcache = TCACHE_STATE_REINCARNATED;
tsd->seqno[pool->pool_id] = pool->seqno;
tsd->tcaches[pool->pool_id] = tcache;
return (NULL);
}
if (tcache == TCACHE_STATE_REINCARNATED)
return (NULL);
not_reached();
return (NULL);
}
tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
unsigned i;
tsd_tcache_t *tsd = tcache_tsd_get();
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
*
* That this works relies on the same logic as in ipalloc(), but we
* cannot directly call ipalloc() here due to tcache bootstrapping
* issues.
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
if (size <= SMALL_MAXCLASS)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
tsd->seqno[arena->pool->pool_id] = arena->pool->seqno;
tsd->tcaches[arena->pool->pool_id] = tcache;
return (tcache);
}
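/*
 * Illustrative sketch (disabled code): the cacheline rounding used by
 * tcache_create() above.  Assuming a 64-byte cacheline, 1 rounds to 64,
 * 64 stays 64, and 65 rounds to 128, so no two tcaches share a line.
 */
#if 0
static size_t
cacheline_round_example(size_t size)
{
	return ((size + CACHELINE_MASK) & (-CACHELINE));
}
#endif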
void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
size_t tcache_size;
tcache_arena_dissociate(tcache);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
arena_dalloc_large(arena, chunk, tcache);
} else
idalloct(tcache, false);
}
bool
tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len)
{
if (len == UINT_MAX)
return (true);
assert(len < POOLS_MAX);
	/* Round up the new length to the nearest power of 2... */
	size_t npools = 1ULL << (32 - __builtin_clz(len + 1));
	/* ... but never less than POOLS_MIN. */
if (npools < POOLS_MIN)
npools = POOLS_MIN;
unsigned *tseqno = base_malloc_fn(npools * sizeof (unsigned));
if (tseqno == NULL)
return (true);
if (tsd->seqno != NULL)
memcpy(tseqno, tsd->seqno, tsd->npools * sizeof (unsigned));
memset(&tseqno[tsd->npools], 0, (npools - tsd->npools) * sizeof (unsigned));
tcache_t **tcaches = base_malloc_fn(npools * sizeof (tcache_t *));
if (tcaches == NULL) {
base_free_fn(tseqno);
return (true);
}
if (tsd->tcaches != NULL)
memcpy(tcaches, tsd->tcaches, tsd->npools * sizeof (tcache_t *));
memset(&tcaches[tsd->npools], 0, (npools - tsd->npools) * sizeof (tcache_t *));
base_free_fn(tsd->seqno);
tsd->seqno = tseqno;
base_free_fn(tsd->tcaches);
tsd->tcaches = tcaches;
tsd->npools = npools;
return (false);
}
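/*
 * Illustrative sketch (disabled code): the power-of-two rounding above.
 * For len == 5, __builtin_clz(6) is 29 on a 32-bit int, so the table grows
 * to 1 << (32 - 29) == 8 slots, subject to the POOLS_MIN floor.
 */
#if 0
static size_t
npools_round_example(unsigned len)
{
	size_t npools = 1ULL << (32 - __builtin_clz(len + 1));

	return (npools < POOLS_MIN ? POOLS_MIN : npools);
}
#endif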
void
tcache_thread_cleanup(void *arg)
{
int i;
tsd_tcache_t *tsd_array = arg;
malloc_mutex_lock(&pools_lock);
for (i = 0; i < tsd_array->npools; ++i) {
tcache_t *tcache = tsd_array->tcaches[i];
if (tcache != NULL) {
if (tcache == TCACHE_STATE_DISABLED) {
/* Do nothing. */
} else if (tcache == TCACHE_STATE_REINCARNATED) {
/*
* Another destructor called an allocator function after this
* destructor was called. Reset tcache to
* TCACHE_STATE_PURGATORY in order to receive another callback.
*/
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
} else if (tcache == TCACHE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to TCACHE_STATE_PURGATORY so that other destructors wouldn't
* cause re-creation of the tcache. This time, do nothing, so
* that the destructor will not be called again.
*/
} else if (tcache != NULL) {
assert(tcache != TCACHE_STATE_PURGATORY);
if (pools[i] != NULL && tsd_array->seqno[i] == pools[i]->seqno)
tcache_destroy(tcache);
tsd_array->tcaches[i] = TCACHE_STATE_PURGATORY;
}
}
}
base_free_fn(tsd_array->seqno);
base_free_fn(tsd_array->tcaches);
tsd_array->npools = 0;
malloc_mutex_unlock(&pools_lock);
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
bool
tcache_boot0(void)
{
unsigned i;
	/* Array is still initialized from a previous boot; nothing to do. */
if (tcache_bin_info != NULL)
return (false);
/*
* If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1ULL << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1ULL << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1ULL << opt_lg_tcache_max);
nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(&base_pool,
nhbins * sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
return (false);
}
bool
tcache_boot1(void)
{
if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
return (true);
return (false);
}
| 15,882 | 26.057922 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk.c | #define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void chunk_dalloc_core(pool_t *pool, void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
bool zeroed;
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning NULL.
*/
return (NULL);
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
key.addr = new_addr;
key.size = alloc_size;
malloc_mutex_lock(&pool->chunks_mtx);
node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL || (new_addr && node->addr != new_addr)) {
malloc_mutex_unlock(&pool->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
zeroed = node->zeroed;
if (zeroed)
*zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&pool->chunks_mtx);
node = base_node_alloc(pool);
if (node == NULL) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
malloc_mutex_lock(&pool->chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&pool->chunks_mtx);
if (node != NULL)
base_node_dalloc(pool, node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
return (ret);
}
/*
* If the caller specifies (*zero == false), it is still possible to receive
* zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
static void *
chunk_alloc_core(pool_t *pool, void *new_addr, size_t size, size_t alignment,
bool base, bool *zero, dss_prec_t dss_prec)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL
&& (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss,
new_addr, size, alignment, base, zero)) != NULL)
return (ret);
/* requesting an address only implemented for recycle */
if (new_addr == NULL &&
(ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* All strategies for allocation failed. */
return (NULL);
}
static bool
chunk_register(pool_t *pool, void *chunk, size_t size, bool base)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
if (config_ivsalloc && base == false) {
if (rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 1))
return (true);
}
if (config_stats || config_prof) {
bool gdump;
malloc_mutex_lock(&pool->chunks_mtx);
if (config_stats)
pool->stats_chunks.nchunks += (size / chunksize);
pool->stats_chunks.curchunks += (size / chunksize);
if (pool->stats_chunks.curchunks > pool->stats_chunks.highchunks) {
pool->stats_chunks.highchunks =
pool->stats_chunks.curchunks;
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
malloc_mutex_unlock(&pool->chunks_mtx);
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
return (false);
}
void *
chunk_alloc_base(pool_t *pool, size_t size)
{
void *ret;
bool zero;
zero = false;
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
ret = chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, NULL, size,
chunksize, false, &zero);
} else {
ret = chunk_alloc_core(pool, NULL, size, chunksize, true, &zero,
chunk_dss_prec_get());
}
if (ret == NULL)
return (NULL);
if (chunk_register(pool, ret, size, true)) {
chunk_dalloc_core(pool, ret, size);
return (NULL);
}
return (ret);
}
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero)
{
void *ret;
ret = chunk_alloc(new_addr, size, alignment, zero,
arena->ind, arena->pool);
if (ret != NULL && chunk_register(arena->pool, ret, size, false)) {
chunk_dalloc(ret, size, arena->ind, arena->pool);
ret = NULL;
}
return (ret);
}
/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind, pool_t *pool)
{
if (pool->pool_id != 0) {
/* Custom pools can only use existing chunks. */
return (chunk_recycle(pool, &pool->chunks_szad_mmap,
&pool->chunks_ad_mmap, new_addr, size,
alignment, false, zero));
} else {
malloc_rwlock_rdlock(&pool->arenas_lock);
dss_prec_t dss_prec = pool->arenas[arena_ind]->dss_prec;
malloc_rwlock_unlock(&pool->arenas_lock);
return (chunk_alloc_core(pool, new_addr, size, alignment,
false, zero, dss_prec));
}
}
void
chunk_record(pool_t *pool, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size, bool zeroed)
{
bool unzeroed, file_mapped;
extent_node_t *xnode, *node, *prev, *xprev, key;
file_mapped = pool_is_file_mapped(pool);
unzeroed = pages_purge(chunk, size, file_mapped);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
	 * If pages_purge() reported that the pages were zeroed as a side
	 * effect of purging, we can safely do this assignment.
*/
if (zeroed == false && unzeroed == false) {
zeroed = true;
}
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc(pool);
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&pool->chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
node->zeroed = (node->zeroed && zeroed);
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = zeroed;
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
label_return:
malloc_mutex_unlock(&pool->chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dalloc(pool, xnode);
if (xprev != NULL)
base_node_dalloc(pool, xprev);
}
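/*
 * Illustrative sketch (disabled code): the adjacency test behind the
 * backward coalescing above.  A free node spanning the hypothetical range
 * [0x100000, 0x200000) ends exactly where a chunk freed at 0x200000 begins,
 * so the two merge into one extent.
 */
#if 0
static bool
extents_adjacent_example(const extent_node_t *prev, void *chunk)
{
	return ((void *)((uintptr_t)prev->addr + prev->size) == chunk);
}
#endif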
void
chunk_unmap(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (have_dss && chunk_in_dss(chunk))
chunk_record(pool, &pool->chunks_szad_dss, &pool->chunks_ad_dss, chunk, size, false);
else if (chunk_dalloc_mmap(chunk, size))
chunk_record(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap, chunk, size, false);
}
static void
chunk_dalloc_core(pool_t *pool, void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(pool->chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&pool->chunks_mtx);
assert(pool->stats_chunks.curchunks >= (size / chunksize));
pool->stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&pool->chunks_mtx);
}
chunk_unmap(pool, chunk, size);
}
/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
chunk_dalloc_core(pool, chunk, size);
return (false);
}
bool
chunk_global_boot(void)
{
if (have_dss && chunk_dss_boot())
return (true);
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
return (false);
}
/*
* Called at each pool opening.
*/
bool
chunk_boot(pool_t *pool)
{
if (config_stats || config_prof) {
if (malloc_mutex_init(&pool->chunks_mtx))
return (true);
}
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
if (malloc_mutex_init(&rtree->mutex))
return (true);
}
return (false);
}
/*
* Called only at pool creation.
*/
bool
chunk_init(pool_t *pool)
{
if (chunk_boot(pool))
return (true);
if (config_stats || config_prof)
memset(&pool->stats_chunks, 0, sizeof(chunk_stats_t));
extent_tree_szad_new(&pool->chunks_szad_mmap);
extent_tree_ad_new(&pool->chunks_ad_mmap);
extent_tree_szad_new(&pool->chunks_szad_dss);
extent_tree_ad_new(&pool->chunks_ad_dss);
if (config_ivsalloc) {
pool->chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL, pool);
if (pool->chunks_rtree == NULL)
return (true);
}
return (false);
}
void
chunk_prefork0(pool_t *pool)
{
if (config_ivsalloc)
rtree_prefork(pool->chunks_rtree);
}
void
chunk_prefork1(pool_t *pool)
{
malloc_mutex_prefork(&pool->chunks_mtx);
}
void
chunk_postfork_parent0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_parent(pool->chunks_rtree);
}
void
chunk_postfork_parent1(pool_t *pool)
{
malloc_mutex_postfork_parent(&pool->chunks_mtx);
}
void
chunk_postfork_child0(pool_t *pool)
{
if (config_ivsalloc)
rtree_postfork_child(pool->chunks_rtree);
}
void
chunk_postfork_child1(pool_t *pool)
{
malloc_mutex_postfork_child(&pool->chunks_mtx);
}
| 13,794 | 26.371032 | 93 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/chunk_mmap.c | #define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
bool *zero);
/******************************************************************************/
static void *
pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
abort();
}
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}
static void
pages_unmap(void *addr, size_t size)
{
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}
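/*
 * Illustrative sketch (disabled code): the lead/trail arithmetic above with
 * hypothetical numbers (assuming PAGE == 4096).  Mapping three pages at
 * 0x1000 and requesting one page aligned to 2*PAGE trims one leading and
 * one trailing page, keeping [0x2000, 0x3000).
 */
#if 0
static void
pages_trim_example(void)
{
	uintptr_t addr = 0x1000;	/* hypothetical mapping address */
	size_t alloc_size = 3 * PAGE;
	size_t leadsize = ALIGNMENT_CEILING(addr, 2 * PAGE) - addr;
	size_t trailsize = alloc_size - leadsize - PAGE;

	assert(leadsize == PAGE && trailsize == PAGE);
}
#endif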
bool
pages_purge(void *addr, size_t length, bool file_mapped)
{
bool unzeroed;
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || file_mapped || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#else
/* Last resort no-op. */
unzeroed = true;
#endif
return (unzeroed);
}
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
void *ret, *pages;
size_t alloc_size, leadsize;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
return (ret);
}
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in one or two calls to
* pages_unmap().
*
* Optimistically try mapping precisely the right amount before falling
* back to the slow method, with the expectation that the optimistic
* approach works most of the time.
*/
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
}
assert(ret != NULL);
*zero = true;
return (ret);
}
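/*
 * Illustrative sketch (disabled code): the fast-path test above.  With a
 * hypothetical 4 MB chunk alignment, an mmap() result of 0x7f0000c00000
 * has offset 0 and is used directly, while 0x7f0000c01000 triggers the
 * unmap-and-retry slow path.
 */
#if 0
static bool
mmap_fast_path_example(void *ret, size_t alignment)
{
	return (ALIGNMENT_ADDR2OFFSET(ret, alignment) == 0);
}
#endif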
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
pages_unmap(chunk, size);
return (config_munmap == false);
}
| 5,078 | 22.733645 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/quarantine.c | #define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
* quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
/* Data. */
malloc_tsd_data(, quarantine, quarantine_t *, NULL)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
quarantine->curobjs = 0;
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
quarantine_tsd_set(&quarantine);
return (quarantine);
}
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(quarantine);
return (quarantine);
}
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
quarantine->lg_maxobjs)) {
/* objs ring buffer data are contiguous. */
memcpy(ret->objs, &quarantine->objs[quarantine->first],
quarantine->curobjs * sizeof(quarantine_obj_t));
} else {
/* objs ring buffer data wrap around. */
size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
quarantine->first;
size_t ncopy_b = quarantine->curobjs - ncopy_a;
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
* sizeof(quarantine_obj_t));
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloc(quarantine);
return (ret);
}
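/*
 * Illustrative sketch (disabled code): the two-memcpy wrap-around case
 * above.  With a capacity of 4 (lg_maxobjs == 2), first == 3 and
 * curobjs == 3, the live objects sit in slots 3, 0 and 1; the grown buffer
 * receives slot 3 first (ncopy_a == 1) and then slots 0-1 (ncopy_b == 2).
 */
#if 0
static void
ring_copy_example(quarantine_obj_t *dst, const quarantine_obj_t *src,
    size_t first, size_t curobjs, size_t cap)
{
	if (first + curobjs <= cap)
		memcpy(dst, &src[first], curobjs * sizeof(*src));
	else {
		size_t ncopy_a = cap - first;

		memcpy(dst, &src[first], ncopy_a * sizeof(*src));
		memcpy(&dst[ncopy_a], src, (curobjs - ncopy_a) *
		    sizeof(*src));
	}
}
#endif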
static void
quarantine_drain_one(quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(quarantine);
}
void
quarantine(void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* Make a note that quarantine() was called after
* quarantine_cleanup() was called.
*/
quarantine = QUARANTINE_STATE_REINCARNATED;
quarantine_tsd_set(&quarantine);
}
idalloc(ptr);
return;
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
*/
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
quarantine = quarantine_grow(quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
if (quarantine->curbytes + usize <= opt_quarantine) {
size_t offset = (quarantine->first + quarantine->curobjs) &
((ZU(1) << quarantine->lg_maxobjs) - 1);
quarantine_obj_t *obj = &quarantine->objs[offset];
obj->ptr = ptr;
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
if (config_fill && opt_junk) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
if ((config_valgrind == false || in_valgrind == false)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
/*
* Another destructor deallocated memory after this destructor
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
* in order to receive another callback.
*/
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to QUARANTINE_STATE_PURGATORY so that other destructors
* wouldn't cause re-creation of the quarantine. This time, do
* nothing, so that the destructor will not be called again.
*/
} else if (quarantine != NULL) {
quarantine_drain(quarantine, 0);
idalloc(quarantine);
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
}
}
bool
quarantine_boot(void)
{
cassert(config_fill);
if (quarantine_tsd_boot())
return (true);
return (false);
}
| 5,792 | 27.965 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/mutex.c | #define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_once(void)
{
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
isthreaded = true;
}
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
{
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
pthread_once(&once_control, pthread_create_once);
return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
static void *
base_calloc_wrapper(size_t number, size_t size)
{
return base_calloc(&base_pool, number, size);
}
/* XXX We need somewhere to allocate mutexes from during early initialization */
#define BOOTSTRAP_POOL_SIZE 4096
#define BP_MASK 0xfffffffffffffff0UL
static char bootstrap_pool[BOOTSTRAP_POOL_SIZE] __attribute__((aligned (16)));
static char *bpp = bootstrap_pool;
static void *
bootstrap_calloc(size_t number, size_t size)
{
size_t my_size = ((number * size) + 0xf) & BP_MASK;
bpp += my_size;
if ((bpp - bootstrap_pool) > BOOTSTRAP_POOL_SIZE) {
return NULL;
}
return (void *)(bpp - my_size);
}
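/*
 * Illustrative sketch (disabled code): bootstrap_calloc() above is a bump
 * allocator over a static 4 KB arena that rounds each request up to a
 * 16-byte boundary and never frees.  A 40-byte request, for example,
 * consumes 48 bytes of the pool.
 */
#if 0
static size_t
bootstrap_rounded_size_example(size_t number, size_t size)
{
	return (((number * size) + 0xf) & BP_MASK);
}
#endif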
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
base_calloc_wrapper) != 0)
return (true);
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
malloc_mutex_prefork(malloc_mutex_t *mutex)
{
malloc_mutex_lock(mutex);
}
void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
malloc_mutex_unlock(mutex);
}
bool
mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return (false);
}
void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_mutex_unlock(mutex);
#else
if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
void
malloc_rwlock_prefork(malloc_rwlock_t *rwlock)
{
malloc_rwlock_wrlock(rwlock);
}
void
malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock)
{
malloc_rwlock_unlock(rwlock);
}
void
malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock)
{
#if (defined(JEMALLOC_MUTEX_INIT_CB) || defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
malloc_rwlock_unlock(rwlock);
#else
if (malloc_rwlock_init(rwlock)) {
malloc_printf("<jemalloc>: Error re-initializing rwlock in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
| 4,488 | 21.445 | 83 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/src/pool.c | #define JEMALLOC_POOL_C_
#include "jemalloc/internal/jemalloc_internal.h"
malloc_mutex_t pool_base_lock;
malloc_mutex_t pools_lock;
/*
* Initialize runtime state of the pool.
* Called both at pool creation and each pool opening.
*/
bool
pool_boot(pool_t *pool, unsigned pool_id)
{
pool->pool_id = pool_id;
if (malloc_mutex_init(&pool->memory_range_mtx))
return (true);
/*
* Rwlock initialization must be deferred if we are
* creating the base pool in the JEMALLOC_LAZY_LOCK case.
* This is safe because the lock won't be used until
* isthreaded has been set.
*/
if ((isthreaded || (pool != &base_pool))
&& malloc_rwlock_init(&pool->arenas_lock))
return (true);
return (false);
}
/*
* Initialize runtime state of the pool.
* Called at each pool opening.
*/
bool
pool_runtime_init(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_boot(pool))
return (true);
if (chunk_boot(pool))
return (true);
if (huge_boot(pool))
return (true);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(pool->arenas,
sizeof(arena_t) * pool->narenas_total);
for (size_t i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
if (arena_boot(arena))
return (true);
}
}
return (false);
}
/*
* Initialize pool and create its base arena.
* Called only at pool creation.
*/
bool
pool_new(pool_t *pool, unsigned pool_id)
{
if (pool_boot(pool, pool_id))
return (true);
if (base_init(pool))
return (true);
if (chunk_init(pool))
return (true);
if (huge_init(pool))
return (true);
if (pools_shared_data_create())
return (true);
pool->stats_cactive = 0;
pool->ctl_stats_active = 0;
pool->ctl_stats_allocated = 0;
pool->ctl_stats_mapped = 0;
pool->narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (pool->narenas_auto > chunksize / sizeof(arena_t *)) {
pool->narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
pool->narenas_auto);
}
pool->narenas_total = pool->narenas_auto;
/* Allocate and initialize arenas. */
pool->arenas = (arena_t **)base_calloc(pool, sizeof(arena_t *),
pool->narenas_total);
if (pool->arenas == NULL)
return (true);
arenas_extend(pool, 0);
return false;
}
/* Release the arenas associated with a pool. */
void
pool_destroy(pool_t *pool)
{
size_t i, j;
for (i = 0; i < pool->narenas_total; ++i) {
if (pool->arenas[i] != NULL) {
arena_t *arena = pool->arenas[i];
//arena_purge_all(arena); /* XXX */
for (j = 0; j < NBINS; j++)
malloc_mutex_destroy(&arena->bins[j].lock);
malloc_mutex_destroy(&arena->lock);
}
}
/*
* Set 'pool_id' to an incorrect value so that the pool cannot be used
* after being deleted.
*/
pool->pool_id = UINT_MAX;
if (pool->chunks_rtree) {
rtree_t *rtree = pool->chunks_rtree;
malloc_mutex_destroy(&rtree->mutex);
}
malloc_mutex_destroy(&pool->memory_range_mtx);
malloc_mutex_destroy(&pool->base_mtx);
malloc_mutex_destroy(&pool->base_node_mtx);
malloc_mutex_destroy(&pool->chunks_mtx);
malloc_mutex_destroy(&pool->huge_mtx);
malloc_rwlock_destroy(&pool->arenas_lock);
}
void
pool_prefork(void)
{
malloc_mutex_prefork(&pools_lock);
malloc_mutex_prefork(&pool_base_lock);
}
void
pool_postfork_parent(void)
{
malloc_mutex_postfork_parent(&pools_lock);
malloc_mutex_postfork_parent(&pool_base_lock);
}
void
pool_postfork_child(void)
{
malloc_mutex_postfork_child(&pools_lock);
malloc_mutex_postfork_child(&pool_base_lock);
}
| 3,723 | 21.166667 | 72 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/rb.c | #include "test/jemalloc_test.h"
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
a_type *rbp_bh_t; \
for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
rbp_bh_t != &(a_rbt)->rbt_nil; \
rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
(r_height)++; \
} \
} \
} while (0)
typedef struct node_s node_t;
struct node_s {
#define NODE_MAGIC 0x9823af7e
uint32_t magic;
rb_node(node_t) link;
uint64_t key;
};
static int
node_cmp(node_t *a, node_t *b) {
int ret;
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
ret = (a->key > b->key) - (a->key < b->key);
if (ret == 0) {
/*
* Duplicates are not allowed in the tree, so force an
* arbitrary ordering for non-identical items with equal keys.
*/
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
}
return (ret);
}
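/*
 * Example: two distinct nodes that both carry key 5 never compare equal;
 * the node at the lower address sorts first.  This keeps the comparator a
 * total order, so duplicate keys can safely coexist in one tree.
 */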
typedef rb_tree(node_t) tree_t;
rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
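/*
 * rb_gen() emits the static tree_* functions exercised below: tree_new,
 * tree_insert, tree_remove, tree_search, tree_nsearch, tree_psearch,
 * tree_first, tree_last, tree_next, tree_prev, tree_iter and
 * tree_reverse_iter.
 */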
TEST_BEGIN(test_rb_empty)
{
tree_t tree;
node_t key;
tree_new(&tree);
assert_ptr_null(tree_first(&tree), "Unexpected node");
assert_ptr_null(tree_last(&tree), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
}
TEST_END
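/*
 * Recursively verify red-black invariants, returning the number of
 * imbalances: leaf paths whose black depth differs from the tree's black
 * height.  A valid tree yields 0.
 */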
static unsigned
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
node_t *nil)
{
unsigned ret = 0;
node_t *left_node = rbtn_left_get(node_t, link, node);
node_t *right_node = rbtn_right_get(node_t, link, node);
if (rbtn_red_get(node_t, link, node) == false)
black_depth++;
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
assert_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
assert_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
}
if (node == nil)
return (ret);
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
if (left_node != nil)
ret += tree_recurse(left_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
/* Right subtree. */
if (right_node != nil)
ret += tree_recurse(right_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
return (ret);
}
static node_t *
tree_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *i = (unsigned *)data;
node_t *search_node;
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Test rb_search(). */
search_node = tree_search(tree, node);
assert_ptr_eq(search_node, node,
"tree_search() returned unexpected node");
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_nsearch() returned unexpected node");
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
assert_ptr_eq(search_node, node,
"tree_psearch() returned unexpected node");
(*i)++;
return (NULL);
}
static unsigned
tree_iterate(tree_t *tree)
{
unsigned i;
i = 0;
tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
static unsigned
tree_iterate_reverse(tree_t *tree)
{
unsigned i;
i = 0;
tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
}
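/*
 * Remove node from a tree currently holding nnodes items, then validate:
 * neighbor searches stay ordered, the tree remains balanced, and both
 * iteration directions visit exactly nnodes-1 items.
 */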
static void
node_remove(tree_t *tree, node_t *node, unsigned nnodes)
{
node_t *search_node;
unsigned black_height, imbalances;
tree_remove(tree, node);
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
if (search_node != NULL) {
assert_u64_ge(search_node->key, node->key,
"Key ordering error");
}
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
if (search_node != NULL) {
assert_u64_le(search_node->key, node->key,
"Key ordering error");
}
node->magic = 0;
rbtn_black_height(node_t, link, tree, black_height);
imbalances = tree_recurse(tree->rbt_root, black_height, 0,
&(tree->rbt_nil));
assert_u_eq(imbalances, 0, "Tree is unbalanced");
assert_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
"Unexpected node iteration count");
}
static node_t *
remove_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_next(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
static node_t *
remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
{
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_prev(tree, node);
node_remove(tree, node, *nnodes);
return (ret);
}
TEST_BEGIN(test_rb_random)
{
#define NNODES 25
#define NBAGS 250
#define SEED 42
sfmt_t *sfmt;
uint64_t bag[NNODES];
tree_t tree;
node_t nodes[NNODES];
unsigned i, j, k, black_height, imbalances;
sfmt = init_gen_rand(SEED);
for (i = 0; i < NBAGS; i++) {
switch (i) {
case 0:
/* Insert in order. */
for (j = 0; j < NNODES; j++)
bag[j] = j;
break;
case 1:
/* Insert in reverse order. */
for (j = 0; j < NNODES; j++)
bag[j] = NNODES - j - 1;
break;
default:
for (j = 0; j < NNODES; j++)
bag[j] = gen_rand64_range(sfmt, NNODES);
}
for (j = 1; j <= NNODES; j++) {
/* Initialize tree and nodes. */
tree_new(&tree);
tree.rbt_nil.magic = 0;
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
nodes[k].key = bag[k];
}
/* Insert nodes. */
for (k = 0; k < j; k++) {
tree_insert(&tree, &nodes[k]);
rbtn_black_height(node_t, link, &tree,
black_height);
imbalances = tree_recurse(tree.rbt_root,
black_height, 0, &(tree.rbt_nil));
assert_u_eq(imbalances, 0,
"Tree is unbalanced");
assert_u_eq(tree_iterate(&tree), k+1,
"Unexpected node iteration count");
assert_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
assert_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_last(&tree),
"Tree should not be empty");
tree_next(&tree, &nodes[k]);
tree_prev(&tree, &nodes[k]);
}
/* Remove nodes. */
switch (i % 4) {
case 0:
for (k = 0; k < j; k++)
node_remove(&tree, &nodes[k], j - k);
break;
case 1:
for (k = j; k > 0; k--)
node_remove(&tree, &nodes[k-1], k);
break;
case 2: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_iter(&tree, start,
remove_iterate_cb, (void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 3: {
node_t *start;
unsigned nnodes = j;
start = NULL;
do {
start = tree_reverse_iter(&tree, start,
remove_reverse_iterate_cb,
(void *)&nnodes);
nnodes--;
} while (start != NULL);
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
} default:
not_reached();
}
}
}
fini_gen_rand(sfmt);
#undef NNODES
#undef NBAGS
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rb_empty,
test_rb_random));
}
| 7,430 | 21.248503 | 71 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/util.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_pow2_ceil)
{
unsigned i, pow2;
size_t x;
assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
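	/*
	 * The loops below check, in order: exact powers of two are returned
	 * unchanged; one less than a power rounds up to that power; one more
	 * than a power rounds up to the next power; finally, every size up to
	 * 2^24 is verified exhaustively.
	 */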
for (i = 0; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
"Unexpected result");
}
for (i = 2; i < sizeof(size_t) * 8; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
"Unexpected result");
}
for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
"Unexpected result");
}
for (pow2 = 1; pow2 < 25; pow2++) {
for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
"Unexpected result, x=%zu", x);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax_no_endptr)
{
int err;
set_errno(0);
assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
err = get_errno();
assert_d_eq(err, 0, "Unexpected failure");
}
TEST_END
TEST_BEGIN(test_malloc_strtoumax)
{
struct test_s {
const char *input;
const char *expected_remainder;
int base;
int expected_errno;
const char *expected_errno_name;
uintmax_t expected_x;
};
#define ERR(e) e, #e
#define KUMAX(x) ((uintmax_t)x##ULL)
struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
{"", "", 0, ERR(EINVAL), UINTMAX_MAX},
{"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
{"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
{"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
{"42", "", 0, ERR(0), KUMAX(42)},
{"+42", "", 0, ERR(0), KUMAX(42)},
{"-42", "", 0, ERR(0), KUMAX(-42)},
{"042", "", 0, ERR(0), KUMAX(042)},
{"+042", "", 0, ERR(0), KUMAX(042)},
{"-042", "", 0, ERR(0), KUMAX(-042)},
{"0x42", "", 0, ERR(0), KUMAX(0x42)},
{"+0x42", "", 0, ERR(0), KUMAX(0x42)},
{"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
{"0", "", 0, ERR(0), KUMAX(0)},
{"1", "", 0, ERR(0), KUMAX(1)},
{"42", "", 0, ERR(0), KUMAX(42)},
{" 42", "", 0, ERR(0), KUMAX(42)},
{"42 ", " ", 0, ERR(0), KUMAX(42)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"42x", "x", 0, ERR(0), KUMAX(42)},
{"07", "", 0, ERR(0), KUMAX(7)},
{"010", "", 0, ERR(0), KUMAX(8)},
{"08", "8", 0, ERR(0), KUMAX(0)},
{"0_", "_", 0, ERR(0), KUMAX(0)},
{"0x", "x", 0, ERR(0), KUMAX(0)},
{"0X", "X", 0, ERR(0), KUMAX(0)},
{"0xg", "xg", 0, ERR(0), KUMAX(0)},
{"0XA", "", 0, ERR(0), KUMAX(10)},
{"010", "", 10, ERR(0), KUMAX(10)},
{"0x3", "x3", 10, ERR(0), KUMAX(0)},
{"12", "2", 2, ERR(0), KUMAX(1)},
{"78", "8", 8, ERR(0), KUMAX(7)},
{"9a", "a", 10, ERR(0), KUMAX(9)},
{"9A", "A", 10, ERR(0), KUMAX(9)},
{"fg", "g", 16, ERR(0), KUMAX(15)},
{"FG", "G", 16, ERR(0), KUMAX(15)},
{"0xfg", "g", 16, ERR(0), KUMAX(15)},
{"0XFG", "G", 16, ERR(0), KUMAX(15)},
{"z_", "_", 36, ERR(0), KUMAX(35)},
{"Z_", "_", 36, ERR(0), KUMAX(35)}
};
#undef ERR
#undef KUMAX
unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
struct test_s *test = &tests[i];
int err;
uintmax_t result;
char *remainder;
set_errno(0);
result = malloc_strtoumax(test->input, &remainder, test->base);
err = get_errno();
assert_d_eq(err, test->expected_errno,
"Expected errno %s for \"%s\", base %d",
test->expected_errno_name, test->input, test->base);
assert_str_eq(remainder, test->expected_remainder,
"Unexpected remainder for \"%s\", base %d",
test->input, test->base);
if (err == 0) {
assert_ju_eq(result, test->expected_x,
"Unexpected result for \"%s\", base %d",
test->input, test->base);
}
}
}
TEST_END
TEST_BEGIN(test_malloc_snprintf_truncated)
{
#define BUFLEN 15
char buf[BUFLEN];
int result;
size_t len;
#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
assert_d_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
for (len = 1; len < BUFLEN; len++) {
TEST("012346789", "012346789");
TEST("a0123b", "a%sb", "0123");
TEST("a01234567", "a%s%s", "0123", "4567");
TEST("a0123 ", "a%-6s", "0123");
TEST("a 0123", "a%6s", "0123");
TEST("a 012", "a%6.3s", "0123");
TEST("a 012", "a%*.*s", 6, 3, "0123");
TEST("a 123b", "a% db", 123);
TEST("a123b", "a%-db", 123);
TEST("a-123b", "a%-db", -123);
TEST("a+123b", "a%+db", 123);
}
#undef BUFLEN
#undef TEST
}
TEST_END
TEST_BEGIN(test_malloc_snprintf)
{
#define BUFLEN 128
char buf[BUFLEN];
int result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0)
TEST("hello", "hello");
TEST("50%, 100%", "50%%, %d%%", 100);
TEST("a0123b", "a%sb", "0123");
TEST("a 0123b", "a%5sb", "0123");
TEST("a 0123b", "a%*sb", 5, "0123");
TEST("a0123 b", "a%-5sb", "0123");
TEST("a0123b", "a%*sb", -1, "0123");
TEST("a0123 b", "a%*sb", -5, "0123");
TEST("a0123 b", "a%-*sb", -5, "0123");
TEST("a012b", "a%.3sb", "0123");
TEST("a012b", "a%.*sb", 3, "0123");
TEST("a0123b", "a%.*sb", -3, "0123");
TEST("a 012b", "a%5.3sb", "0123");
TEST("a 012b", "a%5.*sb", 3, "0123");
TEST("a 012b", "a%*.3sb", 5, "0123");
TEST("a 012b", "a%*.*sb", 5, 3, "0123");
TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
TEST("_abcd_", "_%x_", 0xabcd);
TEST("_0xabcd_", "_%#x_", 0xabcd);
TEST("_1234_", "_%o_", 01234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_1234_", "_%d_", 1234);
TEST("_ 1234_", "_% d_", 1234);
TEST("_+1234_", "_%+d_", 1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_-1234_", "_% d_", -1234);
TEST("_-1234_", "_%+d_", -1234);
TEST("_-1234_", "_%d_", -1234);
TEST("_1234_", "_%d_", 1234);
TEST("_-1234_", "_%i_", -1234);
TEST("_1234_", "_%i_", 1234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
TEST("_0x1234abc_", "_%#x_", 0x1234abc);
TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
TEST("_c_", "_%c_", 'c');
TEST("_string_", "_%s_", "string");
TEST("_0x42_", "_%p_", ((void *)0x42));
TEST("_-1234_", "_%ld_", ((long)-1234));
TEST("_1234_", "_%ld_", ((long)1234));
TEST("_-1234_", "_%li_", ((long)-1234));
TEST("_1234_", "_%li_", ((long)1234));
TEST("_01234_", "_%#lo_", ((long)01234));
TEST("_1234_", "_%lu_", ((long)1234));
TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
TEST("_-1234_", "_%lld_", ((long long)-1234));
TEST("_1234_", "_%lld_", ((long long)1234));
TEST("_-1234_", "_%lli_", ((long long)-1234));
TEST("_1234_", "_%lli_", ((long long)1234));
TEST("_01234_", "_%#llo_", ((long long)01234));
TEST("_1234_", "_%llu_", ((long long)1234));
TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
/* turn off ICC warnings on invalid format string conversion */
#pragma warning (push)
#pragma warning (disable: 269)
#endif
TEST("_-1234_", "_%qd_", ((long long)-1234));
TEST("_1234_", "_%qd_", ((long long)1234));
TEST("_-1234_", "_%qi_", ((long long)-1234));
TEST("_1234_", "_%qi_", ((long long)1234));
TEST("_01234_", "_%#qo_", ((long long)01234));
TEST("_1234_", "_%qu_", ((long long)1234));
TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
TEST("_1234_", "_%jd_", ((intmax_t)1234));
TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
TEST("_1234_", "_%ji_", ((intmax_t)1234));
TEST("_01234_", "_%#jo_", ((intmax_t)01234));
TEST("_1234_", "_%ju_", ((intmax_t)1234));
TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
TEST("_1234_", "_%zd_", ((ssize_t)1234));
TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
TEST("_1234_", "_%zi_", ((ssize_t)1234));
TEST("_01234_", "_%#zo_", ((ssize_t)01234));
TEST("_1234_", "_%zu_", ((ssize_t)1234));
TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
#undef BUFLEN
}
TEST_END
int
main(void)
{
return (test(
test_pow2_ceil,
test_malloc_strtoumax_no_endptr,
test_malloc_strtoumax,
test_malloc_snprintf_truncated,
test_malloc_snprintf));
}
| 8,905 | 28.2 | 70 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/stats.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_stats_summary)
{
size_t *cactive;
size_t sz, allocated, active, mapped;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(cactive);
assert_d_eq(mallctl("pool.0.stats.cactive", &cactive, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.allocated", &allocated, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.active", &active, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.mapped", &mapped, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(active, *cactive,
"active should be no larger than cactive");
assert_zu_le(allocated, active,
"allocated should be no larger than active");
assert_zu_le(active, mapped,
"active should be no larger than mapped");
}
}
TEST_END
TEST_BEGIN(test_stats_chunks)
{
size_t current, high;
uint64_t total;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.current", ¤t, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.chunks.total", &total, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.chunks.high", &high, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(current, high,
"current should be no larger than high");
assert_u64_le((uint64_t)high, total,
"high should be no larger than total");
}
}
TEST_END
TEST_BEGIN(test_stats_huge)
{
void *p;
uint64_t epoch;
size_t allocated;
uint64_t nmalloc, ndalloc, nrequests;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
p = mallocx(arena_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.huge.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
		assert_u64_le(nmalloc, nrequests,
		    "nmalloc should be no larger than nrequests");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_summary)
{
unsigned arena;
void *little, *large;
uint64_t epoch;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
size_t mapped;
uint64_t npurge, nmadvise, purged;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx(arena_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure");
assert_d_eq(mallctl("pool.0.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.purged", &purged, &sz, NULL, 0),
expected, "Unexepected mallctl() result");
if (config_stats) {
assert_u64_gt(npurge, 0,
"At least one purge should have occurred");
assert_u64_le(nmadvise, purged,
"nmadvise should be no greater than purged");
}
dallocx(little, 0);
dallocx(large, 0);
}
TEST_END
void *
thd_start(void *arg)
{
return (NULL);
}
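/*
 * Creating and joining a thread forces jemalloc out of lazy-lock mode
 * (isthreaded becomes true), so the tcache interactions tested below go
 * through real locking.
 */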
static void
no_lazy_lock(void)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_BEGIN(test_stats_arenas_small)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.small.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
		assert_u64_gt(nmalloc, 0,
		    "nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_large)
{
unsigned arena;
void *p;
size_t sz, allocated;
uint64_t epoch, nmalloc, ndalloc, nrequests;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.large.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
		assert_u64_gt(nmalloc, 0,
		    "nmalloc should be greater than zero");
		assert_u64_ge(nmalloc, ndalloc,
		    "nmalloc should be at least as large as ndalloc");
		assert_u64_gt(nrequests, 0,
		    "nrequests should be greater than zero");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_bins)
{
unsigned arena;
void *p;
size_t sz, allocated, curruns;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nruns, nreruns;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(arena_bin_info[0].reg_size, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.allocated", &allocated, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nfills", &nfills, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nruns", &nruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.bins.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
if (config_tcache) {
assert_u64_gt(nfills, 0,
"At least one fill should have occurred");
assert_u64_gt(nflushes, 0,
"At least one flush should have occurred");
}
assert_u64_gt(nruns, 0,
"At least one run should have been allocated");
assert_zu_gt(curruns, 0,
"At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_stats_arenas_lruns)
{
unsigned arena;
void *p;
uint64_t epoch, nmalloc, ndalloc, nrequests;
size_t curruns, sz;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.pool.0.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
assert_d_eq(mallctl("pool.0.stats.arenas.0.lruns.0.curruns", &curruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
		assert_zu_gt(curruns, 0,
		    "At least one run should be currently allocated");
}
dallocx(p, 0);
}
TEST_END
int
main(void)
{
return (test(
test_stats_summary,
test_stats_chunks,
test_stats_huge,
test_stats_arenas_summary,
test_stats_arenas_small,
test_stats_arenas_large,
test_stats_arenas_bins,
test_stats_arenas_lruns));
}
| 12,318 | 30.997403 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/bitmap.c | #include "test/jemalloc_test.h"
#if (LG_BITMAP_MAXBITS > 12)
# define MAXBITS 4500
#else
# define MAXBITS (1U << LG_BITMAP_MAXBITS)
#endif
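/*
 * The cap bounds test runtime: test_bitmap_sfu below is quadratic in the
 * number of bits, so large LG_BITMAP_MAXBITS configurations are clamped
 * to 4500 bits.
 */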
TEST_BEGIN(test_bitmap_size)
{
size_t i, prev_size;
prev_size = 0;
for (i = 1; i <= MAXBITS; i++) {
size_t size = bitmap_size(i);
assert_true(size >= prev_size,
"Bitmap size is smaller than expected");
prev_size = size;
}
}
TEST_END
TEST_BEGIN(test_bitmap_init)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) {
assert_false(bitmap_get(bitmap, &binfo, j),
"Bit should be unset");
}
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_set)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_unset)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
for (j = 0; j < i; j++)
bitmap_unset(bitmap, &binfo, j);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
TEST_BEGIN(test_bitmap_sfu)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
ssize_t j;
bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
for (j = 0; j < i; j++) {
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after "
"previous first unset bit");
}
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
/*
* Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits.
*/
for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should the bit previously "
"unset");
bitmap_unset(bitmap, &binfo, j);
}
assert_false(bitmap_get(bitmap, &binfo, 0),
"Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and
* verify that bitmap_sfu() looks past them.
*/
for (j = 1; j < i; j++) {
bitmap_set(bitmap, &binfo, j - 1);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should be just after the "
"bit previously set");
bitmap_unset(bitmap, &binfo, j);
}
assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
"First unset bit should be the last bit");
assert_true(bitmap_full(bitmap, &binfo),
"All bits should be set");
free(bitmap);
}
}
}
TEST_END
int
main(void)
{
return (test(
test_bitmap_size,
test_bitmap_init,
test_bitmap_set,
test_bitmap_unset,
test_bitmap_sfu));
}
| 3,614 | 20.777108 | 57 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/junk.c | #include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf =
"abort:false,junk:true,zero:false,redzone:true,quarantine:0";
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;
static void *most_recently_junked;
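/*
 * Interception pattern: each hook below chains to the saved original
 * implementation, verifies (where feasible) that the deallocated region
 * was junk-filled with 0x5a, and records the pointer in
 * most_recently_junked so tests can assert that junking actually ran.
 */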
static void
arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
{
size_t i;
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
most_recently_junked = ptr;
}
static void
arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
{
size_t i;
arena_dalloc_junk_large_orig(ptr, usize);
for (i = 0; i < usize; i++) {
assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
most_recently_junked = ptr;
}
static void
huge_dalloc_junk_intercept(void *ptr, size_t usize)
{
huge_dalloc_junk_orig(ptr, usize);
/*
* The conditions under which junk filling actually occurs are nuanced
* enough that it doesn't make sense to duplicate the decision logic in
* test code, so don't actually check that the region is junk-filled.
*/
most_recently_junked = ptr;
}
static void
test_junk(size_t sz_min, size_t sz_max)
{
char *s;
size_t sz_prev, sz, i;
arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
huge_dalloc_junk_orig = huge_dalloc_junk;
huge_dalloc_junk = huge_dalloc_junk_intercept;
sz_prev = 0;
s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
assert_c_eq(s[i], 0xa5,
"Newly allocated byte %zu/%zu isn't junk-filled",
i, sz);
s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
void *junked = (void *)s;
s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
assert_ptr_eq(most_recently_junked, junked,
"Expected region of size %zu to be junk-filled",
sz);
}
}
dallocx(s, 0);
assert_ptr_eq(most_recently_junked, (void *)s,
"Expected region of size %zu to be junk-filled", sz);
arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
huge_dalloc_junk = huge_dalloc_junk_orig;
}
TEST_BEGIN(test_junk_small)
{
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
}
TEST_END
TEST_BEGIN(test_junk_large)
{
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, arena_maxclass);
}
TEST_END
TEST_BEGIN(test_junk_huge)
{
test_skip_if(!config_fill);
test_junk(arena_maxclass+1, chunksize*2);
}
TEST_END
arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
static void *most_recently_trimmed;
static void
arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
{
arena_ralloc_junk_large_orig(ptr, old_usize, usize);
assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
most_recently_trimmed = ptr;
}
TEST_BEGIN(test_junk_large_ralloc_shrink)
{
void *p1, *p2;
p1 = mallocx(arena_maxclass, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
p2 = rallocx(p1, arena_maxclass-PAGE, 0);
assert_ptr_eq(p1, p2, "Unexpected move during shrink");
arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
assert_ptr_eq(most_recently_trimmed, p1,
"Expected trimmed portion of region to be junk-filled");
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_junk_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_junk_small,
test_junk_large,
test_junk_huge,
test_junk_large_ralloc_shrink,
test_junk_redzone));
}
| 5,541 | 24.190909 | 76 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/ckh.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_new_delete)
{
ckh_t ckh;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_count_insert_search_remove)
{
ckh_t ckh;
const char *strs[] = {
"a string",
"A string",
"a string.",
"A string."
};
const char *missing = "A string not in the hash table.";
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
ckh_insert(&ckh, strs[i], strs[i]);
assert_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
}
/* Search. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
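		/*
		 * The low two bits of i select whether the key and/or value
		 * out-pointers are requested, covering all four
		 * NULL/non-NULL combinations over the loop.
		 */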
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_search(&ckh, strs[i], kp, vp),
"Unexpected ckh_search() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
}
assert_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
/* Remove. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
union {
void *p;
const char *s;
} k, v;
void **kp, **vp;
const char *ks, *vs;
kp = (i & 1) ? &k.p : NULL;
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
assert_false(ckh_remove(&ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
assert_ptr_eq((void *)ks, (void *)k.s,
"Key mismatch, i=%zu", i);
assert_ptr_eq((void *)vs, (void *)v.s,
"Value mismatch, i=%zu", i);
assert_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
sizeof(strs)/sizeof(const char *) - i - 1,
ckh_count(&ckh));
}
ckh_delete(&ckh);
}
TEST_END
TEST_BEGIN(test_insert_iter_remove)
{
#define NITEMS ZU(1000)
ckh_t ckh;
void **p[NITEMS];
void *q, *r;
size_t i;
assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
"Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NITEMS; i++) {
size_t j;
for (j = i; j < NITEMS; j++) {
assert_false(ckh_insert(&ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
assert_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
}
assert_zu_eq(ckh_count(&ckh), NITEMS,
"ckh_count() should return %zu, but it returned %zu",
NITEMS, ckh_count(&ckh));
for (j = i + 1; j < NITEMS; j++) {
assert_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
{
bool seen[NITEMS];
size_t tabind;
memset(seen, 0, sizeof(seen));
for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
false;) {
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
for (k = 0; k < NITEMS; k++) {
if (p[k] == q) {
assert_false(seen[k],
"Item %zu already seen", k);
seen[k] = true;
break;
}
}
}
for (j = 0; j < i + 1; j++)
assert_true(seen[j], "Item %zu not seen", j);
for (; j < NITEMS; j++)
assert_false(seen[j], "Item %zu seen", j);
}
}
for (i = 0; i < NITEMS; i++) {
assert_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
assert_false(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[i], q, "Key pointer mismatch");
assert_ptr_eq(p[i], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
assert_true(ckh_remove(&ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
ckh_delete(&ckh);
#undef NITEMS
}
TEST_END
int
main(void)
{
return (test(
test_new_delete,
test_count_insert_search_remove,
test_insert_iter_remove));
}
| 5,301 | 24.613527 | 70 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/math.c | #include "test/jemalloc_test.h"
#define MAX_REL_ERR 1.0e-9
#define MAX_ABS_ERR 1.0e-9
#include <float.h>
#ifndef INFINITY
#define INFINITY (DBL_MAX + DBL_MAX)
#endif
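/*
 * Approximate floating-point equality: values closer than max_abs_err
 * compare equal (covers comparisons near zero, where relative error is
 * unstable); otherwise the relative error is checked against max_rel_err.
 */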
static bool
double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
{
double rel_err;
if (fabs(a - b) < max_abs_err)
return (true);
rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
return (rel_err < max_rel_err);
}
static uint64_t
factorial(unsigned x)
{
uint64_t ret = 1;
unsigned i;
for (i = 2; i <= x; i++)
ret *= (uint64_t)i;
return (ret);
}
TEST_BEGIN(test_ln_gamma_factorial)
{
unsigned x;
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
for (x = 1; x <= 21; x++) {
assert_true(double_eq_rel(exp(ln_gamma(x)),
(double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect factorial result for x=%u", x);
}
}
TEST_END
/* Expected ln_gamma([0.0..100.0] increment=0.25). */
static const double ln_gamma_misc_expected[] = {
INFINITY,
1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
-0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
359.13420536957539753
};
TEST_BEGIN(test_ln_gamma_misc)
{
unsigned i;
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
double x = (double)i * 0.25;
assert_true(double_eq_rel(ln_gamma(x),
ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect ln_gamma result for i=%u", i);
}
}
TEST_END
/* Expected pt_norm([0.01..0.99] increment=0.01). */
static const double pt_norm_expected[] = {
-INFINITY,
-2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
-1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
-1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
-1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
-1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
-0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
-0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
-0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
-0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
-0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
-0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
-0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
-0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
-0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
-0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
-0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
-0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
1.88079360815125041, 2.05374891063182208, 2.32634787404084076
};
TEST_BEGIN(test_pt_norm)
{
unsigned i;
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
double p = (double)i * 0.01;
assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_norm result for i=%u", i);
}
}
TEST_END
/*
* Expected pt_chi2(p=[0.01..0.99] increment=0.07,
* df={0.1, 1.1, 10.1, 100.1, 1000.1}).
*/
static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
static const double pt_chi2_expected[] = {
1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
2.606673548632508, 4.602913725294877, 5.646152813924212,
6.488971315540869, 7.249823275816285, 7.977314231410841,
8.700354939944047, 9.441728024225892, 10.224338321374127,
11.076435368801061, 12.039320937038386, 13.183878752697167,
14.657791935084575, 16.885728216339373, 23.361991680031817,
70.14844087392152, 80.92379498849355, 85.53325420085891,
88.94433120715347, 91.83732712857017, 94.46719943606301,
96.96896479994635, 99.43412843510363, 101.94074719829733,
104.57228644307247, 107.43900093448734, 110.71844673417287,
114.76616819871325, 120.57422505959563, 135.92318818757556,
899.0072447849649, 937.9271278858220, 953.8117189560207,
965.3079371501154, 974.8974061207954, 983.4936235182347,
991.5691170518946, 999.4334123954690, 1007.3391826856553,
1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
1046.4872561869577, 1063.5717461999654, 1107.0741966053859
};
TEST_BEGIN(test_pt_chi2)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
double df = pt_chi2_df[i];
double ln_gamma_df = ln_gamma(df * 0.5);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_chi2 result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
/*
 * Expected pt_gamma(p=[0.01..0.99] increment=0.07,
* shape=[0.5..3.0] increment=0.5).
*/
static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
static const double pt_gamma_expected[] = {
7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
4.7230515633946677, 5.6417477865306020, 8.4059469148854635
};
TEST_BEGIN(test_pt_gamma_shape)
{
unsigned i, j;
unsigned e = 0;
for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
double shape = pt_gamma_shape[i];
double ln_gamma_shape = ln_gamma(shape);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
MAX_ABS_ERR),
"Incorrect pt_gamma result for i=%u, j=%u", i, j);
e++;
}
}
}
TEST_END
TEST_BEGIN(test_pt_gamma_scale)
{
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
assert_true(double_eq_rel(
pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
MAX_ABS_ERR),
"Scale should be trivially equivalent to external multiplication");
}
TEST_END
int
main(void)
{
return (test(
test_ln_gamma_factorial,
test_ln_gamma_misc,
test_pt_norm,
test_pt_chi2,
test_pt_gamma_shape,
test_pt_gamma_scale));
}
| 18,448 | 45.706329 | 73 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/ql.c | #include "test/jemalloc_test.h"
/* Number of ring entries, in [2..26]. */
#define NENTRIES 9
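/*
 * The upper bound of 26 keeps the ids assigned by init_entries()
 * ('a' + i) within 'a'..'z'.
 */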
typedef struct list_s list_t;
typedef ql_head(list_t) list_head_t;
struct list_s {
ql_elm(list_t) link;
char id;
};
static void
test_empty_list(list_head_t *head)
{
list_t *t;
unsigned i;
assert_ptr_null(ql_first(head), "Unexpected element for empty list");
assert_ptr_null(ql_last(head, link),
"Unexpected element for empty list");
i = 0;
ql_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
i = 0;
ql_reverse_foreach(t, head, link) {
i++;
}
assert_u_eq(i, 0, "Unexpected element for empty list");
}
TEST_BEGIN(test_ql_empty)
{
list_head_t head;
ql_new(&head);
test_empty_list(&head);
}
TEST_END
static void
init_entries(list_t *entries, unsigned nentries)
{
unsigned i;
for (i = 0; i < nentries; i++) {
entries[i].id = 'a' + i;
ql_elm_new(&entries[i], link);
}
}
static void
test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
{
list_t *t;
unsigned i;
assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
"Element id mismatch");
i = 0;
ql_foreach(t, head, link) {
assert_c_eq(t->id, entries[i].id, "Element id mismatch");
i++;
}
i = 0;
ql_reverse_foreach(t, head, link) {
assert_c_eq(t->id, entries[nentries-i-1].id,
"Element id mismatch");
i++;
}
for (i = 0; i < nentries-1; i++) {
t = ql_next(head, &entries[i], link);
assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
}
assert_ptr_null(ql_next(head, &entries[nentries-1], link),
"Unexpected element");
assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
for (i = 1; i < nentries; i++) {
t = ql_prev(head, &entries[i], link);
assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
}
}
TEST_BEGIN(test_ql_tail_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_tail_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_tail_insert(&head, &entries[i], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, entries, NENTRIES-i);
ql_tail_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_head_insert)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
TEST_BEGIN(test_ql_head_remove)
{
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
for (i = 0; i < NENTRIES; i++)
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, &entries[i], NENTRIES-i);
ql_head_remove(&head, list_t, link);
}
test_empty_list(&head);
}
TEST_END
TEST_BEGIN(test_ql_insert)
{
list_head_t head;
list_t entries[8];
list_t *a, *b, *c, *d, *e, *f, *g, *h;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
a = &entries[0];
b = &entries[1];
c = &entries[2];
d = &entries[3];
e = &entries[4];
f = &entries[5];
g = &entries[6];
h = &entries[7];
/*
* ql_remove(), ql_before_insert(), and ql_after_insert() are used
* internally by other macros that are already tested, so there's no
* need to test them completely. However, insertion/deletion from the
* middle of lists is not otherwise tested; do so here.
*/
ql_tail_insert(&head, f, link);
ql_before_insert(&head, f, b, link);
ql_before_insert(&head, f, c, link);
ql_after_insert(f, h, link);
ql_after_insert(f, g, link);
ql_before_insert(&head, b, a, link);
ql_after_insert(c, d, link);
ql_before_insert(&head, f, e, link);
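/*
 * The insertions above grow the list as: f; b,f; b,c,f; b,c,f,h;
 * b,c,f,g,h; a,b,c,f,g,h; a,b,c,d,f,g,h; a,b,c,d,e,f,g,h -- so the final
 * order matches entries[].
 */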
test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
}
TEST_END
int
main(void)
{
return (test(
test_ql_empty,
test_ql_tail_insert,
test_ql_tail_remove,
test_ql_head_insert,
test_ql_head_remove,
test_ql_insert));
}
| 4,483 | 20.352381 | 73 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/mallctl.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_mallctl_errors)
{
uint64_t epoch;
size_t sz;
assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
"mallctl() should return ENOENT for non-existent names");
assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
EINVAL, "mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_errors)
{
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
"mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END
TEST_BEGIN(test_mallctlbymib_errors)
{
uint64_t epoch;
size_t sz;
size_t mib[1];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
"attempt to write read-only value");
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
TEST_END
TEST_BEGIN(test_mallctl_read_write)
{
uint64_t old_epoch, new_epoch;
size_t sz = sizeof(old_epoch);
/* Blind. */
assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
TEST_BEGIN(test_mallctlnametomib_short_mib)
{
size_t mib[6];
size_t miblen;
void *mem;
pool_t *pool;
unsigned npools;
size_t sz = sizeof(npools);
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne((void*)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
miblen = 5;
mib[5] = 42;
assert_d_eq(mallctlnametomib("pool.1.arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_zu_eq(miblen, 5, "Unexpected mib output length");
assert_zu_eq(mib[5], 42,
"mallctlnametomib() wrote past the end of the input mib");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_mallctl_config)
{
#define TEST_MALLCTL_CONFIG(config) do { \
bool oldval; \
size_t sz = sizeof(oldval); \
assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_CONFIG(debug);
TEST_MALLCTL_CONFIG(fill);
TEST_MALLCTL_CONFIG(lazy_lock);
TEST_MALLCTL_CONFIG(munmap);
TEST_MALLCTL_CONFIG(prof);
TEST_MALLCTL_CONFIG(prof_libgcc);
TEST_MALLCTL_CONFIG(prof_libunwind);
TEST_MALLCTL_CONFIG(stats);
TEST_MALLCTL_CONFIG(tcache);
TEST_MALLCTL_CONFIG(tls);
TEST_MALLCTL_CONFIG(utrace);
TEST_MALLCTL_CONFIG(valgrind);
TEST_MALLCTL_CONFIG(xmalloc);
#undef TEST_MALLCTL_CONFIG
}
TEST_END
TEST_BEGIN(test_mallctl_opt)
{
bool config_always = true;
#define TEST_MALLCTL_OPT(t, opt, config) do { \
t oldval; \
size_t sz = sizeof(oldval); \
int expected = config_##config ? 0 : ENOENT; \
int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
assert_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(size_t, lg_chunk, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(size_t, narenas, always);
TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
TEST_MALLCTL_OPT(bool, junk, fill);
TEST_MALLCTL_OPT(size_t, quarantine, fill);
TEST_MALLCTL_OPT(bool, redzone, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
TEST_MALLCTL_OPT(bool, utrace, utrace);
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, tcache);
TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
TEST_MALLCTL_OPT(bool, prof, prof);
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
TEST_MALLCTL_OPT(bool, prof_active, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
TEST_MALLCTL_OPT(bool, prof_accum, prof);
TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
TEST_MALLCTL_OPT(bool, prof_gdump, prof);
TEST_MALLCTL_OPT(bool, prof_final, prof);
TEST_MALLCTL_OPT(bool, prof_leak, prof);
#undef TEST_MALLCTL_OPT
}
TEST_END
/*
* create a couple of pools and check their size
* using mib feature
*/
TEST_BEGIN(test_mallctl_with_multiple_pools)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools;
int i;
size_t sz = sizeof(npools);
size_t mib[4], miblen;
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
pools[i] = je_pool_create((char *)mem + (i * POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne((void *)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
miblen = 4;
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
/*
* This loop does not use the local variable pools[].
* Moreover, we omit pool 0 (the default pool).
*/
for (i = 1; i <= NPOOLS; ++i) {
unsigned narenas;
mib[1] = i;
sz = sizeof(narenas);
assert_d_eq(mallctlbymib(mib, miblen, &narenas, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
}
for (i = 0; i < NPOOLS; ++i) {
je_pool_delete(pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_manpage_example)
{
unsigned nbins, i;
size_t mib[6];
size_t len, miblen;
len = sizeof(nbins);
assert_d_eq(mallctl("pool.0.arenas.nbins", &nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 6;
assert_d_eq(mallctlnametomib("pool.0.arenas.bin.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[4] = i;
len = sizeof(bin_size);
assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
}
TEST_END
TEST_BEGIN(test_thread_arena)
{
unsigned arena_old, arena_new, narenas;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
arena_new = narenas - 1;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
arena_new = 0;
assert_d_eq(mallctl("thread.pool.0.arena", &arena_old, &sz, &arena_new,
sizeof(unsigned)), 0, "Unexpected mallctl() failure");
}
TEST_END
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
unsigned npools;
size_t sz = sizeof(unsigned);
size_t mib[5];
size_t miblen = 5;
void *mem;
pool_t *pool;
mem = calloc(1, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
pool = je_pool_create(mem, POOL_MINIMAL_SIZE, 1, 1);
assert_ptr_ne((void *)pool, NULL, "Unexpected je_pool_create() failure");
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, 2, "Unexpected number of pools");
assert_d_eq(mallctl("pool.1.arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctl("pool.1.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_d_eq(mallctlnametomib("pool.1.arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[3] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
je_pool_delete(pool);
free(mem);
}
TEST_END
TEST_BEGIN(test_arena_i_dss)
{
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
size_t mib[5];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("pool.0.arena.0.dss", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
mib[3] = narenas_total_get(pools[0]);
dss_prec_new = "disabled";
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
}
TEST_END
TEST_BEGIN(test_arenas_initialized)
{
unsigned narenas;
size_t sz = sizeof(narenas);
assert_d_eq(mallctl("pool.0.arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
{
VARIABLE_ARRAY(bool, initialized, narenas);
sz = narenas * sizeof(bool);
assert_d_eq(mallctl("pool.0.arenas.initialized", initialized, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
}
}
TEST_END
TEST_BEGIN(test_arenas_constants)
{
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas."#name, &(name), &sz, NULL, 0), 0, \
"Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
#undef TEST_ARENAS_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_bin_constants)
{
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.bin.0."#name, &(name), &sz, NULL, 0), \
0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
#undef TEST_ARENAS_BIN_CONSTANT
}
TEST_END
TEST_BEGIN(test_arenas_lrun_constants)
{
#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.arenas.lrun.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
#undef TEST_ARENAS_LRUN_CONSTANT
}
TEST_END
/*
* create a couple of pools and extend their arenas
*/
TEST_BEGIN(test_arenas_extend)
{
#define NPOOLS 4
pool_t *pools[NPOOLS];
void *mem;
unsigned npools, narenas_before, arena, narenas_after;
int i;
size_t mib_narenas[4],
mib_extend[4],
miblen = sizeof(mib_narenas),
sz = sizeof(unsigned);
mem = calloc(NPOOLS, POOL_MINIMAL_SIZE);
assert_ptr_ne(mem, NULL, "Unexpected calloc() failure");
for (i = 0; i < NPOOLS; ++i) {
pools[i] = je_pool_create((char *)mem + (i * POOL_MINIMAL_SIZE), POOL_MINIMAL_SIZE, 0, 1);
assert_ptr_ne((void *)pools[i], NULL, "Unexpected je_pool_create() failure");
}
assert_d_eq(mallctl("pools.npools", &npools, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_u_eq(npools, NPOOLS+1, "Unexpected number of pools");
assert_d_eq(mallctlnametomib("pool.0.arenas.narenas", mib_narenas, &miblen), 0,
"Unexpected mallctlnametomib() failure");
assert_d_eq(mallctlnametomib("pool.0.arenas.extend", mib_extend, &miblen), 0,
"Unexpected mallctlnametomib() failure");
/*
* This loop does not use the local variable pools[].
* Moreover, we omit pool 0 (the default pool).
*/
for (i = 1; i <= NPOOLS; ++i) {
mib_narenas[1] = i;
mib_extend[1] = i;
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_before, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_extend, miblen, &arena, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_d_eq(mallctlbymib(mib_narenas, miblen, &narenas_after, &sz, NULL, 0),
0, "Unexpected mallctlbymib() failure");
assert_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
for (i = 0; i < NPOOLS; ++i) {
je_pool_delete(pools[i]);
}
free(mem);
#undef NPOOLS
}
TEST_END
TEST_BEGIN(test_stats_arenas)
{
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
assert_d_eq(mallctl("pool.0.stats.arenas.0."#name, &(name), &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
} while (0)
TEST_STATS_ARENAS(const char *, dss);
TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(size_t, pactive);
TEST_STATS_ARENAS(size_t, pdirty);
#undef TEST_STATS_ARENAS
}
TEST_END
/*
* Each arena allocates 32 kilobytes of CTL metadata, and since we only
* have 12 megabytes, we have to hard-limit it to a known value, otherwise
* on systems with high CPU count, the tests might run out of memory.
*/
#define NARENAS_IN_POOL 64
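/*
 * Sanity check on that budget: 64 arenas * 32 KB of CTL metadata each is
 * roughly 2 MB, comfortably within the 12 MB mentioned above.
 */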
int
main(void)
{
opt_narenas = NARENAS_IN_POOL;
return (test(
test_mallctl_errors,
test_mallctlnametomib_errors,
test_mallctlbymib_errors,
test_mallctl_read_write,
test_mallctlnametomib_short_mib,
test_mallctl_config,
test_mallctl_opt,
test_mallctl_with_multiple_pools,
test_manpage_example,
test_thread_arena,
test_arena_i_purge,
test_arena_i_dss,
test_arenas_initialized,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,
test_arenas_extend,
test_stats_arenas));
}
| 16,249 | 28.98155 | 83 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/rtree.c | #include "test/jemalloc_test.h"
void *
rtree_malloc(pool_t *pool, size_t size)
{
return imalloc(size);
}
void
rtree_free(pool_t *pool, void *ptr)
{
return idalloc(ptr);
}
TEST_BEGIN(test_rtree_get_empty)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
assert_u_eq(rtree_get(rtree, 0), 0,
    "rtree_get() should return 0 for an empty tree");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
rtree_set(rtree, 0, 1);
assert_u_eq(rtree_get(rtree, 0), 1,
"rtree_get() should return previously set value");
rtree_set(rtree, ~((uintptr_t)0), 1);
assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
"rtree_get() should return previously set value");
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_bits)
{
unsigned i, j, k;
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
rtree_set(rtree, keys[j], 1);
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
assert_u_eq(rtree_get(rtree, keys[k]), 1,
"rtree_get() should return previously set "
"value and ignore insignificant key bits; "
"i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_u_eq(rtree_get(rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
rtree_set(rtree, keys[j], 0);
}
rtree_delete(rtree);
}
}
TEST_END
TEST_BEGIN(test_rtree_random)
{
unsigned i;
sfmt_t *sfmt;
#define NSET 100
#define SEED 42
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
rtree_t *rtree = rtree_new(i, rtree_malloc, rtree_free, pools[0]);
uintptr_t keys[NSET];
unsigned j;
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
rtree_set(rtree, keys[j], 1);
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 1,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
rtree_set(rtree, keys[j], 0);
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_u_eq(rtree_get(rtree, keys[j]), 0,
"rtree_get() should return previously set value");
}
rtree_delete(rtree);
}
fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END
int
main(void)
{
return (test(
test_rtree_get_empty,
test_rtree_extrema,
test_rtree_bits,
test_rtree_random));
}
| 3,032 | 22.152672 | 68 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/hash.c | /*
* This file is based on code that is part of SMHasher
* (https://code.google.com/p/smhasher/), and is subject to the MIT license
* (http://www.opensource.org/licenses/mit-license.php). Both email addresses
* associated with the source code's revision history belong to Austin Appleby,
* and the revision history ranges from 2010 to 2012. Therefore the copyright
* and license are here taken to be:
*
* Copyright (c) 2010-2012 Austin Appleby
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test/jemalloc_test.h"
typedef enum {
hash_variant_x86_32,
hash_variant_x86_128,
hash_variant_x64_128
} hash_variant_t;
static size_t
hash_variant_bits(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return (32);
case hash_variant_x86_128: return (128);
case hash_variant_x64_128: return (128);
default: not_reached();
}
}
static const char *
hash_variant_string(hash_variant_t variant)
{
switch (variant) {
case hash_variant_x86_32: return ("hash_x86_32");
case hash_variant_x86_128: return ("hash_x86_128");
case hash_variant_x64_128: return ("hash_x64_128");
default: not_reached();
}
}
static void
hash_variant_verify(hash_variant_t variant)
{
const size_t hashbytes = hash_variant_bits(variant) / 8;
uint8_t key[256];
VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
memset(key, 0, sizeof(key));
memset(hashes, 0, sizeof(hashes));
memset(final, 0, sizeof(final));
/*
* Hash keys of the form {0}, {0,1}, ..., {0,1,...,254}: the key hashed in
* round i is the first i bytes of key[] (so it has length i), with 256-i
* used as the seed.
*/
for (i = 0; i < 256; i++) {
key[i] = (uint8_t)i;
switch (variant) {
case hash_variant_x86_32: {
uint32_t out;
out = hash_x86_32(key, i, 256-i);
memcpy(&hashes[i*hashbytes], &out, hashbytes);
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(key, i, 256-i, out);
memcpy(&hashes[i*hashbytes], out, hashbytes);
break;
} default: not_reached();
}
}
/* Hash the result array. */
switch (variant) {
case hash_variant_x86_32: {
uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
memcpy(final, &out, sizeof(out));
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} default: not_reached();
}
computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
(final[3] << 24);
switch (variant) {
#ifdef JEMALLOC_BIG_ENDIAN
case hash_variant_x86_32: expected = 0x6213303eU; break;
case hash_variant_x86_128: expected = 0x266820caU; break;
case hash_variant_x64_128: expected = 0xcc622b6fU; break;
#else
case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
case hash_variant_x86_128: expected = 0xb3ece62aU; break;
case hash_variant_x64_128: expected = 0x6384ba69U; break;
#endif
default: not_reached();
}
assert_u32_eq(computed, expected,
"Hash mismatch for %s(): expected %#x but got %#x",
hash_variant_string(variant), expected, computed);
}
TEST_BEGIN(test_hash_x86_32)
{
hash_variant_verify(hash_variant_x86_32);
}
TEST_END
TEST_BEGIN(test_hash_x86_128)
{
hash_variant_verify(hash_variant_x86_128);
}
TEST_END
TEST_BEGIN(test_hash_x64_128)
{
hash_variant_verify(hash_variant_x64_128);
}
TEST_END
int
main(void)
{
return (test(
test_hash_x86_32,
test_hash_x86_128,
test_hash_x64_128));
}
| 4,746 | 26.598837 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum.h | #include "test/jemalloc_test.h"
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
#define alloc_n_proto(n) \
void *alloc_##n(unsigned bits);
alloc_n_proto(0)
alloc_n_proto(1)
#define alloc_n_gen(n) \
void * \
alloc_##n(unsigned bits) \
{ \
void *p; \
\
if (bits == 0) \
p = mallocx(1, 0); \
else { \
switch (bits & 0x1U) { \
case 0: \
p = (alloc_0(bits >> 1)); \
break; \
case 1: \
p = (alloc_1(bits >> 1)); \
break; \
default: not_reached(); \
} \
} \
/* Intentionally sabotage tail call optimization. */ \
assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
return (p); \
}
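/*
 * alloc_0() and alloc_1() (presumably instantiated from alloc_n_gen() in
 * the test source) recurse on successive bits of their argument, so each
 * distinct bit pattern yields a distinct call path -- and hence a distinct
 * backtrace for the profiler to accumulate.
 */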
| 794 | 21.083333 | 59 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/quarantine.c | #include "test/jemalloc_test.h"
#define QUARANTINE_SIZE 8192
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
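/* Two macro levels are needed so QUARANTINE_SIZE expands before '#' stringifies it. */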
#ifdef JEMALLOC_FILL
const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
STRINGIFY(QUARANTINE_SIZE);
#endif
void
quarantine_clear(void)
{
void *p;
p = mallocx(QUARANTINE_SIZE*2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
}
TEST_BEGIN(test_quarantine)
{
#define SZ ZU(256)
#define NQUARANTINED (QUARANTINE_SIZE/SZ)
void *quarantined[NQUARANTINED+1];
size_t i, j;
test_skip_if(!config_fill);
assert_zu_eq(nallocx(SZ, 0), SZ,
"SZ=%zu does not precisely equal a size class", SZ);
quarantine_clear();
/*
* Allocate enough regions to completely fill the quarantine, plus one
* more. The last iteration occurs with a completely full quarantine,
* but no regions should be drained from the quarantine until the last
* deallocation occurs. Therefore no region recycling should occur
* until after this loop completes.
*/
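/*
 * With SZ = 256 and QUARANTINE_SIZE = 8192, NQUARANTINED is 32, so this
 * loop performs 33 allocate/deallocate rounds.
 */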
for (i = 0; i < NQUARANTINED+1; i++) {
void *p = mallocx(SZ, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
quarantined[i] = p;
dallocx(p, 0);
for (j = 0; j < i; j++) {
assert_ptr_ne(p, quarantined[j],
"Quarantined region recycled too early; "
"i=%zu, j=%zu", i, j);
}
}
#undef NQUARANTINED
#undef SZ
}
TEST_END
static bool detected_redzone_corruption;
static void
arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
detected_redzone_corruption = true;
}
TEST_BEGIN(test_quarantine_redzone)
{
char *s;
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
/* Test underflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[-1] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
/* Test overflow. */
detected_redzone_corruption = false;
s = (char *)mallocx(1, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
s[sallocx(s, 0)] = 0xbb;
dallocx(s, 0);
assert_true(detected_redzone_corruption,
"Did not detect redzone corruption");
arena_redzone_corruption = arena_redzone_corruption_orig;
}
TEST_END
int
main(void)
{
return (test(
test_quarantine,
test_quarantine_redzone));
}
| 2,583 | 22.706422 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool.h | #include "test/jemalloc_test.h"
#define TEST_POOL_SIZE (16L * 1024L * 1024L)
#define TEST_TOO_SMALL_POOL_SIZE (2L * 1024L * 1024L)
#define TEST_VALUE 123456
#define TEST_MALLOC_FREE_LOOPS 2
#define TEST_MALLOC_SIZE 1024
#define TEST_ALLOCS_SIZE (TEST_POOL_SIZE / 8)
#define TEST_BUFFER_CMP_SIZE (4L * 1024L * 1024L)
static char mem_pool[TEST_POOL_SIZE];
static char mem_extend_ok[TEST_POOL_SIZE];
static void* allocs[TEST_ALLOCS_SIZE];
static int custom_allocs;
TEST_BEGIN(test_pool_create_errors) {
pool_t *pool;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, 0, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for size 0");
pool = pool_create(NULL, TEST_POOL_SIZE, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for input addr NULL");
}
TEST_END
TEST_BEGIN(test_pool_create) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
assert_ptr_eq(pool, mem_pool, "pool_create() should return addr with valid input");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_malloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_malloc(pool, sizeof(int));
assert_ptr_not_null(test, "pool_malloc should return valid ptr");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_free) {
pool_t *pool;
int i, j, s = 0, prev_s = 0;
int allocs = TEST_POOL_SIZE/TEST_MALLOC_SIZE;
void *arr[allocs];
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
for (i = 0; i < TEST_MALLOC_FREE_LOOPS; ++i) {
for (j = 0; j < allocs; ++j) {
arr[j] = pool_malloc(pool, TEST_MALLOC_SIZE);
if (arr[j] != NULL) {
s++;
}
}
for (j = 0; j < allocs; ++j) {
if (arr[j] != NULL) {
pool_free(pool, arr[j]);
}
}
if (prev_s != 0) {
assert_x_eq(s, prev_s,
    "pool_free() should give used chunks back to the pool");
}
prev_s = s;
s = 0;
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_calloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
int *test = pool_calloc(pool, 1, sizeof(int));
assert_ptr_not_null(test, "pool_calloc should return valid ptr");
assert_x_eq(*test, 0, "pool_calloc should return zeroed memory");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_realloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_ralloc(pool, NULL, sizeof(int));
assert_ptr_not_null(test, "pool_ralloc with NULL addr should return valid ptr");
int *test2 = pool_ralloc(pool, test, sizeof(int)*2);
assert_ptr_not_null(test, "pool_ralloc should return valid ptr");
test2[0] = TEST_VALUE;
test2[1] = TEST_VALUE;
assert_x_eq(test[1], TEST_VALUE, "ptr should be usable");
pool_free(pool, test2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_aligned_alloc) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
int *test = pool_aligned_alloc(pool, 1024, 1024);
assert_ptr_not_null(test, "pool_aligned_alloc should return valid ptr");
assert_x_eq(((uintptr_t)(test) & 1023), 0, "ptr should be aligned");
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
"pool_aligned_alloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_aligned_alloc() should return pointer to memory from pool");
*test = TEST_VALUE;
assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
pool_free(pool, test);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_reuse_pool) {
pool_t *pool;
size_t pool_num = 0;
custom_allocs = 0;
/* create and destroy pool multiple times */
for (; pool_num<100; ++pool_num) {
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
if (pool == NULL) {
break;
}
void *prev = NULL;
size_t i = 0;
/* allocate memory from pool */
for (; i<100; ++i) {
void **next = pool_malloc(pool, sizeof (void *));
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+TEST_POOL_SIZE,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
prev = next;
}
/* free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
}
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_check_memory) {
pool_t *pool;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
size_t object_size;
size_t size_allocated;
size_t i;
size_t j;
for (object_size = 8; object_size <= TEST_BUFFER_CMP_SIZE; object_size *= 2) {
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
size_allocated = 0;
memset(allocs, 0, TEST_ALLOCS_SIZE * sizeof(void *));
for (i = 0; i < TEST_ALLOCS_SIZE;++i) {
allocs[i] = pool_malloc(pool, object_size);
if (allocs[i] == NULL) {
/* out of memory in pool */
break;
}
assert_lu_gt((uintptr_t)allocs[i], (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)allocs[i], (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
size_allocated += object_size;
/* fill each allocation with a per-index pattern ((char)i; unique only modulo 256) */
memset(allocs[i], (char)i, object_size);
}
assert_ptr_not_null(allocs[0], "pool_malloc should return valid ptr");
assert_lu_lt(i + 1, TEST_ALLOCS_SIZE, "Pool memory should run out before the allocs array fills");
/* check for unexpected modifications of prepare data */
for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) {
char *buffer = allocs[i];
for (j = 0; j < object_size; ++j)
if (buffer[j] != (char)i) {
assert_true(0, "Content of data object was modified unexpectedly"
" for object size: %zu, id: %zu", object_size, j);
break;
}
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
}
TEST_END
TEST_BEGIN(test_pool_use_all_memory) {
pool_t *pool;
size_t size = 0;
size_t pool_size = POOL_MINIMAL_SIZE;
assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
custom_allocs = 0;
pool = pool_create(mem_pool, pool_size, 0, 1);
assert_ptr_not_null(pool, "Can not create pool!!!");
void *prev = NULL;
for (;;) {
void **next = pool_malloc(pool, sizeof (void *));
if (next == NULL) {
/* Out of memory in pool, test end */
break;
}
size += sizeof (void *);
assert_ptr_not_null(next, "pool_malloc should return valid ptr");
assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
"pool_malloc() should return pointer to memory from pool");
assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+pool_size,
"pool_malloc() should return pointer to memory from pool");
*next = prev;
assert_x_eq((uintptr_t)(*next), (uintptr_t)(prev), "ptr should be usable");
prev = next;
}
assert_lu_gt(size, 0, "Can not alloc any memory from pool");
/* Free all allocated memory from pool */
while (prev != NULL) {
void **act = prev;
prev = *act;
pool_free(pool, act);
}
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_errors) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_TOO_SMALL_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_TOO_SMALL_POOL_SIZE, 0);
assert_zu_eq(usable_size, 0, "pool_extend() should return 0"
" when provided with memory size smaller then chunksize");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
TEST_BEGIN(test_pool_extend_after_out_of_memory) {
pool_t *pool;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
/* use up all the memory from the pool and from the base allocator */
while (pool_malloc(pool, sizeof (void *)));
pool->base_next_addr = pool->base_past_addr;
memset(mem_extend_ok, 0, TEST_POOL_SIZE);
size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
assert_zu_ne(usable_size, 0, "pool_extend() should return value"
" after alignment when provided with enough memory");
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
* print_jemalloc_messages -- custom print function for jemalloc;
* it discards all messages, so expected errors stay quiet in the tests below
*/
static void
print_jemalloc_messages(void* ignore, const char *s)
{
}
TEST_BEGIN(test_pool_check_extend) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
pool_malloc(pool, 100);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() returned an error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() did not return an error");
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() returned an error");
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() returned an error");
pool_malloc(pool, 100);
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() did not return an error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_out_of_range) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
custom_allocs = 0;
pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() returned an error");
void *usable_addr = (void *)CHUNK_CEILING((uintptr_t)mem_extend_ok);
size_t usable_size = (TEST_POOL_SIZE - (uintptr_t)(usable_addr -
(void *)mem_extend_ok)) & ~chunksize_mask;
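/*
 * Deliberately record a memory run that was never made part of this pool;
 * the consistency check below is then expected to fail.
 */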
chunk_record(pool,
&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
usable_addr, usable_size, 0);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() did not return an error");
pool_delete(pool);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() did not return an error");
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
TEST_BEGIN(test_pool_check_memory_overlap) {
je_malloc_message = print_jemalloc_messages;
pool_t *pool;
pool_t *pool2;
custom_allocs = 0;
memset(mem_pool, 0, TEST_POOL_SIZE);
pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
assert_d_eq(je_pool_check(pool), 1, "je_pool_check() returned an error");
/* create another pool in the same memory region */
pool2 = pool_create(mem_extend_ok, TEST_POOL_SIZE, 0, 1);
assert_d_ne(je_pool_check(pool), 1, "je_pool_check() did not return an error");
assert_d_ne(je_pool_check(pool2), 1, "je_pool_check() did not return an error");
pool_delete(pool2);
pool_delete(pool);
assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
je_malloc_message = NULL;
}
TEST_END
#define POOL_TEST_CASES\
test_pool_create_errors, \
test_pool_create, \
test_pool_malloc, \
test_pool_free, \
test_pool_calloc, \
test_pool_realloc, \
test_pool_aligned_alloc, \
test_pool_reuse_pool, \
test_pool_check_memory, \
test_pool_use_all_memory, \
test_pool_extend_errors, \
test_pool_extend, \
test_pool_extend_after_out_of_memory, \
test_pool_check_extend, \
test_pool_check_memory_out_of_range, \
test_pool_check_memory_overlap
| 13,511 | 27.267782 | 84 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/SFMT.c | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT.c
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software, see LICENSE.txt
*/
#define SFMT_C_
#include "test/jemalloc_test.h"
#include "test/SFMT-params.h"
#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
#define BIG_ENDIAN64 1
#endif
#if defined(ONLY64) && !defined(BIG_ENDIAN64)
#if defined(__GNUC__)
#error "-DONLY64 must be specified with -DBIG_ENDIAN64"
#endif
#undef ONLY64
#endif
/*------------------------------------------------------
128-bit SIMD data type for Altivec, SSE2 or standard C
------------------------------------------------------*/
#if defined(HAVE_ALTIVEC)
/** 128-bit data structure */
union W128_T {
vector unsigned int s;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#elif defined(HAVE_SSE2)
/** 128-bit data structure */
union W128_T {
__m128i si;
uint32_t u[4];
};
/** 128-bit data type */
typedef union W128_T w128_t;
#else
/** 128-bit data structure */
struct W128_T {
uint32_t u[4];
};
/** 128-bit data type */
typedef struct W128_T w128_t;
#endif
struct sfmt_s {
/** the 128-bit internal state array */
w128_t sfmt[N];
/** index counter to the 32-bit internal state array */
int idx;
/** a flag: it is 0 if and only if the internal state is not yet
* initialized. */
int initialized;
};
/*--------------------------------------
FILE GLOBAL VARIABLES
internal state, index counter and flag
--------------------------------------*/
/** a parity check vector which certificate the period of 2^{MEXP} */
static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
/*----------------
STATIC FUNCTIONS
----------------*/
JEMALLOC_INLINE_C int idxof(int i);
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
#endif
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
static void period_certification(sfmt_t *ctx);
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
JEMALLOC_INLINE_C void swap(w128_t *array, int size);
#endif
#if defined(HAVE_ALTIVEC)
#include "test/SFMT-alti.h"
#elif defined(HAVE_SSE2)
#include "test/SFMT-sse2.h"
#endif
/**
* This function simulates a 64-bit index of LITTLE ENDIAN
* in a BIG ENDIAN machine.
*/
#ifdef ONLY64
JEMALLOC_INLINE_C int idxof(int i) {
return i ^ 1;
}
#else
JEMALLOC_INLINE_C int idxof(int i) {
return i;
}
#endif
/**
* This function simulates SIMD 128-bit right shift by the standard C.
* The 128-bit integer given in \b in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
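/* For example, shift == 3 moves the 128-bit value right by 24 bits. */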
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th >> (shift * 8);
ol = tl >> (shift * 8);
ol |= th << (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
/**
* This function simulates SIMD 128-bit left shift by the standard C.
* The 128-bit integer given in \b in is shifted by (shift * 8) bits.
* This function simulates the LITTLE ENDIAN SIMD.
* @param out the output of this function
* @param in the 128-bit data to be shifted
* @param shift the shift value
*/
#ifdef ONLY64
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[0] = (uint32_t)(ol >> 32);
out->u[1] = (uint32_t)ol;
out->u[2] = (uint32_t)(oh >> 32);
out->u[3] = (uint32_t)oh;
}
#else
JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
uint64_t th, tl, oh, ol;
th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
oh = th << (shift * 8);
ol = tl << (shift * 8);
oh |= tl >> (64 - shift * 8);
out->u[1] = (uint32_t)(ol >> 32);
out->u[0] = (uint32_t)ol;
out->u[3] = (uint32_t)(oh >> 32);
out->u[2] = (uint32_t)oh;
}
#endif
#endif
/**
* This function represents the recursion formula.
* @param r output
* @param a a 128-bit part of the internal state array
* @param b a 128-bit part of the internal state array
* @param c a 128-bit part of the internal state array
* @param d a 128-bit part of the internal state array
*/
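/*
 * In compact form, the recursion implemented below is:
 * r = a ^ (a << (SL2*8)) ^ ((b >> SR1) & MSK) ^ (c >> (SR2*8)) ^ (d << SL1),
 * where the a and c shifts are whole-128-bit shifts and the b and d shifts
 * act on each 32-bit lane independently.
 */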
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
#ifdef ONLY64
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
^ (d->u[3] << SL1);
}
#else
JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
w128_t *d) {
w128_t x;
w128_t y;
lshift128(&x, a, SL2);
rshift128(&y, c, SR2);
r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
^ (d->u[0] << SL1);
r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
^ (d->u[1] << SL1);
r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
^ (d->u[2] << SL1);
r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
^ (d->u[3] << SL1);
}
#endif
#endif
#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
int i;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
for (; i < N; i++) {
do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
r2);
r1 = r2;
r2 = &ctx->sfmt[i];
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
* @param array an 128-bit array to be filled by pseudorandom numbers.
* @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
w128_t *r1, *r2;
r1 = &ctx->sfmt[N - 2];
r2 = &ctx->sfmt[N - 1];
for (i = 0; i < N - POS1; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < N; i++) {
do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (; i < size - N; i++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
}
for (j = 0; j < 2 * N - size; j++) {
ctx->sfmt[j] = array[j + size - N];
}
for (; i < size; i++, j++) {
do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
r1 = r2;
r2 = &array[i];
ctx->sfmt[j] = array[i];
}
}
#endif
#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
int i;
uint32_t x, y;
for (i = 0; i < size; i++) {
x = array[i].u[0];
y = array[i].u[2];
array[i].u[0] = array[i].u[1];
array[i].u[2] = array[i].u[3];
array[i].u[1] = x;
array[i].u[3] = y;
}
}
#endif
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func1(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1664525UL;
}
/**
* This function represents a function used in the initialization
* by init_by_array
* @param x 32-bit integer
* @return 32-bit integer
*/
static uint32_t func2(uint32_t x) {
return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
}
/**
* This function certifies the period of 2^{MEXP}
*/
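/*
 * It XOR-folds the parity-masked first 128 bits of the state down to a
 * single bit; if that bit is 0, one bit selected by the parity vector is
 * flipped so that the period of 2^{MEXP} is guaranteed.
 */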
static void period_certification(sfmt_t *ctx) {
int inner = 0;
int i, j;
uint32_t work;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
for (i = 0; i < 4; i++)
inner ^= psfmt32[idxof(i)] & parity[i];
for (i = 16; i > 0; i >>= 1)
inner ^= inner >> i;
inner &= 1;
/* check OK */
if (inner == 1) {
return;
}
/* check failed: modify the state so that the period is certified */
for (i = 0; i < 4; i++) {
work = 1;
for (j = 0; j < 32; j++) {
if ((work & parity[i]) != 0) {
psfmt32[idxof(i)] ^= work;
return;
}
work = work << 1;
}
}
}
/*----------------
PUBLIC FUNCTIONS
----------------*/
/**
* This function returns the identification string.
* The string shows the word size, the Mersenne exponent,
* and all parameters of this generator.
*/
const char *get_idstring(void) {
return IDSTR;
}
/**
* This function returns the minimum size of array used for \b
* fill_array32() function.
* @return minimum size of array used for fill_array32() function.
*/
int get_min_array_size32(void) {
return N32;
}
/**
* This function returns the minimum size of array used for \b
* fill_array64() function.
* @return minimum size of array used for fill_array64() function.
*/
int get_min_array_size64(void) {
return N64;
}
#ifndef ONLY64
/**
 * This function generates and returns a 32-bit pseudorandom number.
* init_gen_rand or init_by_array must be called before this function.
* @return 32-bit pseudorandom number
*/
uint32_t gen_rand32(sfmt_t *ctx) {
uint32_t r;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
assert(ctx->initialized);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
r = psfmt32[ctx->idx++];
return r;
}
/* Generate a random integer in [0..limit). */
uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
uint32_t ret, above;
above = 0xffffffffU - (0xffffffffU % limit);
while (1) {
ret = gen_rand32(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
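/*
 * The loop above performs rejection sampling to avoid modulo bias:
 * "above" is the largest multiple of limit representable in 32 bits,
 * and raw draws >= above are discarded so that every residue class
 * keeps an equal number of accepted raw values. For example, with a
 * 3-bit generator (8 raw values) and limit == 3, above == 6; raw
 * values 6 and 7 are rejected, leaving exactly two raw values per
 * result.
 */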
#endif
/**
 * This function generates and returns a 64-bit pseudorandom number.
 * init_gen_rand or init_by_array must be called before this function.
 * gen_rand64 must not be called after gen_rand32 unless the generator
 * is reinitialized first.
* @return 64-bit pseudorandom number
*/
uint64_t gen_rand64(sfmt_t *ctx) {
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
uint32_t r1, r2;
uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
#else
uint64_t r;
uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
#endif
assert(ctx->initialized);
assert(ctx->idx % 2 == 0);
if (ctx->idx >= N32) {
gen_rand_all(ctx);
ctx->idx = 0;
}
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
r1 = psfmt32[ctx->idx];
r2 = psfmt32[ctx->idx + 1];
ctx->idx += 2;
return ((uint64_t)r2 << 32) | r1;
#else
r = psfmt64[ctx->idx / 2];
ctx->idx += 2;
return r;
#endif
}
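/*
 * Each 64-bit draw consumes two consecutive 32-bit state words, which
 * is why the assertion above requires ctx->idx to be even; a single
 * gen_rand32() call in between would leave idx odd and misalign every
 * subsequent 64-bit read (hence the warning in the comment block
 * above).
 */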
/* Generate a random integer in [0..limit). */
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
uint64_t ret, above;
above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
while (1) {
ret = gen_rand64(ctx);
if (ret < above) {
ret %= limit;
break;
}
}
return ret;
}
#ifndef ONLY64
/**
* This function generates pseudorandom 32-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 624 and a
 * multiple of four. Generation by this function is much faster than
 * repeated gen_rand32 calls.
*
* For initialization, init_gen_rand or init_by_array must be called
 * before the first call of this function. This function cannot be
 * used after calling gen_rand32/gen_rand64 without reinitialization.
*
* @param array an array where pseudorandom 32-bit integers are filled
* by this function. The pointer to the array must be \b "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 32-bit pseudorandom integers to be
* generated. size must be a multiple of 4, and greater than or equal
* to (MEXP / 128 + 1) * 4.
*
 * @note \b memalign or \b posix_memalign can be used to obtain aligned
 * memory. Mac OS X lacks these functions, but its \b malloc already
 * returns a pointer to a suitably aligned memory block.
*/
void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 4 == 0);
assert(size >= N32);
gen_rand_array(ctx, (w128_t *)array, size / 4);
ctx->idx = N32;
}
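/*
 * Minimal usage sketch (not part of the original sources; error
 * handling elided). For MEXP == 19937, N32 == 624, so this is the
 * smallest legal request:
 *
 *	uint32_t *buf;
 *	sfmt_t *ctx = init_gen_rand(4321);
 *	if (posix_memalign((void **)&buf, 16, 624 * sizeof(uint32_t)) == 0) {
 *		fill_array32(ctx, buf, 624);
 *		free(buf);
 *	}
 *	fini_gen_rand(ctx);
 */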
#endif
/**
* This function generates pseudorandom 64-bit integers in the
* specified array[] by one call. The number of pseudorandom integers
* is specified by the argument size, which must be at least 312 and a
 * multiple of two. Generation by this function is much faster than
 * repeated gen_rand64 calls.
*
* For initialization, init_gen_rand or init_by_array must be called
 * before the first call of this function. This function cannot be
 * used after calling gen_rand32/gen_rand64 without reinitialization.
*
* @param array an array where pseudorandom 64-bit integers are filled
* by this function. The pointer to the array must be "aligned"
* (namely, must be a multiple of 16) in the SIMD version, since it
* refers to the address of a 128-bit integer. In the standard C
* version, the pointer is arbitrary.
*
* @param size the number of 64-bit pseudorandom integers to be
* generated. size must be a multiple of 2, and greater than or equal
* to (MEXP / 128 + 1) * 2
*
 * @note \b memalign or \b posix_memalign can be used to obtain aligned
 * memory. Mac OS X lacks these functions, but its \b malloc already
 * returns a pointer to a suitably aligned memory block.
*/
void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
assert(ctx->initialized);
assert(ctx->idx == N32);
assert(size % 2 == 0);
assert(size >= N64);
gen_rand_array(ctx, (w128_t *)array, size / 2);
ctx->idx = N32;
#if defined(BIG_ENDIAN64) && !defined(ONLY64)
	swap((w128_t *)array, size / 2);
#endif
}
/**
* This function initializes the internal state array with a 32-bit
* integer seed.
*
* @param seed a 32-bit integer used as the seed.
*/
sfmt_t *init_gen_rand(uint32_t seed) {
void *p;
sfmt_t *ctx;
int i;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
psfmt32[idxof(0)] = seed;
for (i = 1; i < N32; i++) {
psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
^ (psfmt32[idxof(i - 1)] >> 30))
+ i;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
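/*
 * The seeding recurrence above (multiplier 1812433253) is the same
 * Knuth-style initializer used by the original Mersenne Twister's
 * init_genrand(); period_certification() then guarantees the seeded
 * state actually carries the full 2^{MEXP}-1 period factor.
 */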
/**
* This function initializes the internal state array,
* with an array of 32-bit integers used as the seeds
* @param init_key the array of 32-bit integers, used as a seed.
* @param key_length the length of init_key.
*/
sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
void *p;
sfmt_t *ctx;
int i, j, count;
uint32_t r;
int lag;
int mid;
int size = N * 4;
uint32_t *psfmt32;
if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
return NULL;
}
ctx = (sfmt_t *)p;
psfmt32 = &ctx->sfmt[0].u[0];
if (size >= 623) {
lag = 11;
} else if (size >= 68) {
lag = 7;
} else if (size >= 39) {
lag = 5;
} else {
lag = 3;
}
mid = (size - lag) / 2;
memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
if (key_length + 1 > N32) {
count = key_length + 1;
} else {
count = N32;
}
r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
^ psfmt32[idxof(N32 - 1)]);
psfmt32[idxof(mid)] += r;
r += key_length;
psfmt32[idxof(mid + lag)] += r;
psfmt32[idxof(0)] = r;
count--;
for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += init_key[j] + i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (; j < count; j++) {
r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
^ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] += r;
r += i;
psfmt32[idxof((i + mid + lag) % N32)] += r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
for (j = 0; j < N32; j++) {
r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ psfmt32[idxof((i + N32 - 1) % N32)]);
psfmt32[idxof((i + mid) % N32)] ^= r;
r -= i;
psfmt32[idxof((i + mid + lag) % N32)] ^= r;
psfmt32[idxof(i)] = r;
i = (i + 1) % N32;
}
ctx->idx = N32;
period_certification(ctx);
ctx->initialized = 1;
return ctx;
}
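/*
 * Worked example: for MEXP == 19937, size == N * 4 == 624, so the
 * branch above selects lag == 11 and mid == (624 - 11) / 2 == 306;
 * the three mixing passes then spread the key across well-separated
 * state positions, mirroring MT19937's init_by_array() scheme.
 */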
void fini_gen_rand(sfmt_t *ctx) {
assert(ctx != NULL);
ctx->initialized = 0;
free(ctx);
}
| 20,765 | 27.841667 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/src/test.c | #include "test/jemalloc_test.h"
static unsigned test_count = 0;
static test_status_t test_counts[test_status_count] = {0, 0, 0};
static test_status_t test_status = test_status_pass;
static const char * test_name = "";
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_skip(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_skip;
}
JEMALLOC_ATTR(format(printf, 1, 2))
void
test_fail(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
malloc_printf("\n");
test_status = test_status_fail;
}
static const char *
test_status_string(test_status_t test_status)
{
switch (test_status) {
case test_status_pass: return "pass";
case test_status_skip: return "skip";
case test_status_fail: return "fail";
default: not_reached();
}
}
void
p_test_init(const char *name)
{
test_count++;
test_status = test_status_pass;
test_name = name;
}
void
p_test_fini(void)
{
test_counts[test_status]++;
malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
test_status_t
p_test(test_t *t, ...)
{
test_status_t ret;
va_list ap;
/*
* Make sure initialization occurs prior to running tests. Tests are
* special because they may use internal facilities prior to triggering
* initialization as a side effect of calling into the public API. This
* is a final safety that works even if jemalloc_constructor() doesn't
* run, as for MSVC builds.
*/
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
return (test_status_fail);
}
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
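/*
 * The varargs walk above stops at a NULL sentinel, so callers must
 * terminate the list; the integration tests in this tree do so via
 * the test() wrapper macro, e.g.:
 *
 *	TEST_BEGIN(test_foo)
 *	{
 *		assert_d_eq(1 + 1, 2, "arithmetic is broken");
 *	}
 *	TEST_END
 *
 *	int
 *	main(void)
 *	{
 *		return (test(test_foo));
 *	}
 */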
test_status_t
p_test_not_init(test_t *t, ...)
{
test_status_t ret;
va_list ap;
ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
test_counts[test_status_pass], test_count,
test_status_string(test_status_skip),
test_counts[test_status_skip], test_count,
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
}
void
p_test_fail(const char *prefix, const char *message)
{
malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
test_status = test_status_fail;
}
| 2,920 | 20.798507 | 73 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/allocm.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
nsz = 0;
assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected nallocm() error");
rsz = 0;
assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
"Unexpected allocm() error");
assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
int r;
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = 0;
r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) |
ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"nallocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
rsz = 0;
r = allocm(&ps[i], &rsz, sz,
ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
assert_d_eq(r, ALLOCM_SUCCESS,
"allocm() error for alignment=%zu, "
"size=%zu (%#zx): %d",
alignment, sz, sz, r);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocm()/allocm() rsize mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
sallocm(ps[i], &rsz, 0);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocm(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,719 | 24.185185 | 63 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/allocated.c | #include "test/jemalloc_test.h"
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
void *p;
uint64_t a0, a1, d0, d1;
uint64_t *ap0, *ap1, *dp0, *dp1;
size_t sz, usize;
sz = sizeof(a0);
if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(ap0);
if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*ap0, a0,
"\"thread.allocatedp\" should provide a pointer to internal "
"storage");
sz = sizeof(d0);
if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(dp0);
if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
assert_u64_eq(*dp0, d0,
"\"thread.deallocatedp\" should provide a pointer to internal "
"storage");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1);
mallctl("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1);
mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value");
assert_ptr_eq(ap0, ap1,
"Pointer returned by \"thread.allocatedp\" should not change");
usize = malloc_usable_size(p);
assert_u64_le(a0 + usize, a1,
"Allocated memory counter should increase by at least the amount "
"explicitly allocated");
free(p);
sz = sizeof(d1);
mallctl("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1);
mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value");
assert_ptr_eq(dp0, dp1,
"Pointer returned by \"thread.deallocatedp\" should not change");
assert_u64_le(d0 + usize, d1,
"Deallocated memory counter should increase by at least the amount "
"explicitly deallocated");
return (NULL);
label_ENOENT:
assert_false(config_stats,
"ENOENT should only be returned if stats are disabled");
test_skip("\"thread.allocated\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,989 | 22.730159 | 73 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/mallocx.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
TEST_BEGIN(test_basic)
{
size_t nsz, rsz, sz;
void *p;
sz = 42;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0);
p = mallocx(sz, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_ptr_not_null(ps[i],
"mallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
total += rsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocx(ps[i], 0);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_basic,
test_alignment_and_size));
}
| 2,387 | 23.367347 | 62 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/rallocm.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_same_size)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS,
"Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_extra_no_move)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE),
ALLOCM_SUCCESS, "Unexpected rallocm() error");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_no_move_fail)
{
void *p, *q;
size_t sz, tsz;
assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
q = p;
assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE),
ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result");
assert_ptr_eq(q, p, "Unexpected object move");
assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS,
"Unexpected allocm() error");
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = p;
assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = p;
assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0),
ALLOCM_SUCCESS,
"Unexpected rallocm() error for size=%zu-->%zu",
szs[j], szs[j-1]);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
"Unexpected dallocm() error");
}
TEST_END
int
main(void)
{
return (test(
test_same_size,
test_extra_no_move,
test_no_move_fail,
test_grow_and_shrink));
}
| 2,637 | 22.553571 | 71 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/rallocx.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
size_t tsz;
#define NCYCLES 3
unsigned i, j;
#define NSZS 2500
size_t szs[NSZS];
#define MAXSZ ZU(12 * 1024 * 1024)
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
szs[0] = sallocx(p, 0);
for (i = 0; i < NCYCLES; i++) {
for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
q = rallocx(p, szs[j-1]+1, 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j-1], szs[j-1]+1);
szs[j] = sallocx(q, 0);
assert_zu_ne(szs[j], szs[j-1]+1,
"Expected size to at least: %zu", szs[j-1]+1);
p = q;
}
for (j--; j > 0; j--) {
q = rallocx(p, szs[j-1], 0);
assert_ptr_not_null(q,
"Unexpected rallocx() error for size=%zu-->%zu",
szs[j], szs[j-1]);
tsz = sallocx(q, 0);
assert_zu_eq(tsz, szs[j-1],
"Expected size=%zu, got size=%zu", szs[j-1], tsz);
p = q;
}
}
dallocx(p, 0);
#undef MAXSZ
#undef NSZS
#undef NCYCLES
}
TEST_END
static bool
validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
{
bool ret = false;
const uint8_t *buf = (const uint8_t *)p;
size_t i;
for (i = 0; i < len; i++) {
uint8_t b = buf[offset+i];
if (b != c) {
test_fail("Allocation at %p contains %#x rather than "
"%#x at offset %zu", p, b, c, offset+i);
ret = true;
}
}
return (ret);
}
TEST_BEGIN(test_zero)
{
void *p, *q;
size_t psz, qsz, i, j;
size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
#define FILL_BYTE 0xaaU
#define RANGE 2048
for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
size_t start_size = start_sizes[i];
p = mallocx(start_size, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
psz = sallocx(p, 0);
assert_false(validate_fill(p, 0, 0, psz),
"Expected zeroed memory");
memset(p, FILL_BYTE, psz);
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
for (j = 1; j < RANGE; j++) {
q = rallocx(p, start_size+j, MALLOCX_ZERO);
assert_ptr_not_null(q, "Unexpected rallocx() error");
qsz = sallocx(q, 0);
if (q != p || qsz != psz) {
assert_false(validate_fill(q, FILL_BYTE, 0,
psz), "Expected filled memory");
assert_false(validate_fill(q, 0, psz, qsz-psz),
"Expected zeroed memory");
}
if (psz != qsz) {
memset((void *)((uintptr_t)q+psz), FILL_BYTE,
qsz-psz);
psz = qsz;
}
p = q;
}
assert_false(validate_fill(p, FILL_BYTE, 0, psz),
"Expected filled memory");
dallocx(p, 0);
}
#undef FILL_BYTE
}
TEST_END
TEST_BEGIN(test_align)
{
void *p, *q;
size_t align;
#define MAX_ALIGN (ZU(1) << 25)
align = ZU(1);
p = mallocx(1, MALLOCX_ALIGN(align));
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
q = rallocx(p, 1, MALLOCX_ALIGN(align));
assert_ptr_not_null(q,
"Unexpected rallocx() error for align=%zu", align);
assert_ptr_null(
(void *)((uintptr_t)q & (align-1)),
"%p inadequately aligned for align=%zu",
q, align);
p = q;
}
dallocx(p, 0);
#undef MAX_ALIGN
}
TEST_END
TEST_BEGIN(test_lg_align_and_zero)
{
void *p, *q;
size_t lg_align, sz;
#define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22)
lg_align = ZU(0);
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q,
"Unexpected rallocx() error for lg_align=%zu", lg_align);
assert_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
"%p inadequately aligned for lg_align=%zu",
q, lg_align);
sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz),
"Expected zeroed memory");
} else {
assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
"Expected zeroed memory");
assert_false(validate_fill(
(void *)((uintptr_t)q+sz-MAX_VALIDATE),
0, 0, MAX_VALIDATE), "Expected zeroed memory");
}
p = q;
}
dallocx(p, 0);
#undef MAX_VALIDATE
#undef MAX_LG_ALIGN
}
TEST_END
int
main(void)
{
return (test(
test_grow_and_shrink,
test_zero,
test_align,
test_lg_align_and_zero));
}
| 4,365 | 22.6 | 66 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/thread_tcache_enabled.c | #include "test/jemalloc_test.h"
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
void *
thd_start(void *arg)
{
int err;
size_t sz;
bool e0, e1;
sz = sizeof(bool);
if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
if (err == ENOENT) {
assert_false(config_tcache,
"ENOENT should only be returned if tcache is "
"disabled");
}
goto label_ENOENT;
}
if (e0) {
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
}
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
"Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
return (NULL);
label_ENOENT:
test_skip("\"thread.tcache.enabled\" mallctl not available");
return (NULL);
}
TEST_BEGIN(test_main_thread)
{
thd_start(NULL);
}
TEST_END
TEST_BEGIN(test_subthread)
{
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
TEST_END
int
main(void)
{
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
}
| 2,535 | 21.245614 | 68 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/chunk.c | #include "test/jemalloc_test.h"
chunk_alloc_t *old_alloc;
chunk_dalloc_t *old_dalloc;
bool
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
return (old_dalloc(chunk, size, arena_ind, pool));
}
void *
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind, pool_t *pool)
{
return (old_alloc(new_addr, size, alignment, zero, arena_ind, pool));
}
TEST_BEGIN(test_chunk)
{
void *p;
chunk_alloc_t *new_alloc;
chunk_dalloc_t *new_dalloc;
size_t old_size, new_size;
new_alloc = chunk_alloc;
new_dalloc = chunk_dalloc;
old_size = sizeof(chunk_alloc_t *);
new_size = sizeof(chunk_alloc_t *);
assert_d_eq(mallctl("pool.0.arena.0.chunk.alloc", &old_alloc,
&old_size, &new_alloc, new_size), 0,
"Unexpected alloc error");
assert_ptr_ne(old_alloc, new_alloc,
"Unexpected alloc error");
assert_d_eq(mallctl("pool.0.arena.0.chunk.dalloc", &old_dalloc, &old_size,
&new_dalloc, new_size), 0, "Unexpected dalloc error");
assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
p = mallocx(42, 0);
assert_ptr_ne(p, NULL, "Unexpected alloc error");
free(p);
assert_d_eq(mallctl("pool.0.arena.0.chunk.alloc", NULL,
NULL, &old_alloc, old_size), 0,
"Unexpected alloc error");
assert_d_eq(mallctl("pool.0.arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
old_size), 0, "Unexpected dalloc error");
}
TEST_END
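/*
 * The pattern above -- read the old hooks and install replacements in
 * one mallctl() call, exercise an allocation through the wrappers,
 * then write the old hooks back -- is how custom chunk management is
 * plugged into an arena.
 */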
int
main(void)
{
return (test(test_chunk));
}
| 1,469 | 23.5 | 76 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/aligned_alloc.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
size_t alignment;
void *p;
alignment = 0;
set_errno(0);
p = aligned_alloc(alignment, 1);
assert_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu", alignment);
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
set_errno(0);
p = aligned_alloc(alignment + 1, 1);
assert_false(p != NULL || get_errno() != EINVAL,
"Expected error for invalid alignment %zu",
alignment + 1);
}
}
TEST_END
TEST_BEGIN(test_oom_errors)
{
size_t alignment, size;
void *p;
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x8000000000000000);
size = UINT64_C(0x8000000000000000);
#else
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
size = UINT64_C(0xc000000000000001);
#else
alignment = 0x40000000LU;
size = 0xc0000001LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(%zu, %zu)",
alignment, size);
alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
size = UINT64_C(0xfffffffffffffff0);
#else
size = 0xfffffff0LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
assert_false(p != NULL || get_errno() != ENOMEM,
"Expected error for aligned_alloc(&p, %zu, %zu)",
alignment, size);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t alignment, size, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
ps[i] = aligned_alloc(alignment, size);
if (ps[i] == NULL) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
test_fail(
"Error for alignment=%zu, "
"size=%zu (%#zx): %s",
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
free(ps[i]);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
}
| 2,760 | 20.912698 | 60 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/posix_memalign.c | #include "test/jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
size_t alignment;
void *p;
for (alignment = 0; alignment < sizeof(void *); alignment++) {
assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
"Expected error for invalid alignment %zu",
alignment);
}
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
"Expected error for invalid alignment %zu",
alignment + 1);
}
}
TEST_END
TEST_BEGIN(test_oom_errors)
{
size_t alignment, size;
void *p;
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x8000000000000000);
size = UINT64_C(0x8000000000000000);
#else
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
size = UINT64_C(0xc000000000000001);
#else
alignment = 0x40000000LU;
size = 0xc0000001LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
size = UINT64_C(0xfffffffffffffff0);
#else
size = 0xfffffff0LU;
#endif
assert_d_ne(posix_memalign(&p, alignment, size), 0,
"Expected error for posix_memalign(&p, %zu, %zu)",
alignment, size);
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
size_t alignment, size, total;
unsigned i;
int err;
void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
err = posix_memalign(&ps[i],
alignment, size);
if (err) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
test_fail(
"Error for alignment=%zu, "
"size=%zu (%#zx): %s",
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
free(ps[i]);
ps[i] = NULL;
}
}
}
}
}
TEST_END
int
main(void)
{
return (test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
}
| 2,603 | 20.7 | 63 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/integration/mremap.c | #include "test/jemalloc_test.h"
TEST_BEGIN(test_mremap)
{
int err;
size_t sz, lg_chunk, chunksize, i;
char *p, *q;
sz = sizeof(lg_chunk);
err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
chunksize = ((size_t)1U) << lg_chunk;
p = (char *)malloc(chunksize);
assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
memset(p, 'a', chunksize);
q = (char *)realloc(p, chunksize * 2);
assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
q);
for (i = 0; i < chunksize; i++) {
assert_c_eq(q[i], 'a',
"realloc() should preserve existing bytes across copies");
}
p = q;
q = (char *)realloc(p, chunksize);
assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
for (i = 0; i < chunksize; i++) {
assert_c_eq(q[i], 'a',
"realloc() should preserve existing bytes across copies");
}
free(q);
}
TEST_END
int
main(void)
{
return (test(
test_mremap));
}
| 993 | 20.608696 | 68 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS_H
#define SFMT_PARAMS_H
#if !defined(MEXP)
#ifdef __GNUC__
#warning "MEXP is not defined. I assume MEXP is 19937."
#endif
#define MEXP 19937
#endif
/*-----------------
BASIC DEFINITIONS
-----------------*/
/** Mersenne Exponent. The period of the sequence
* is a multiple of 2^MEXP-1.
* #define MEXP 19937 */
/** SFMT generator has an internal state array of 128-bit integers,
* and N is its size. */
#define N (MEXP / 128 + 1)
/** N32 is the size of internal state array when regarded as an array
* of 32-bit integers.*/
#define N32 (N * 4)
/** N64 is the size of internal state array when regarded as an array
* of 64-bit integers.*/
#define N64 (N * 2)
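/* Worked example: for the default MEXP of 19937,
 * N = 19937 / 128 + 1 = 156, so N32 = 624 and N64 = 312 -- exactly
 * the minimum array sizes required by fill_array32()/fill_array64(). */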
/*----------------------
the parameters of SFMT
  The following definitions are provided by the SFMT-paramsXXXX.h files.
----------------------*/
/** the pick up position of the array.
#define POS1 122
*/
/** the parameter of shift left as four 32-bit registers.
#define SL1 18
*/
/** the parameter of shift left as one 128-bit register.
* The 128-bit integer is shifted by (SL2 * 8) bits.
#define SL2 1
*/
/** the parameter of shift right as four 32-bit registers.
#define SR1 11
*/
/** the parameter of shift right as one 128-bit register.
* The 128-bit integer is shifted by (SL2 * 8) bits.
#define SR2 1
*/
/** A bitmask, used in the recursion. These parameters are introduced
* to break symmetry of SIMD.
#define MSK1 0xdfffffefU
#define MSK2 0xddfecb7fU
#define MSK3 0xbffaffffU
#define MSK4 0xbffffff6U
*/
/** These definitions are part of a 128-bit period certification vector.
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0xc98e126aU
*/
#if MEXP == 607
#include "test/SFMT-params607.h"
#elif MEXP == 1279
#include "test/SFMT-params1279.h"
#elif MEXP == 2281
#include "test/SFMT-params2281.h"
#elif MEXP == 4253
#include "test/SFMT-params4253.h"
#elif MEXP == 11213
#include "test/SFMT-params11213.h"
#elif MEXP == 19937
#include "test/SFMT-params19937.h"
#elif MEXP == 44497
#include "test/SFMT-params44497.h"
#elif MEXP == 86243
#include "test/SFMT-params86243.h"
#elif MEXP == 132049
#include "test/SFMT-params132049.h"
#elif MEXP == 216091
#include "test/SFMT-params216091.h"
#else
#ifdef __GNUC__
#error "MEXP is not valid."
#undef MEXP
#else
#undef MEXP
#endif
#endif
#endif /* SFMT_PARAMS_H */
| 4,286 | 31.233083 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params4253.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS4253_H
#define SFMT_PARAMS4253_H
#define POS1 17
#define SL1 20
#define SL2 1
#define SR1 7
#define SR2 1
#define MSK1 0x9f7bffffU
#define MSK2 0x9fffff5fU
#define MSK3 0x3efffffbU
#define MSK4 0xfffff7bbU
#define PARITY1 0xa8000001U
#define PARITY2 0xaf5390a3U
#define PARITY3 0xb740b3f8U
#define PARITY4 0x6c11486dU
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
#endif /* SFMT_PARAMS4253_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params607.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS607_H
#define SFMT_PARAMS607_H
#define POS1 2
#define SL1 15
#define SL2 3
#define SR1 13
#define SR2 3
#define MSK1 0xfdff37ffU
#define MSK2 0xef7f3f7dU
#define MSK3 0xff777b7dU
#define MSK4 0x7ff7fb2fU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x5986f054U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
#endif /* SFMT_PARAMS607_H */
| 3,558 | 42.402439 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params216091.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS216091_H
#define SFMT_PARAMS216091_H
#define POS1 627
#define SL1 11
#define SL2 3
#define SR1 10
#define SR2 1
#define MSK1 0xbff7bff7U
#define MSK2 0xbfffffffU
#define MSK3 0xbffffa7fU
#define MSK4 0xffddfbfbU
#define PARITY1 0xf8000001U
#define PARITY2 0x89e80709U
#define PARITY3 0x3bd2b64bU
#define PARITY4 0x0c64b1e4U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
#endif /* SFMT_PARAMS216091_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mq.h | /*
* Simple templated message queue implementation that relies on only mutexes for
* synchronization (which reduces portability issues). Given the following
* setup:
*
* typedef struct mq_msg_s mq_msg_t;
* struct mq_msg_s {
* mq_msg(mq_msg_t) link;
* [message data]
* };
* mq_gen(, mq_, mq_t, mq_msg_t, link)
*
* The API is as follows:
*
* bool mq_init(mq_t *mq);
* void mq_fini(mq_t *mq);
* unsigned mq_count(mq_t *mq);
* mq_msg_t *mq_tryget(mq_t *mq);
* mq_msg_t *mq_get(mq_t *mq);
* void mq_put(mq_t *mq, mq_msg_t *msg);
*
* The message queue linkage embedded in each message is to be treated as
* externally opaque (no need to initialize or clean up externally). mq_fini()
* does not perform any cleanup of messages, since it knows nothing of their
* payloads.
*/
#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
typedef struct { \
mtx_t lock; \
ql_head(a_mq_msg_type) msgs; \
unsigned count; \
} a_mq_type; \
a_attr bool \
a_prefix##init(a_mq_type *mq) { \
\
if (mtx_init(&mq->lock)) \
return (true); \
ql_new(&mq->msgs); \
mq->count = 0; \
return (false); \
} \
a_attr void \
a_prefix##fini(a_mq_type *mq) \
{ \
\
mtx_fini(&mq->lock); \
} \
a_attr unsigned \
a_prefix##count(a_mq_type *mq) \
{ \
unsigned count; \
\
mtx_lock(&mq->lock); \
count = mq->count; \
mtx_unlock(&mq->lock); \
return (count); \
} \
a_attr a_mq_msg_type * \
a_prefix##tryget(a_mq_type *mq) \
{ \
a_mq_msg_type *msg; \
\
mtx_lock(&mq->lock); \
msg = ql_first(&mq->msgs); \
if (msg != NULL) { \
ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
mq->count--; \
} \
mtx_unlock(&mq->lock); \
return (msg); \
} \
a_attr a_mq_msg_type * \
a_prefix##get(a_mq_type *mq) \
{ \
a_mq_msg_type *msg; \
struct timespec timeout; \
\
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
\
timeout.tv_sec = 0; \
timeout.tv_nsec = 1; \
while (true) { \
nanosleep(&timeout, NULL); \
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
if (timeout.tv_sec == 0) { \
/* Double sleep time, up to max 1 second. */ \
timeout.tv_nsec <<= 1; \
if (timeout.tv_nsec >= 1000*1000*1000) { \
timeout.tv_sec = 1; \
timeout.tv_nsec = 0; \
} \
} \
} \
} \
a_attr void \
a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \
{ \
\
mtx_lock(&mq->lock); \
ql_elm_new(msg, a_field); \
ql_tail_insert(&mq->msgs, msg, a_field); \
mq->count++; \
mtx_unlock(&mq->lock); \
}
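/*
 * Minimal usage sketch (illustrative only; assumes the mq_msg_t/mq_t
 * setup shown in the comment at the top of this file):
 *
 *	mq_t mq;
 *	mq_msg_t msg;
 *
 *	if (!mq_init(&mq)) {
 *		mq_put(&mq, &msg);
 *		assert(mq_count(&mq) == 1);
 *		assert(mq_get(&mq) == &msg);
 *		mq_fini(&mq);
 *	}
 */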
| 2,992 | 25.963964 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params1279.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS1279_H
#define SFMT_PARAMS1279_H
#define POS1 7
#define SL1 14
#define SL2 3
#define SR1 5
#define SR2 1
#define MSK1 0xf7fefffdU
#define MSK2 0x7fefcfffU
#define MSK3 0xaff3ef3fU
#define MSK4 0xb5ffff7fU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x20000000U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
#endif /* SFMT_PARAMS1279_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params11213.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS11213_H
#define SFMT_PARAMS11213_H
#define POS1 68
#define SL1 14
#define SL2 3
#define SR1 7
#define SR2 3
#define MSK1 0xeffff7fbU
#define MSK2 0xffffffefU
#define MSK3 0xdfdfbfffU
#define MSK4 0x7fffdbfdU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xe8148000U
#define PARITY4 0xd0c7afa3U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
#endif /* SFMT_PARAMS11213_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-sse2.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT-sse2.h
* @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* @note We assume LITTLE ENDIAN in this file
*
* Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
 * The new BSD License is applied to this software; see LICENSE.txt
*/
#ifndef SFMT_SSE2_H
#define SFMT_SSE2_H
/**
* This function represents the recursion formula.
 * @param a a 128-bit part of the internal state array
 * @param b a 128-bit part of the internal state array
 * @param c a 128-bit part of the internal state array
 * @param d a 128-bit part of the internal state array
* @param mask 128-bit mask
* @return output
*/
JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
__m128i c, __m128i d, __m128i mask) {
    __m128i v, x, y, z;

    x = _mm_load_si128(a);
    y = _mm_srli_epi32(*b, SR1);	/* (b >> SR1) within each 32-bit lane. */
    z = _mm_srli_si128(c, SR2);	/* c shifted right by SR2 bytes. */
    v = _mm_slli_epi32(d, SL1);	/* (d << SL1) within each 32-bit lane. */
    z = _mm_xor_si128(z, x);
    z = _mm_xor_si128(z, v);
    x = _mm_slli_si128(x, SL2);	/* a shifted left by SL2 bytes. */
    y = _mm_and_si128(y, mask);	/* Mask the shifted b term. */
    /* z = a ^ (a << SL2) ^ ((b >> SR1) & mask) ^ (c >> SR2) ^ (d << SL1). */
    z = _mm_xor_si128(z, x);
    z = _mm_xor_si128(z, y);
    return z;
}
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
int i;
__m128i r, r1, r2, mask;
mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
for (i = 0; i < N - POS1; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
mask);
_mm_store_si128(&ctx->sfmt[i].si, r);
r1 = r2;
r2 = r;
}
for (; i < N; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&ctx->sfmt[i].si, r);
r1 = r2;
r2 = r;
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
 * @param array a 128-bit array to be filled with pseudorandom numbers.
 * @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
__m128i r, r1, r2, mask;
mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
for (i = 0; i < N - POS1; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
for (; i < N; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
/* main loop */
for (; i < size - N; i++) {
r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
for (j = 0; j < 2 * N - size; j++) {
r = _mm_load_si128(&array[j + size - N].si);
_mm_store_si128(&ctx->sfmt[j].si, r);
}
for (; i < size; i++) {
r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
_mm_store_si128(&ctx->sfmt[j++].si, r);
r1 = r2;
r2 = r;
}
}
#endif
| 5,215 | 32.012658 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/math.h | #ifndef JEMALLOC_ENABLE_INLINE
double ln_gamma(double x);
double i_gamma(double x, double p, double ln_gamma_p);
double pt_norm(double p);
double pt_chi2(double p, double df, double ln_gamma_df_2);
double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
/*
* Compute the natural log of Gamma(x), accurate to 10 decimal places.
*
* This implementation is based on:
*
* Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
* [S14]. Communications of the ACM 9(9):684.
*/
JEMALLOC_INLINE double
ln_gamma(double x)
{
double f, z;
assert(x > 0.0);
if (x < 7.0) {
f = 1.0;
z = x;
while (z < 7.0) {
f *= z;
z += 1.0;
}
x = z;
f = -log(f);
} else
f = 0.0;
z = 1.0 / (x * x);
return (f + (x-0.5) * log(x) - x + 0.918938533204673 +
(((-0.000595238095238 * z + 0.000793650793651) * z -
0.002777777777778) * z + 0.083333333333333) / x);
}
/*
* Compute the incomplete Gamma ratio for [0..x], where p is the shape
* parameter, and ln_gamma_p is ln_gamma(p).
*
* This implementation is based on:
*
* Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
* Applied Statistics 19:285-287.
*/
JEMALLOC_INLINE double
i_gamma(double x, double p, double ln_gamma_p)
{
double acu, factor, oflo, gin, term, rn, a, b, an, dif;
double pn[6];
unsigned i;
assert(p > 0.0);
assert(x >= 0.0);
if (x == 0.0)
return (0.0);
acu = 1.0e-10;
oflo = 1.0e30;
gin = 0.0;
factor = exp(p * log(x) - x - ln_gamma_p);
if (x <= 1.0 || x < p) {
/* Calculation by series expansion. */
gin = 1.0;
term = 1.0;
rn = p;
while (true) {
rn += 1.0;
term *= x / rn;
gin += term;
if (term <= acu) {
gin *= factor / p;
return (gin);
}
}
} else {
/* Calculation by continued fraction. */
a = 1.0 - p;
b = a + x + 1.0;
term = 0.0;
pn[0] = 1.0;
pn[1] = x;
pn[2] = x + 1.0;
pn[3] = x * b;
gin = pn[2] / pn[3];
while (true) {
a += 1.0;
b += 2.0;
term += 1.0;
an = a * term;
for (i = 0; i < 2; i++)
pn[i+4] = b * pn[i+2] - an * pn[i];
if (pn[5] != 0.0) {
rn = pn[4] / pn[5];
dif = fabs(gin - rn);
if (dif <= acu && dif <= acu * rn) {
gin = 1.0 - factor * gin;
return (gin);
}
gin = rn;
}
for (i = 0; i < 4; i++)
pn[i] = pn[i+2];
if (fabs(pn[4]) >= oflo) {
for (i = 0; i < 4; i++)
pn[i] /= oflo;
}
}
}
}
/*
* Given a value p in [0..1] of the lower tail area of the normal distribution,
* compute the limit on the definite integral from [-inf..z] that satisfies p,
* accurate to 16 decimal places.
*
* This implementation is based on:
*
* Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
* distribution. Applied Statistics 37(3):477-484.
*/
JEMALLOC_INLINE double
pt_norm(double p)
{
double q, r, ret;
assert(p > 0.0 && p < 1.0);
q = p - 0.5;
if (fabs(q) <= 0.425) {
/* p close to 1/2. */
r = 0.180625 - q * q;
return (q * (((((((2.5090809287301226727e3 * r +
3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
* r + 3.3871328727963666080e0) /
(((((((5.2264952788528545610e3 * r +
2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
* r + 1.0));
} else {
if (q < 0.0)
r = p;
else
r = 1.0 - p;
assert(r > 0.0);
r = sqrt(-log(r));
if (r <= 5.0) {
/* p neither close to 1/2 nor 0 or 1. */
r -= 1.6;
ret = ((((((((7.74545014278341407640e-4 * r +
2.27238449892691845833e-2) * r +
2.41780725177450611770e-1) * r +
1.27045825245236838258e0) * r +
3.64784832476320460504e0) * r +
5.76949722146069140550e0) * r +
4.63033784615654529590e0) * r +
1.42343711074968357734e0) /
(((((((1.05075007164441684324e-9 * r +
5.47593808499534494600e-4) * r +
1.51986665636164571966e-2)
* r + 1.48103976427480074590e-1) * r +
6.89767334985100004550e-1) * r +
1.67638483018380384940e0) * r +
2.05319162663775882187e0) * r + 1.0));
} else {
/* p near 0 or 1. */
r -= 5.0;
ret = ((((((((2.01033439929228813265e-7 * r +
2.71155556874348757815e-5) * r +
1.24266094738807843860e-3) * r +
2.65321895265761230930e-2) * r +
2.96560571828504891230e-1) * r +
1.78482653991729133580e0) * r +
5.46378491116411436990e0) * r +
6.65790464350110377720e0) /
(((((((2.04426310338993978564e-15 * r +
1.42151175831644588870e-7) * r +
1.84631831751005468180e-5) * r +
7.86869131145613259100e-4) * r +
1.48753612908506148525e-2) * r +
1.36929880922735805310e-1) * r +
5.99832206555887937690e-1)
* r + 1.0));
}
if (q < 0.0)
ret = -ret;
return (ret);
}
}
/*
* Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
* with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
* the upper limit on the definite integral from [0..z] that satisfies p,
* accurate to 12 decimal places.
*
* This implementation is based on:
*
* Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
* the Chi^2 distribution. Applied Statistics 24(3):385-388.
*
* Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
* points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
*/
JEMALLOC_INLINE double
pt_chi2(double p, double df, double ln_gamma_df_2)
{
double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
unsigned i;
assert(p >= 0.0 && p < 1.0);
assert(df > 0.0);
e = 5.0e-7;
aa = 0.6931471805;
xx = 0.5 * df;
c = xx - 1.0;
if (df < -1.24 * log(p)) {
/* Starting approximation for small Chi^2. */
ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
if (ch - e < 0.0)
return (ch);
} else {
if (df > 0.32) {
x = pt_norm(p);
/*
* Starting approximation using Wilson and Hilferty
* estimate.
*/
p1 = 0.222222 / df;
ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
/* Starting approximation for p tending to 1. */
if (ch > 2.2 * df + 6.0) {
ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
ln_gamma_df_2);
}
} else {
ch = 0.4;
a = log(1.0 - p);
while (true) {
q = ch;
p1 = 1.0 + ch * (4.67 + ch);
p2 = ch * (6.73 + ch * (6.66 + ch));
t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
* (13.32 + 3.0 * ch)) / p2;
ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
c * aa) * p2 / p1) / t;
if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
break;
}
}
}
for (i = 0; i < 20; i++) {
/* Calculation of seven-term Taylor series. */
q = ch;
p1 = 0.5 * ch;
if (p1 < 0.0)
return (-1.0);
p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
b = t / ch;
a = 0.5 * t - b * c;
s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
60.0 * a))))) / 420.0;
s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
a)))) / 2520.0;
s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
(889.0 + 1740.0 * a))) / 5040.0;
s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
- b * (s4 - b * (s5 - b * s6))))));
if (fabs(q / ch - 1.0) <= e)
break;
}
return (ch);
}
/*
* Given a value p in [0..1] and Gamma distribution shape and scale parameters,
 * compute the upper limit on the definite integral from [0..z] that satisfies
* p.
*/
JEMALLOC_INLINE double
pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
{
return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
}
#endif
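/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * how the helpers above compose, assuming libm is linked in; the guard macro
 * and the wrapper name chi2_critical_value() are hypothetical.
 */
#ifdef MATH_H_USAGE_EXAMPLE
static double
chi2_critical_value(double p, double df)
{

	/* pt_chi2() expects the caller to precompute ln_gamma(df/2.0). */
	return (pt_chi2(p, df, ln_gamma(df / 2.0)));
}
#endif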
| 8,173 | 25.198718 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mtx.h | /*
* mtx is a slightly simplified version of malloc_mutex. This code duplication
* is unfortunate, but there are allocator bootstrapping considerations that
* would leak into the test infrastructure if malloc_mutex were used directly
* in tests.
*/
typedef struct {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#else
pthread_mutex_t lock;
#endif
} mtx_t;
bool mtx_init(mtx_t *mtx);
void mtx_fini(mtx_t *mtx);
void mtx_lock(mtx_t *mtx);
void mtx_unlock(mtx_t *mtx);
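/*
 * Illustrative only (not part of the original header): a minimal usage
 * sketch, assuming the accompanying mtx.c implementation is linked in; the
 * guard macro and the names count_mtx, count, and count_incr() are
 * hypothetical.
 */
#ifdef MTX_H_USAGE_EXAMPLE
static mtx_t count_mtx;	/* Set up once via mtx_init(&count_mtx). */
static unsigned count;

static void
count_incr(void)
{

	mtx_lock(&count_mtx);
	count++;
	mtx_unlock(&count_mtx);
}
#endif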
| 520 | 22.681818 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params2281.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS2281_H
#define SFMT_PARAMS2281_H
#define POS1 12
#define SL1 19
#define SL2 1
#define SR1 5
#define SR2 1
#define MSK1 0xbff7ffbfU
#define MSK2 0xfdfffffeU
#define MSK3 0xf7ffef7fU
#define MSK4 0xf2f7cbbfU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x41dfa600U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
#endif /* SFMT_PARAMS2281_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params19937.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS19937_H
#define SFMT_PARAMS19937_H
#define POS1 122
#define SL1 18
#define SL2 1
#define SR1 11
#define SR2 1
#define MSK1 0xdfffffefU
#define MSK2 0xddfecb7fU
#define MSK3 0xbffaffffU
#define MSK4 0xbffffff6U
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x13c9e684U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
#endif /* SFMT_PARAMS19937_H */
| 3,560 | 42.426829 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/test.h | #define ASSERT_BUFSIZE 256
#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
t a_ = (a); \
t b_ = (b); \
if (!(a_ cmp b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) "#cmp" (%s) --> " \
"%"pri" "#neg_cmp" %"pri": ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_, b_); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
==, "p", __VA_ARGS__)
#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
==, "p", __VA_ARGS__)
#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
!=, "ld", __VA_ARGS__)
#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
==, "ld", __VA_ARGS__)
#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
>=, "ld", __VA_ARGS__)
#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
>, "ld", __VA_ARGS__)
#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
<, "ld", __VA_ARGS__)
#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
<=, "ld", __VA_ARGS__)
#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
a, b, ==, !=, "lu", __VA_ARGS__)
#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
a, b, !=, ==, "lu", __VA_ARGS__)
#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
a, b, <, >=, "lu", __VA_ARGS__)
#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
a, b, <=, >, "lu", __VA_ARGS__)
#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
a, b, >=, <, "lu", __VA_ARGS__)
#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
a, b, >, <=, "lu", __VA_ARGS__)
#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
!=, "qd", __VA_ARGS__)
#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
==, "qd", __VA_ARGS__)
#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
>=, "qd", __VA_ARGS__)
#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
>, "qd", __VA_ARGS__)
#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
<, "qd", __VA_ARGS__)
#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
<=, "qd", __VA_ARGS__)
#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
a, b, ==, !=, "qu", __VA_ARGS__)
#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
a, b, !=, ==, "qu", __VA_ARGS__)
#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
a, b, <, >=, "qu", __VA_ARGS__)
#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
a, b, <=, >, "qu", __VA_ARGS__)
#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
a, b, >=, <, "qu", __VA_ARGS__)
#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
a, b, >, <=, "qu", __VA_ARGS__)
#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
!=, "jd", __VA_ARGS__)
#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
==, "jd", __VA_ARGS__)
#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
>=, "jd", __VA_ARGS__)
#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
>, "jd", __VA_ARGS__)
#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
<, "jd", __VA_ARGS__)
#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
<=, "jd", __VA_ARGS__)
#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
!=, "ju", __VA_ARGS__)
#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
==, "ju", __VA_ARGS__)
#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
>=, "ju", __VA_ARGS__)
#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
>, "ju", __VA_ARGS__)
#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
<, "ju", __VA_ARGS__)
#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
<=, "ju", __VA_ARGS__)
#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
!=, "zd", __VA_ARGS__)
#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
==, "zd", __VA_ARGS__)
#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
>=, "zd", __VA_ARGS__)
#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
>, "zd", __VA_ARGS__)
#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
<, "zd", __VA_ARGS__)
#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
<=, "zd", __VA_ARGS__)
#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
!=, "zu", __VA_ARGS__)
#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
==, "zu", __VA_ARGS__)
#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
>=, "zu", __VA_ARGS__)
#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
>, "zu", __VA_ARGS__)
#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
<, "zu", __VA_ARGS__)
#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
<=, "zu", __VA_ARGS__)
#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
!=, PRId32, __VA_ARGS__)
#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
==, PRId32, __VA_ARGS__)
#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
>=, PRId32, __VA_ARGS__)
#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
>, PRId32, __VA_ARGS__)
#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
<, PRId32, __VA_ARGS__)
#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
<=, PRId32, __VA_ARGS__)
#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
!=, PRIu32, __VA_ARGS__)
#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
==, PRIu32, __VA_ARGS__)
#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
>=, PRIu32, __VA_ARGS__)
#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
>, PRIu32, __VA_ARGS__)
#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
<, PRIu32, __VA_ARGS__)
#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
<=, PRIu32, __VA_ARGS__)
#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
!=, PRId64, __VA_ARGS__)
#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
==, PRId64, __VA_ARGS__)
#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
>=, PRId64, __VA_ARGS__)
#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
>, PRId64, __VA_ARGS__)
#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
<, PRId64, __VA_ARGS__)
#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
<=, PRId64, __VA_ARGS__)
#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
!=, PRIu64, __VA_ARGS__)
#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
==, PRIu64, __VA_ARGS__)
#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
>=, PRIu64, __VA_ARGS__)
#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
>, PRIu64, __VA_ARGS__)
#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
<, PRIu64, __VA_ARGS__)
#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
<=, PRIu64, __VA_ARGS__)
#define assert_b_eq(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ == b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) == (%s) --> %s != %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_b_ne(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ != b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) != (%s) --> %s == %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
#define assert_str_eq(a, b, ...) do { \
if (strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) same as (%s) --> " \
"\"%s\" differs from \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_str_ne(a, b, ...) do { \
if (!strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) differs from (%s) --> " \
"\"%s\" same as \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_not_reached(...) do { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Unreachable code reached: ", \
__func__, __FILE__, __LINE__); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} while (0)
/*
* If this enum changes, corresponding changes in test/test.sh.in are also
* necessary.
*/
typedef enum {
test_status_pass = 0,
test_status_skip = 1,
test_status_fail = 2,
test_status_count = 3
} test_status_t;
typedef void (test_t)(void);
#define TEST_BEGIN(f) \
static void \
f(void) \
{ \
p_test_init(#f);
#define TEST_END \
goto label_test_end; \
label_test_end: \
p_test_fini(); \
}
#define test(...) \
p_test(__VA_ARGS__, NULL)
#define test_not_init(...) \
p_test_not_init(__VA_ARGS__, NULL)
#define test_skip_if(e) do { \
if (e) { \
test_skip("%s:%s:%d: Test skipped: (%s)", \
__func__, __FILE__, __LINE__, #e); \
goto label_test_end; \
} \
} while (0)
void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
/* For private use by macros. */
test_status_t p_test(test_t *t, ...);
test_status_t p_test_not_init(test_t *t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message);
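/*
 * Illustrative only (not part of the original header): a minimal sketch of a
 * test translation unit built from the macros above. Such code normally
 * lives in a test .c file; the guard macro and the test name are
 * hypothetical.
 */
#ifdef TEST_H_USAGE_EXAMPLE
TEST_BEGIN(test_example)
{
	int x = 41 + 1;

	test_skip_if(sizeof(void *) < 4);
	assert_d_eq(x, 42, "Unexpected value for x");
	assert_zu_le(sizeof(int), sizeof(long), "Unexpected integer sizes");
}
TEST_END

int
main(void)
{

	/* test_status_pass is 0, so the result can serve as an exit code. */
	return (test(test_example));
}
#endif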
| 13,309 | 38.731343 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT.h
*
* @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
* number generator
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
 * The new BSD License is applied to this software;
 * see LICENSE.txt.
*
* @note We assume that your system has inttypes.h. If your system
* doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
* and you have to define PRIu64 and PRIx64 in this file as follows:
* @verbatim
typedef unsigned int uint32_t
typedef unsigned long long uint64_t
#define PRIu64 "llu"
#define PRIx64 "llx"
@endverbatim
 * uint32_t must be exactly a 32-bit unsigned integer type (no more, no
 * less), and uint64_t must be exactly a 64-bit unsigned integer type.
 * PRIu64 and PRIx64 are used by the printf functions to print a 64-bit
 * unsigned int in decimal and hexadecimal format, respectively.
*/
#ifndef SFMT_H
#define SFMT_H
typedef struct sfmt_s sfmt_t;
uint32_t gen_rand32(sfmt_t *ctx);
uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
uint64_t gen_rand64(sfmt_t *ctx);
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
sfmt_t *init_gen_rand(uint32_t seed);
sfmt_t *init_by_array(uint32_t *init_key, int key_length);
void fini_gen_rand(sfmt_t *ctx);
const char *get_idstring(void);
int get_min_array_size32(void);
int get_min_array_size64(void);
#ifndef JEMALLOC_ENABLE_INLINE
double to_real1(uint32_t v);
double genrand_real1(sfmt_t *ctx);
double to_real2(uint32_t v);
double genrand_real2(sfmt_t *ctx);
double to_real3(uint32_t v);
double genrand_real3(sfmt_t *ctx);
double to_res53(uint64_t v);
double to_res53_mix(uint32_t x, uint32_t y);
double genrand_res53(sfmt_t *ctx);
double genrand_res53_mix(sfmt_t *ctx);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
/* These real versions are due to Isaku Wada */
/** generates a random number on [0,1]-real-interval */
JEMALLOC_INLINE double to_real1(uint32_t v)
{
return v * (1.0/4294967295.0);
/* divided by 2^32-1 */
}
/** generates a random number on [0,1]-real-interval */
JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
{
return to_real1(gen_rand32(ctx));
}
/** generates a random number on [0,1)-real-interval */
JEMALLOC_INLINE double to_real2(uint32_t v)
{
return v * (1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on [0,1)-real-interval */
JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
{
return to_real2(gen_rand32(ctx));
}
/** generates a random number on (0,1)-real-interval */
JEMALLOC_INLINE double to_real3(uint32_t v)
{
return (((double)v) + 0.5)*(1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on (0,1)-real-interval */
JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
{
return to_real3(gen_rand32(ctx));
}
/** These real versions are due to Isaku Wada */
/** generates a random number on [0,1) with 53-bit resolution*/
JEMALLOC_INLINE double to_res53(uint64_t v)
{
return v * (1.0/18446744073709551616.0L);
}
/** generates a random number on [0,1) with 53-bit resolution from two
 * 32-bit integers */
JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
{
return to_res53(x | ((uint64_t)y << 32));
}
/** generates a random number on [0,1) with 53-bit resolution
*/
JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
{
return to_res53(gen_rand64(ctx));
}
/** generates a random number on [0,1) with 53-bit resolution
using two 32-bit integers.
*/
JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
{
uint32_t x, y;
x = gen_rand32(ctx);
y = gen_rand32(ctx);
return to_res53_mix(x, y);
}
#endif
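/*
 * Illustrative only (not part of the original header): a minimal usage
 * sketch of the generator API declared above, assuming SFMT.c is compiled
 * into the program; the guard macro is hypothetical.
 */
#ifdef SFMT_H_USAGE_EXAMPLE
#include <stdio.h>

int
main(void)
{
	sfmt_t *ctx;
	unsigned i;

	ctx = init_gen_rand(12345);	/* Seed the generator state. */
	for (i = 0; i < 4; i++) {
		/* Uniformly distributed integers in [0..99]. */
		printf("%u\n", gen_rand32_range(ctx, 100));
	}
	printf("%f\n", genrand_real2(ctx));	/* Double in [0,1). */
	fini_gen_rand(ctx);	/* Release the generator state. */
	return (0);
}
#endif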
#endif
| 5,805 | 32.755814 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params44497.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS44497_H
#define SFMT_PARAMS44497_H
#define POS1 330
#define SL1 5
#define SL2 3
#define SR1 9
#define SR2 3
#define MSK1 0xeffffffbU
#define MSK2 0xdfbebfffU
#define MSK3 0xbfbf7befU
#define MSK4 0x9ffd7bffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xa3ac4000U
#define PARITY4 0xecc1327aU
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
#endif /* SFMT_PARAMS44497_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-alti.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT-alti.h
*
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
* pseudorandom number generator
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
 * The new BSD License is applied to this software;
 * see LICENSE.txt.
*/
#ifndef SFMT_ALTI_H
#define SFMT_ALTI_H
/**
* This function represents the recursion formula in AltiVec and BIG ENDIAN.
 * @param a a 128-bit part of the internal state array
 * @param b a 128-bit part of the internal state array
 * @param c a 128-bit part of the internal state array
 * @param d a 128-bit part of the internal state array
* @return output
*/
JEMALLOC_ALWAYS_INLINE
vector unsigned int vec_recursion(vector unsigned int a,
vector unsigned int b,
vector unsigned int c,
vector unsigned int d) {
const vector unsigned int sl1 = ALTI_SL1;
const vector unsigned int sr1 = ALTI_SR1;
#ifdef ONLY64
const vector unsigned int mask = ALTI_MSK64;
const vector unsigned char perm_sl = ALTI_SL2_PERM64;
const vector unsigned char perm_sr = ALTI_SR2_PERM64;
#else
const vector unsigned int mask = ALTI_MSK;
const vector unsigned char perm_sl = ALTI_SL2_PERM;
const vector unsigned char perm_sr = ALTI_SR2_PERM;
#endif
    vector unsigned int v, w, x, y, z;

    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); /* a << SL2 bytes. */
    v = a;
    y = vec_sr(b, sr1); /* (b >> SR1) within each 32-bit lane. */
    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); /* c >> SR2 bytes. */
    w = vec_sl(d, sl1); /* (d << SL1) within each 32-bit lane. */
    z = vec_xor(z, w);
    y = vec_and(y, mask); /* Mask the shifted b term. */
    v = vec_xor(v, x);
    /* z = a ^ (a << SL2) ^ ((b >> SR1) & mask) ^ (c >> SR2) ^ (d << SL1). */
    z = vec_xor(z, y);
    z = vec_xor(z, v);
    return z;
}
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
int i;
vector unsigned int r, r1, r2;
r1 = ctx->sfmt[N - 2].s;
r2 = ctx->sfmt[N - 1].s;
for (i = 0; i < N - POS1; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
ctx->sfmt[i].s = r;
r1 = r2;
r2 = r;
}
for (; i < N; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
ctx->sfmt[i].s = r;
r1 = r2;
r2 = r;
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
 * @param array a 128-bit array to be filled with pseudorandom numbers.
 * @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
vector unsigned int r, r1, r2;
r1 = ctx->sfmt[N - 2].s;
r2 = ctx->sfmt[N - 1].s;
for (i = 0; i < N - POS1; i++) {
r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
for (; i < N; i++) {
r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
/* main loop */
for (; i < size - N; i++) {
r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
r1 = r2;
r2 = r;
}
for (j = 0; j < 2 * N - size; j++) {
ctx->sfmt[j].s = array[j + size - N].s;
}
for (; i < size; i++) {
r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
array[i].s = r;
ctx->sfmt[j++].s = r;
r1 = r2;
r2 = r;
}
}
#ifndef ONLY64
#if defined(__APPLE__)
#define ALTI_SWAP (vector unsigned char) \
(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
#else
#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
#endif
/**
 * This function swaps the high and low 32 bits of the 64-bit integers in a
 * user-specified array.
 *
 * @param array a 128-bit array to be swapped.
 * @param size size of the 128-bit array.
*/
JEMALLOC_INLINE void swap(w128_t *array, int size) {
int i;
const vector unsigned char perm = ALTI_SWAP;
for (i = 0; i < size; i++) {
array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
}
}
#endif
#endif
| 5,921 | 30.668449 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params86243.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS86243_H
#define SFMT_PARAMS86243_H
#define POS1 366
#define SL1 6
#define SL2 7
#define SR1 19
#define SR2 1
#define MSK1 0xfdbffbffU
#define MSK2 0xbff7ff3fU
#define MSK3 0xfd77efffU
#define MSK4 0xbf9ff3ffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0xe9528d85U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
#define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
#endif /* SFMT_PARAMS86243_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params132049.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS132049_H
#define SFMT_PARAMS132049_H
#define POS1 110
#define SL1 19
#define SL2 1
#define SR1 21
#define SR2 1
#define MSK1 0xffffbb5fU
#define MSK2 0xfb6ebf95U
#define MSK3 0xfffefffaU
#define MSK4 0xcff77fffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xcb520000U
#define PARITY4 0xc7e91c7dU
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
#endif /* SFMT_PARAMS132049_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/inttypes.h | // ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// 7.8 Format conversion of integer types
typedef struct {
intmax_t quot;
intmax_t rem;
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
#ifdef _WIN64
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIdLEAST32 "d"
#define PRIiLEAST32 "i"
#define PRIdFAST32 "d"
#define PRIiFAST32 "i"
#define PRId64 __PRI64_PREFIX "d"
#define PRIi64 __PRI64_PREFIX "i"
#define PRIdLEAST64 __PRI64_PREFIX "d"
#define PRIiLEAST64 __PRI64_PREFIX "i"
#define PRIdFAST64 __PRI64_PREFIX "d"
#define PRIiFAST64 __PRI64_PREFIX "i"
#define PRIdMAX __PRI64_PREFIX "d"
#define PRIiMAX __PRI64_PREFIX "i"
#define PRIdPTR __PRIPTR_PREFIX "d"
#define PRIiPTR __PRIPTR_PREFIX "i"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRIoLEAST32 "o"
#define PRIuLEAST32 "u"
#define PRIxLEAST32 "x"
#define PRIXLEAST32 "X"
#define PRIoFAST32 "o"
#define PRIuFAST32 "u"
#define PRIxFAST32 "x"
#define PRIXFAST32 "X"
#define PRIo64 __PRI64_PREFIX "o"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIX64 __PRI64_PREFIX "X"
#define PRIoLEAST64 __PRI64_PREFIX "o"
#define PRIuLEAST64 __PRI64_PREFIX "u"
#define PRIxLEAST64 __PRI64_PREFIX "x"
#define PRIXLEAST64 __PRI64_PREFIX "X"
#define PRIoFAST64 __PRI64_PREFIX "o"
#define PRIuFAST64 __PRI64_PREFIX "u"
#define PRIxFAST64 __PRI64_PREFIX "x"
#define PRIXFAST64 __PRI64_PREFIX "X"
#define PRIoMAX __PRI64_PREFIX "o"
#define PRIuMAX __PRI64_PREFIX "u"
#define PRIxMAX __PRI64_PREFIX "x"
#define PRIXMAX __PRI64_PREFIX "X"
#define PRIoPTR __PRIPTR_PREFIX "o"
#define PRIuPTR __PRIPTR_PREFIX "u"
#define PRIxPTR __PRIPTR_PREFIX "x"
#define PRIXPTR __PRIPTR_PREFIX "X"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is a modified version of the div() function from Microsoft's div.c,
// found in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
imaxdiv_t result;
result.quot = numer / denom;
result.rem = numer % denom;
if (numer < 0 && result.rem > 0) {
// quotient was rounded toward -infinity; adjust to C99 truncation toward zero
++result.quot;
result.rem -= denom;
}
return result;
}
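// Usage sketch (illustrative, not part of the original header): imaxdiv()
// truncates toward zero per C99, and the PRI* macros plug into printf
// format strings:
//
//	int64_t n = -7;
//	imaxdiv_t d = imaxdiv(n, 2);	// d.quot == -3, d.rem == -1
//	printf("%" PRId64 " = %" PRIdMAX " * 2 + %" PRIdMAX "\n",
//	    n, d.quot, d.rem);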
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_INTTYPES_H_ ]
| 8,491 | 26.044586 | 94 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or the compiler gives many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 don't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
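// Example (illustrative, not part of the original header): INT64_C(1)
// expands to 1i64 and UINT64_C(1) to 1ui64, MSVC's 64-bit literal suffixes,
// so INTMAX_C(-1) yields an intmax_t-sized constant.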
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/inttypes.h | // ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// 7.8 Format conversion of integer types
typedef struct {
intmax_t quot;
intmax_t rem;
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
#ifdef _WIN64
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIdLEAST32 "d"
#define PRIiLEAST32 "i"
#define PRIdFAST32 "d"
#define PRIiFAST32 "i"
#define PRId64 __PRI64_PREFIX "d"
#define PRIi64 __PRI64_PREFIX "i"
#define PRIdLEAST64 __PRI64_PREFIX "d"
#define PRIiLEAST64 __PRI64_PREFIX "i"
#define PRIdFAST64 __PRI64_PREFIX "d"
#define PRIiFAST64 __PRI64_PREFIX "i"
#define PRIdMAX __PRI64_PREFIX "d"
#define PRIiMAX __PRI64_PREFIX "i"
#define PRIdPTR __PRIPTR_PREFIX "d"
#define PRIiPTR __PRIPTR_PREFIX "i"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRIoLEAST32 "o"
#define PRIuLEAST32 "u"
#define PRIxLEAST32 "x"
#define PRIXLEAST32 "X"
#define PRIoFAST32 "o"
#define PRIuFAST32 "u"
#define PRIxFAST32 "x"
#define PRIXFAST32 "X"
#define PRIo64 __PRI64_PREFIX "o"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIX64 __PRI64_PREFIX "X"
#define PRIoLEAST64 __PRI64_PREFIX "o"
#define PRIuLEAST64 __PRI64_PREFIX "u"
#define PRIxLEAST64 __PRI64_PREFIX "x"
#define PRIXLEAST64 __PRI64_PREFIX "X"
#define PRIoFAST64 __PRI64_PREFIX "o"
#define PRIuFAST64 __PRI64_PREFIX "u"
#define PRIxFAST64 __PRI64_PREFIX "x"
#define PRIXFAST64 __PRI64_PREFIX "X"
#define PRIoMAX __PRI64_PREFIX "o"
#define PRIuMAX __PRI64_PREFIX "u"
#define PRIxMAX __PRI64_PREFIX "x"
#define PRIXMAX __PRI64_PREFIX "X"
#define PRIoPTR __PRIPTR_PREFIX "o"
#define PRIuPTR __PRIPTR_PREFIX "u"
#define PRIxPTR __PRIPTR_PREFIX "x"
#define PRIXPTR __PRIPTR_PREFIX "X"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is a modified version of the div() function from Microsoft's div.c,
// found in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
imaxdiv_t result;
result.quot = numer / denom;
result.rem = numer % denom;
if (numer < 0 && result.rem > 0) {
// quotient was rounded toward -infinity; adjust to C99 truncation toward zero
++result.quot;
result.rem -= denom;
}
return result;
}
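// Usage sketch (illustrative, not part of the original header): imaxdiv()
// truncates toward zero per C99, and the PRI* macros plug into printf
// format strings:
//
//	int64_t n = -7;
//	imaxdiv_t d = imaxdiv(n, 2);	// d.quot == -3, d.rem == -1
//	printf("%" PRId64 " = %" PRIdMAX " * 2 + %" PRIdMAX "\n",
//	    n, d.quot, d.rem);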
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_INTTYPES_H_ ]
| 8,491 | 26.044586 | 94 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or the compiler gives many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 don't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
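// Example (illustrative, not part of the original header): INT64_C(1)
// expands to 1i64 and UINT64_C(1) to 1ui64, MSVC's 64-bit literal suffixes,
// so INTMAX_C(-1) yields an intmax_t-sized constant.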
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/mutex.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct malloc_mutex_s malloc_mutex_t;
#if (defined(_WIN32) || defined(JEMALLOC_OSSPIN)\
|| defined(JEMALLOC_MUTEX_INIT_CB)\
|| defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
#define JEMALLOC_NO_RWLOCKS
typedef malloc_mutex_t malloc_rwlock_t;
#else
typedef struct malloc_rwlock_s malloc_rwlock_t;
#endif
#if (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
#ifndef JEMALLOC_NO_RWLOCKS
struct malloc_rwlock_s {
pthread_rwlock_t lock;
};
#endif
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
#ifdef JEMALLOC_NO_RWLOCKS
#undef malloc_rwlock_init
#undef malloc_rwlock_destroy
#define malloc_rwlock_init malloc_mutex_init
#define malloc_rwlock_destroy malloc_mutex_destroy
#endif
void malloc_rwlock_prefork(malloc_rwlock_t *rwlock);
void malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock);
void malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
void malloc_mutex_destroy(malloc_mutex_t *mutex);
#ifndef JEMALLOC_NO_RWLOCKS
bool malloc_rwlock_init(malloc_rwlock_t *rwlock);
void malloc_rwlock_destroy(malloc_rwlock_t *rwlock);
#endif
void malloc_rwlock_rdlock(malloc_rwlock_t *rwlock);
void malloc_rwlock_wrlock(malloc_rwlock_t *rwlock);
void malloc_rwlock_unlock(malloc_rwlock_t *rwlock);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef _WIN32
EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef _WIN32
LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_destroy(malloc_mutex_t *mutex)
{
#if (!defined(_WIN32) && !defined(JEMALLOC_OSSPIN)\
&& !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_JET))
pthread_mutex_destroy(&mutex->lock);
#endif
}
JEMALLOC_INLINE void
malloc_rwlock_rdlock(malloc_rwlock_t *rwlock)
{
if (isthreaded) {
#ifdef _WIN32
EnterCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
pthread_mutex_lock(&rwlock->lock);
#else
pthread_rwlock_rdlock(&rwlock->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_rwlock_wrlock(malloc_rwlock_t *rwlock)
{
if (isthreaded) {
#ifdef _WIN32
EnterCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
pthread_mutex_lock(&rwlock->lock);
#else
pthread_rwlock_wrlock(&rwlock->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_rwlock_unlock(malloc_rwlock_t *rwlock)
{
if (isthreaded) {
#ifdef _WIN32
LeaveCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
pthread_mutex_unlock(&rwlock->lock);
#else
pthread_rwlock_unlock(&rwlock->lock);
#endif
}
}
#ifndef JEMALLOC_NO_RWLOCKS
JEMALLOC_INLINE bool
malloc_rwlock_init(malloc_rwlock_t *rwlock)
{
if (isthreaded) {
if (pthread_rwlock_init(&rwlock->lock, NULL) != 0)
return (true);
}
return (false);
}
JEMALLOC_INLINE void
malloc_rwlock_destroy(malloc_rwlock_t *rwlock)
{
if (isthreaded) {
pthread_rwlock_destroy(&rwlock->lock);
}
}
#endif
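/*
 * Usage sketch (illustrative, not part of the original header; error
 * handling abbreviated). Note that lock/unlock are no-ops unless
 * isthreaded is true:
 *
 *	malloc_mutex_t lock;
 *	if (malloc_mutex_init(&lock))
 *		abort();
 *	malloc_mutex_lock(&lock);
 *	// ... critical section ...
 *	malloc_mutex_unlock(&lock);
 *	malloc_mutex_destroy(&lock);
 */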
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,281 | 24.516908 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ctl.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(const size_t *, size_t, void *, size_t *,
void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
};
struct ctl_stats_s {
struct {
size_t current; /* stats_chunks.curchunks */
uint64_t total; /* stats_chunks.nchunks */
size_t high; /* stats_chunks.highchunks */
} chunks;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(void);
void ctl_postfork_parent(void);
void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
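/*
 * Usage sketch (illustrative only; assumes the standard "epoch" mallctl
 * name is available in this build). The wrappers above abort() on failure
 * rather than returning an error:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	xmallctl("epoch", &epoch, &sz, &epoch, sz);
 */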
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 3,172 | 27.845455 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/vector.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct vector_s vector_t;
typedef struct vec_list_s vec_list_t;
#define VECTOR_MIN_PART_SIZE 8
#define VECTOR_INITIALIZER JEMALLOC_ARG_CONCAT({.list = NULL})
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct vec_list_s {
vec_list_t *next;
int length;
void *data[];
};
struct vector_s {
vec_list_t *list;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *vec_get(vector_t *vector, int index);
void vec_set(vector_t *vector, int index, void *val);
void vec_delete(vector_t *vector);
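/*
 * Usage sketch (illustrative, not part of the original header; some_ptr is
 * a placeholder). The vector grows on demand, so no explicit sizing is
 * needed, and unset indices are expected to yield NULL:
 *
 *	vector_t v = VECTOR_INITIALIZER;
 *	vec_set(&v, 10, some_ptr);
 *	void *p = vec_get(&v, 10);	// p == some_ptr
 *	vec_delete(&v);
 */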
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,063 | 26.282051 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ql.h | /*
* List definitions.
*/
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
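/*
 * Usage sketch (illustrative, not part of the original header), assuming a
 * caller-defined widget_t with an embedded linkage field:
 *
 *	typedef struct widget_s widget_t;
 *	struct widget_s {
 *		int id;
 *		ql_elm(widget_t) link;	// embedded ring linkage
 *	};
 *
 *	ql_head(widget_t) widgets;
 *	ql_new(&widgets);
 *	widget_t w;
 *	ql_elm_new(&w, link);
 *	ql_tail_insert(&widgets, &w, link);
 *	widget_t *i;
 *	ql_foreach(i, &widgets, link) {
 *		// visits elements in insertion order
 *	}
 */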
| 2,373 | 27.261905 | 65 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/qr.h | /* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
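/*
 * Usage sketch (illustrative, not part of the original header): rings have
 * no head structure, so any element serves as an entry point. Assuming a
 * caller-defined node_t:
 *
 *	typedef struct node_s node_t;
 *	struct node_s {
 *		int val;
 *		qr(node_t) link;
 *	};
 *
 *	node_t a, b;
 *	qr_new(&a, link);		// a alone forms a one-element ring
 *	qr_new(&b, link);
 *	qr_after_insert(&a, &b, link);	// ring is now a -> b -> a
 *	node_t *i;
 *	qr_foreach(i, &a, link) {
 *		// visits a, then b
 *	}
 */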
| 2,255 | 32.176471 | 78 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk_mmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool pages_purge(void *addr, size_t length, bool file_mapped);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 819 | 34.652174 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
#define LG_CHUNK_DEFAULT 22
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
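/*
 * Worked example (assuming the default LG_CHUNK_DEFAULT == 22, i.e. 4 MiB
 * chunks): chunksize == 0x400000 and chunksize_mask == 0x3fffff, so for
 * a == 0x400123, CHUNK_ADDR2BASE(a) == 0x400000, CHUNK_ADDR2OFFSET(a) ==
 * 0x123, and CHUNK_CEILING(0x500000) == 0x800000.
 */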
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
extern const char *opt_dss;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
void *chunk_alloc_base(pool_t *pool, size_t size);
void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
chunk_dalloc_t *chunk_dalloc, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero);
void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
bool *zero, unsigned arena_ind, pool_t *pool);
void chunk_unmap(pool_t *pool, void *chunk, size_t size);
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool);
void chunk_record(pool_t *pool, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, void *chunk, size_t size, bool zeroed);
bool chunk_global_boot(void);
bool chunk_boot(pool_t *pool);
bool chunk_init(pool_t *pool);
void chunk_prefork0(pool_t *pool);
void chunk_prefork1(pool_t *pool);
void chunk_postfork_parent0(pool_t *pool);
void chunk_postfork_parent1(pool_t *pool);
void chunk_postfork_child0(pool_t *pool);
void chunk_postfork_child1(pool_t *pool);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
| 2,490 | 35.632353 | 86 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ckh.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
const void *key;
const void *data;
};
struct ckh_s {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
uint64_t nshrinks;
uint64_t nshrinkfails;
uint64_t ninserts;
uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
uint32_t prng_state;
/* Total number of items. */
size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
unsigned lg_minbuckets;
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
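/*
 * Usage sketch (illustrative, not part of the original header; return
 * values mostly unchecked for brevity). ckh_search() returns false on a
 * successful lookup:
 *
 *	ckh_t ckh;
 *	ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp);
 *	ckh_insert(&ckh, "key", "value");
 *	void *k, *v;
 *	if (!ckh_search(&ckh, "key", &k, &v)) {
 *		// found: v now points to "value"
 *	}
 *	ckh_delete(&ckh);
 */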
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 2,646 | 28.741573 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/rtree.h | /*
* This radix tree implementation is tailored to the singular purpose of
* tracking which chunks are currently owned by jemalloc. This functionality
* is mandatory for OS X, where jemalloc must be able to respond to object
* ownership queries.
*
*******************************************************************************
*/
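/*
 * Usage sketch (illustrative, not part of the original source): keys are
 * chunk addresses and values are single bytes, so chunk ownership can be
 * recorded and queried roughly as follows (alloc_cb/dalloc_cb stand in for
 * real allocator callbacks; error handling elided):
 *
 *	rtree_t *rt = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk,
 *	    alloc_cb, dalloc_cb, pool);
 *	rtree_set(rt, (uintptr_t)chunk, 1);	// record ownership
 *	if (rtree_get(rt, (uintptr_t)chunk) != 0) {
 *		// chunk is owned by jemalloc
 *	}
 */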
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_s rtree_t;
/*
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
#define RTREE_NODESIZE (1U << 16)
typedef void *(rtree_alloc_t)(pool_t *, size_t);
typedef void (rtree_dalloc_t)(pool_t *, void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
rtree_alloc_t *alloc;
rtree_dalloc_t *dalloc;
pool_t *pool;
malloc_mutex_t mutex;
void **root;
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc,
pool_t *pool);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
\
RTREE_LOCK(&rtree->mutex); \
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
return (0); \
} \
} \
\
/* \
* node is a leaf, so it contains values rather than node \
* pointers. \
*/ \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
{ \
uint8_t *leaf = (uint8_t *)node; \
ret = leaf[subkey]; \
} \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
return (ret); \
}
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). This scenario
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
void **node, **child;
malloc_mutex_lock(&rtree->mutex);
for (i = lshift = 0, height = rtree->height, node = rtree->root;
i < height - 1;
i++, lshift += bits, node = child) {
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
bits);
child = (void**)node[subkey];
if (child == NULL) {
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
child = (void**)rtree->alloc(rtree->pool, size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
memset(child, 0, size);
node[subkey] = child;
}
}
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
{
uint8_t *leaf = (uint8_t *)node;
leaf[subkey] = val;
}
malloc_mutex_unlock(&rtree->mutex);
return (false);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,206 | 28.754286 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/stats.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
};
struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
* many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t reruns;
/* Current number of runs in this bin. */
size_t curruns;
};
struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/* Current number of runs of this size class. */
size_t curruns;
};
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t npurge;
uint64_t nmadvise;
uint64_t purged;
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
size_t allocated_huge;
uint64_t nmalloc_huge;
uint64_t ndalloc_huge;
uint64_t nrequests_huge;
/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
malloc_large_stats_t *lstats;
};
struct chunk_stats_s {
/* Number of chunks that were allocated. */
uint64_t nchunks;
/* High-water mark for number of chunks allocated. */
size_t highchunks;
/*
* Current number of chunks allocated. This value isn't maintained for
* any other purpose, so keep track of it in order to be able to set
* highchunks.
*/
size_t curchunks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
void stats_print(pool_t *pool, void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(pool_t *pool);
void stats_cactive_add(pool_t *pool, size_t size);
void stats_cactive_sub(pool_t *pool, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(pool_t *pool)
{
return (atomic_read_z(&(pool->stats_cactive)));
}
JEMALLOC_INLINE void
stats_cactive_add(pool_t *pool, size_t size)
{
atomic_add_z(&(pool->stats_cactive), size);
}
JEMALLOC_INLINE void
stats_cactive_sub(pool_t *pool, size_t size)
{
atomic_sub_z(&(pool->stats_cactive), size);
}
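/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): cactive is a pool-wide counter maintained with paired
 * add/sub calls; the page counts below are hypothetical.
 */
#if 0
static void
stats_cactive_example(pool_t *pool)
{

	stats_cactive_add(pool, 8 * PAGE);	/* pages become active */
	stats_cactive_sub(pool, 4 * PAGE);	/* pages are released */
	malloc_printf("cactive: %zu\n", stats_cactive_get(pool));
}
#endif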
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 4,604 | 25.016949 | 83 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/util.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
#ifndef likely
#ifdef __GNUC__
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) !!(x)
#define unlikely(x) !!(x)
#endif
#endif
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
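/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): formatting into a stack buffer with malloc_snprintf(),
 * which performs no floating point math and no allocation, so it is safe
 * inside allocator internals.
 */
#if 0
static void
write_size_example(size_t size)
{
	char buf[BUFERROR_BUF];

	malloc_snprintf(buf, sizeof(buf), "size: %zu\n", size);
	malloc_write(buf);
}
#endif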
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
int jemalloc_ffsl(long bitmap);
int jemalloc_ffs(int bitmap);
size_t pow2_ceil(size_t x);
size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check: */
#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
#endif
JEMALLOC_ALWAYS_INLINE int
jemalloc_ffsl(long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE int
jemalloc_ffs(int bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
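/*
 * The shift-or cascade above smears the highest set bit into every lower
 * position, so e.g. pow2_ceil(5) == 8, pow2_ceil(8) == 8, and
 * pow2_ceil(9) == 16.  Note that pow2_ceil(0) wraps around to 0.
 */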
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
size_t ret;
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
return (ret);
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
unsigned long ret;
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
return ((unsigned)ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
# error "Unsupported type sizes for lg_floor()"
#endif
}
#else
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
x |= (x >> 32);
if (x == KZU(0xffffffffffffffff))
return (63);
x++;
return (jemalloc_ffsl(x) - 2);
#elif (LG_SIZEOF_PTR == 2)
if (x == KZU(0xffffffff))
return (31);
x++;
return (jemalloc_ffs(x) - 2);
#else
# error "Unsupported type sizes for lg_floor()"
#endif
}
#endif
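/*
 * All lg_floor() variants agree on e.g. lg_floor(1) == 0, lg_floor(13) == 3,
 * and lg_floor(4096) == 12; the result is undefined for x == 0.
 */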
/* Sets error code */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
int err = errnum;
errno = err;
SetLastError(errnum);
#else
errno = errnum;
#endif
}
/* Get last error code */
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
return (GetLastError());
#else
return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 6,088 | 21.977358 | 99 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/tcache.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tsd_tcache_s tsd_tcache_t;
/*
* tcache pointers close to NULL encode state information that serves two
* purposes: preventing thread caching on a per-thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond the limit of twice the
* number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
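/*
 * For example, with TCACHE_GC_SWEEP == 8192 and a hypothetical NBINS == 36,
 * TCACHE_GC_INCR == (8192 / 36) + 1 == 228, so one bin is garbage collected
 * roughly every 228 events and all bins are visited about every 8208 events.
 */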
#define TSD_TCACHE_INITIALIZER JEMALLOC_ARG_CONCAT({.npools = 0, .seqno = NULL, .tcaches = NULL})
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
tcache_enabled_false = 0, /* Enable cast to/from bool. */
tcache_enabled_true = 1,
tcache_enabled_default = 2
} tcache_enabled_t;
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
tcache_bin_stats_t tstats;
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
void **avail; /* Stack of available objects. */
};
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
};
struct tsd_tcache_s {
size_t npools;
unsigned *seqno; /* Sequence number of pool */
tcache_t **tcaches;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create);
tcache_t *tcache_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
bool tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len);
void tcache_thread_cleanup(void *arg);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcache_boot0(void);
bool tcache_boot1(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tsd_tcache_t)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
void tcache_event(tcache_t *tcache);
void tcache_flush(pool_t *pool);
bool tcache_enabled_get(void);
tcache_t *tcache_get(pool_t *pool, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tsd_tcache_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tsd_tcache_t, { 0 },
tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
tcache_enabled_default, malloc_tsd_no_cleanup)
JEMALLOC_INLINE void
tcache_flush(pool_t *pool)
{
tsd_tcache_t *tsd = tcache_tsd_get();
tcache_t *tcache = tsd->tcaches[pool->pool_id];
if (tsd->seqno[pool->pool_id] == pool->seqno) {
cassert(config_tcache);
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
return;
tcache_destroy(tcache);
}
tsd->tcaches[pool->pool_id] = NULL;
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
tcache_enabled = *tcache_enabled_tsd_get();
if (tcache_enabled == tcache_enabled_default) {
tcache_enabled = (tcache_enabled_t)opt_tcache;
tcache_enabled_tsd_set(&tcache_enabled);
}
return ((bool)tcache_enabled);
}
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
tcache_enabled_t tcache_enabled;
tsd_tcache_t *tsd;
tcache_t *tcache;
size_t i;
cassert(config_tcache);
tcache_enabled = (tcache_enabled_t)enabled;
tcache_enabled_tsd_set(&tcache_enabled);
tsd = tcache_tsd_get();
malloc_mutex_lock(&pools_lock);
for (i = 0; i < tsd->npools; i++) {
tcache = tsd->tcaches[i];
if (tcache != NULL) {
if (enabled) {
if (tcache == TCACHE_STATE_DISABLED) {
tsd->tcaches[i] = NULL;
}
} else /* disabled */ {
if (tcache > TCACHE_STATE_MAX) {
if (pools[i] != NULL && tsd->seqno[i] == pools[i]->seqno)
tcache_destroy(tcache);
tcache = NULL;
}
if (tcache == NULL) {
tsd->tcaches[i] = TCACHE_STATE_DISABLED;
}
}
}
}
malloc_mutex_unlock(&pools_lock);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(pool_t *pool, bool create)
{
tcache_t *tcache;
tsd_tcache_t *tsd;
if (config_tcache == false)
return (NULL);
if (config_lazy_lock && isthreaded == false)
return (NULL);
tsd = tcache_tsd_get();
/* expand tcaches array if necessary */
if ((tsd->npools <= pool->pool_id) &&
tcache_tsd_extend(tsd, pool->pool_id)) {
return (NULL);
}
/*
* All subsequent pools with the same id have to cleanup tcache before
* calling tcache_get_hard.
*/
if (tsd->seqno[pool->pool_id] != pool->seqno) {
tsd->tcaches[pool->pool_id] = NULL;
}
tcache = tsd->tcaches[pool->pool_id];
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
if (tcache == TCACHE_STATE_DISABLED)
return (NULL);
tcache = tcache_get_hard(tcache, pool, create);
}
return (tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
if (TCACHE_GC_INCR == 0)
return;
tcache->ev_cnt++;
assert(tcache->ev_cnt <= TCACHE_GC_INCR);
if (tcache->ev_cnt == TCACHE_GC_INCR)
tcache_event_hard(tcache);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
if (tbin->ncached == 0) {
tbin->low_water = -1;
return (NULL);
}
tbin->ncached--;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
ret = tbin->avail[tbin->ncached];
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
tcache_bin_t *tbin;
binind = small_size2bin(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
size = small_bin2size(binind);
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
if (ret == NULL)
return (NULL);
}
assert(tcache_salloc(ret) == size);
if (zero == false) {
if (config_fill) {
if (opt_junk) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (opt_zero)
memset(ret, 0, size);
}
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
memset(ret, 0, size);
}
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
tcache_event(tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
tcache_bin_t *tbin;
size = PAGE_CEILING(size);
assert(size <= tcache_maxclass);
binind = NBINS + (size >> LG_PAGE) - 1;
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
ret = arena_malloc_large(tcache->arena, size, zero);
if (ret == NULL)
return (NULL);
} else {
if (config_prof && size == PAGE) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
if (zero == false) {
if (config_fill) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
} else
memset(ret, 0, size);
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
}
tcache_event(tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
if (config_fill && opt_junk)
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
tcache_event(tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
size_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);
binind = NBINS + (size >> LG_PAGE) - 1;
if (config_fill && opt_junk)
memset(ptr, 0x5a, size);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
tcache_event(tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 12,206 | 26.187082 | 97 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/base.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(pool_t *pool, size_t size);
void *base_calloc(pool_t *pool, size_t number, size_t size);
extent_node_t *base_node_alloc(pool_t *pool);
void base_node_dalloc(pool_t *pool, extent_node_t *node);
size_t base_node_prealloc(pool_t *pool, size_t number);
bool base_boot(pool_t *pool);
bool base_init(pool_t *pool);
void base_prefork(pool_t *pool);
void base_postfork_parent(pool_t *pool);
void base_postfork_child(pool_t *pool);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,078 | 36.206897 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/bitmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit) == false);
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t bit;
bitmap_t g;
unsigned i;
assert(bitmap_full(bitmap, binfo) == false);
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
bitmap_set(bitmap, binfo, bit);
return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit) == false);
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (propagate == false)
break;
}
}
}
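/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): handing out region indices from a bitmap, the way arena
 * runs track free regions.  The group array size is hypothetical but
 * sufficient for 16 bits (a single group).
 */
#if 0
static void
bitmap_usage_example(void)
{
	bitmap_info_t binfo;
	bitmap_t groups[1];
	size_t bit;

	bitmap_info_init(&binfo, 16);
	bitmap_init(groups, &binfo);

	bit = bitmap_sfu(groups, &binfo);	/* first unset bit: 0 */
	assert(bit == 0 && bitmap_get(groups, &binfo, bit));

	bitmap_unset(groups, &binfo, bit);	/* region 0 is free again */
	assert(bitmap_full(groups, &binfo) == false);
}
#endif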
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,240 | 27.177419 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/prng.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(y) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
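/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): drawing 4 pseudo-random bits with prng32().  The seed and
 * the (a, c) constants are hypothetical but satisfy the constraints above
 * (a odd with a-1 divisible by 4, c odd).
 */
#if 0
static uint32_t
prng32_example(void)
{
	static uint32_t state = 42;	/* hypothetical seed */
	uint32_t r;

	prng32(r, 4, state, 1103515245U, 12345U);
	return (r);	/* r is in [0, 16). */
}
#endif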
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 2,017 | 32.081967 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/huge.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *huge_malloc(arena_t *arena, size_t size, bool zero);
void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
bool huge_ralloc_no_move(pool_t *pool, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(pool_t *pool, void *ptr);
size_t huge_salloc(const void *ptr);
size_t huge_pool_salloc(pool_t *pool, const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(pool_t *pool);
bool huge_init(pool_t *pool);
void huge_prefork(pool_t *pool);
void huge_postfork_parent(pool_t *pool);
void huge_postfork_child(pool_t *pool);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,568 | 39.230769 | 80 | h |