repo
stringlengths
1
152
file
stringlengths
15
205
code
stringlengths
0
41.6M
file_length
int64
0
41.6M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
90 values
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_basic/setup.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # # src/test/rpmem_basic/setup.sh -- common part for TEST* scripts # set -e require_nodes 2 require_node_libfabric 0 $RPMEM_PROVIDER $SETUP_LIBFABRIC_VERSION require_node_libfabric 1 $RPMEM_PROVIDER $SETUP_LIBFABRIC_VERSION require_node_log_files 0 $RPMEMD_LOG_FILE require_node_log_files 1 $RPMEM_LOG_FILE require_node_log_files 1 $PMEM_LOG_FILE POOLS_DIR=pools POOLS_PART=pool_parts PART_DIR=${NODE_DIR[0]}/$POOLS_PART RPMEM_POOLSET_DIR[0]=${NODE_DIR[0]}$POOLS_DIR if [ -z "$SETUP_MANUAL_INIT_RPMEM" ]; then init_rpmem_on_node 1 0 fi
2,130
38.462963
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_rpmem_heap_interrupt/config.sh
#!/usr/bin/env bash # # Copyright 2016-2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # src/test/obj_rpmem_heap_interrupt/config.sh -- test configuration # CONF_GLOBAL_FS_TYPE=pmem CONF_GLOBAL_BUILD_TYPE="debug nondebug" CONF_GLOBAL_RPMEM_PROVIDER=all CONF_GLOBAL_RPMEM_PMETHOD=all
1,789
41.619048
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/test/libpmempool_backup/config.sh
#!/usr/bin/env bash # # Copyright 2017, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # libpmempool_backup/config.sh -- test configuration # # Extend timeout for TEST0, as it may take more than a minute # when run on a non-pmem file system. CONF_TIMEOUT[0]='10m'
1,766
41.071429
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/test/libpmempool_backup/common.sh
#!/usr/bin/env bash # # Copyright 2016-2018, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # # libpmempool_backup/common.sh -- functions for libpmempool_backup unittest # set -e POOLSET=$DIR/pool.set BACKUP=_backup REPLICA=_replica POOL_PART=$DIR/pool.part OUT=out${UNITTEST_NUM}.log OUT_TEMP=out${UNITTEST_NUM}_temp.log DIFF=diff${UNITTEST_NUM}.log rm -f $LOG $DIFF $OUT_TEMP && touch $LOG $DIFF $OUT_TEMP # params for blk, log and obj pools POOL_TYPES=( blk log obj ) POOL_CREATE_PARAMS=( "--write-layout 512" "" "--layout test_layout" ) POOL_CHECK_PARAMS=( "-smgB" "-s" "-soOaAbZH -l -C" ) POOL_OBJ=2 # create_poolset_variation -- create one from the tested poolset variation # usage: create_poolset_variation <variation-id> [<suffix>] # function create_poolset_variation() { local sfx="" local variation=$1 shift if [ $# -gt 0 ]; then sfx=$1 fi case "$variation" in 1) # valid poolset file create_poolset $POOLSET$sfx \ 20M:${POOL_PART}1$sfx:x \ 20M:${POOL_PART}2$sfx:x \ 20M:${POOL_PART}3$sfx:x \ 20M:${POOL_PART}4$sfx:x ;; 2) # valid poolset file with replica create_poolset $POOLSET$sfx \ 20M:${POOL_PART}1$sfx:x \ 20M:${POOL_PART}2$sfx:x \ 20M:${POOL_PART}3$sfx:x \ 20M:${POOL_PART}4$sfx:x \ r 80M:${POOL_PART}${REPLICA}$sfx:x ;; 3) # other number of parts create_poolset $POOLSET$sfx \ 20M:${POOL_PART}1$sfx:x \ 20M:${POOL_PART}2$sfx:x \ 40M:${POOL_PART}3$sfx:x ;; 4) # no poolset # return without check_file return ;; 5) # empty create_poolset $POOLSET$sfx ;; 6) # other size of part create_poolset $POOLSET$sfx \ 20M:${POOL_PART}1$sfx:x \ 20M:${POOL_PART}2$sfx:x \ 20M:${POOL_PART}3$sfx:x \ 21M:${POOL_PART}4$sfx:x ;; esac check_file $POOLSET$sfx } # # backup_and_compare -- perform backup and compare backup result with original # if compare parameters are provided # usage: backup_and_compare <poolset> <type> [<compare-params>] # function backup_and_compare () { local poolset=$1 local type=$2 shift 2 # backup expect_normal_exit ../libpmempool_api/libpmempool_test$EXESUFFIX \ -b $poolset$BACKUP -t $type -r 1 $poolset cat $OUT >> $OUT_TEMP # compare if [ $# -gt 0 ]; then 
compare_replicas "$1" $poolset $poolset$BACKUP >> $DIFF fi } ALL_POOL_PARTS="${POOL_PART}1 ${POOL_PART}2 ${POOL_PART}3 ${POOL_PART}4 \ ${POOL_PART}${REPLICA}" ALL_POOL_BACKUP_PARTS="${POOL_PART}1$BACKUP ${POOL_PART}2$BACKUP \ ${POOL_PART}3$BACKUP ${POOL_PART}4$BACKUP \ ${POOL_PART}${BACKUP}${REPLICA}" # # backup_cleanup -- perform cleanup between test cases # function backup_cleanup() { rm -f $POOLSET$BACKUP $ALL_POOL_PARTS $ALL_POOL_BACKUP_PARTS }
4,177
27.421769
78
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_sync/mocks_windows.h
/* * Copyright 2016-2017, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * mocks_windows.h -- redefinitions of pthread functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_sync test. * It would replace default implementation with mocked functions defined * in obj_sync.c. * * These defines could be also passed as preprocessor definitions. 
*/ #ifndef WRAP_REAL #define os_mutex_init __wrap_os_mutex_init #define os_rwlock_init __wrap_os_rwlock_init #define os_cond_init __wrap_os_cond_init #endif
2,221
41.730769
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libvmmalloc/vmmalloc.h
/* * Copyright 2014-2016, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * vmmalloc.h -- internal definitions for libvmmalloc */ #define VMMALLOC_LOG_PREFIX "libvmmalloc" #define VMMALLOC_LOG_LEVEL_VAR "VMMALLOC_LOG_LEVEL" #define VMMALLOC_LOG_FILE_VAR "VMMALLOC_LOG_FILE" #define VMMALLOC_LOG_STATS_VAR "VMMALLOC_LOG_STATS" #define VMMALLOC_POOL_DIR_VAR "VMMALLOC_POOL_DIR" #define VMMALLOC_POOL_SIZE_VAR "VMMALLOC_POOL_SIZE" #define VMMALLOC_FORK_VAR "VMMALLOC_FORK"
2,005
43.577778
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_fip_common.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * rpmem_fip_common.h -- common definitions for librpmem and rpmemd */ #ifndef RPMEM_FIP_COMMON_H #define RPMEM_FIP_COMMON_H 1 #include <string.h> #include <netinet/in.h> #include <rdma/fabric.h> #include <rdma/fi_cm.h> #include <rdma/fi_rma.h> #ifdef __cplusplus extern "C" { #endif #define RPMEM_FIVERSION FI_VERSION(1, 4) #define RPMEM_FIP_CQ_WAIT_MS 100 #define min(a, b) ((a) < (b) ? 
(a) : (b)) /* * rpmem_fip_node -- client or server node type */ enum rpmem_fip_node { RPMEM_FIP_NODE_CLIENT, RPMEM_FIP_NODE_SERVER, MAX_RPMEM_FIP_NODE, }; /* * rpmem_fip_probe -- list of providers */ struct rpmem_fip_probe { unsigned providers; }; /* * rpmem_fip_probe -- returns true if specified provider is available */ static inline int rpmem_fip_probe(struct rpmem_fip_probe probe, enum rpmem_provider provider) { return (probe.providers & (1U << provider)) != 0; } /* * rpmem_fip_probe_any -- returns true if any provider is available */ static inline int rpmem_fip_probe_any(struct rpmem_fip_probe probe) { return probe.providers != 0; } int rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe); struct fi_info *rpmem_fip_get_hints(enum rpmem_provider provider); int rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry, uint32_t exp_event, fid_t exp_fid, int timeout); int rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry, uint32_t *event, int timeout); size_t rpmem_fip_cq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node); size_t rpmem_fip_tx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node); size_t rpmem_fip_rx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node); size_t rpmem_fip_max_nlanes(struct fi_info *fi); void rpmem_fip_print_info(struct fi_info *fi); #ifdef __cplusplus } #endif #endif
3,428
28.307692
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_common_log.h
/* * Copyright 2016, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * rpmem_common_log.h -- common log macros for librpmem and rpmemd */ #if defined(RPMEMC_LOG_RPMEM) && defined(RPMEMC_LOG_RPMEMD) #error Both RPMEMC_LOG_RPMEM and RPMEMC_LOG_RPMEMD defined #elif !defined(RPMEMC_LOG_RPMEM) && !defined(RPMEMC_LOG_RPMEMD) #define RPMEMC_LOG(level, fmt, args...) do {} while (0) #define RPMEMC_DBG(level, fmt, args...) do {} while (0) #define RPMEMC_FATAL(fmt, args...) 
do {} while (0) #define RPMEMC_ASSERT(cond) do {} while (0) #elif defined(RPMEMC_LOG_RPMEM) #include "out.h" #include "rpmem_util.h" #define RPMEMC_LOG(level, fmt, args...) RPMEM_LOG(level, fmt, ## args) #define RPMEMC_DBG(level, fmt, args...) RPMEM_DBG(fmt, ## args) #define RPMEMC_FATAL(fmt, args...) RPMEM_FATAL(fmt, ## args) #define RPMEMC_ASSERT(cond) RPMEM_ASSERT(cond) #else #include "rpmemd_log.h" #define RPMEMC_LOG(level, fmt, args...) RPMEMD_LOG(level, fmt, ## args) #define RPMEMC_DBG(level, fmt, args...) RPMEMD_DBG(fmt, ## args) #define RPMEMC_FATAL(fmt, args...) RPMEMD_FATAL(fmt, ## args) #define RPMEMC_ASSERT(cond) RPMEMD_ASSERT(cond) #endif
2,675
38.352941
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_common.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * rpmem_common.h -- common definitions for librpmem and rpmemd */ #ifndef RPMEM_COMMON_H #define RPMEM_COMMON_H 1 /* * Values for SO_KEEPALIVE socket option */ #define RPMEM_CMD_ENV "RPMEM_CMD" #define RPMEM_SSH_ENV "RPMEM_SSH" #define RPMEM_DEF_CMD "rpmemd" #define RPMEM_DEF_SSH "ssh" #define RPMEM_PROV_SOCKET_ENV "RPMEM_ENABLE_SOCKETS" #define RPMEM_PROV_VERBS_ENV "RPMEM_ENABLE_VERBS" #define RPMEM_MAX_NLANES_ENV "RPMEM_MAX_NLANES" #define RPMEM_ACCEPT_TIMEOUT 30000 #define RPMEM_CONNECT_TIMEOUT 30000 #define RPMEM_MONITOR_TIMEOUT 1000 #include <stdint.h> #include <sys/socket.h> #include <netdb.h> #ifdef __cplusplus extern "C" { #endif /* * rpmem_err -- error codes */ enum rpmem_err { RPMEM_SUCCESS = 0, RPMEM_ERR_BADPROTO = 1, RPMEM_ERR_BADNAME = 2, RPMEM_ERR_BADSIZE = 3, RPMEM_ERR_BADNLANES = 4, RPMEM_ERR_BADPROVIDER = 5, RPMEM_ERR_FATAL = 6, RPMEM_ERR_FATAL_CONN = 7, RPMEM_ERR_BUSY = 8, RPMEM_ERR_EXISTS = 9, RPMEM_ERR_PROVNOSUP = 10, RPMEM_ERR_NOEXIST = 11, RPMEM_ERR_NOACCESS = 12, RPMEM_ERR_POOL_CFG = 13, MAX_RPMEM_ERR, }; /* * rpmem_persist_method -- remote persist operation method */ enum rpmem_persist_method { RPMEM_PM_GPSPM = 1, /* General Purpose Server Persistency Method */ RPMEM_PM_APM = 2, /* Appliance Persistency Method */ MAX_RPMEM_PM, }; const char *rpmem_persist_method_to_str(enum rpmem_persist_method pm); /* * rpmem_provider -- supported providers */ enum rpmem_provider { RPMEM_PROV_UNKNOWN = 0, RPMEM_PROV_LIBFABRIC_VERBS = 1, RPMEM_PROV_LIBFABRIC_SOCKETS = 2, MAX_RPMEM_PROV, }; enum rpmem_provider rpmem_provider_from_str(const char *str); const char *rpmem_provider_to_str(enum rpmem_provider provider); /* * rpmem_req_attr -- arguments for open/create request */ struct rpmem_req_attr { size_t pool_size; unsigned nlanes; size_t buff_size; enum rpmem_provider provider; const char *pool_desc; }; /* * rpmem_resp_attr -- return arguments from open/create request */ struct rpmem_resp_attr { unsigned short port; uint64_t rkey; uint64_t raddr; 
unsigned nlanes; enum rpmem_persist_method persist_method; }; #define RPMEM_HAS_USER 0x1 #define RPMEM_HAS_SERVICE 0x2 #define RPMEM_FLAGS_USE_IPV4 0x4 #define RPMEM_MAX_USER (32 + 1) /* see useradd(8) + 1 for '\0' */ #define RPMEM_MAX_NODE (255 + 1) /* see gethostname(2) + 1 for '\0' */ #define RPMEM_MAX_SERVICE (NI_MAXSERV + 1) /* + 1 for '\0' */ #define RPMEM_HDR_SIZE 4096 #define RPMEM_CLOSE_FLAGS_REMOVE 0x1 #define RPMEM_DEF_BUFF_SIZE 8192 struct rpmem_target_info { char user[RPMEM_MAX_USER]; char node[RPMEM_MAX_NODE]; char service[RPMEM_MAX_SERVICE]; unsigned flags; }; extern unsigned Rpmem_max_nlanes; extern int Rpmem_fork_unsafe; int rpmem_b64_write(int sockfd, const void *buf, size_t len, int flags); int rpmem_b64_read(int sockfd, void *buf, size_t len, int flags); const char *rpmem_get_ip_str(const struct sockaddr *addr); struct rpmem_target_info *rpmem_target_parse(const char *target); void rpmem_target_free(struct rpmem_target_info *info); int rpmem_xwrite(int fd, const void *buf, size_t len, int flags); int rpmem_xread(int fd, void *buf, size_t len, int flags); char *rpmem_get_ssh_conn_addr(void); #ifdef __cplusplus } #endif #endif
4,838
27.976048
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_proto.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * rpmem_proto.h -- rpmem protocol definitions */ #ifndef RPMEM_PROTO_H #define RPMEM_PROTO_H 1 #include <stdint.h> #include <endian.h> #include "librpmem.h" #ifdef __cplusplus extern "C" { #endif #define PACKED __attribute__((packed)) #define RPMEM_PROTO "tcp" #define RPMEM_PROTO_MAJOR 0 #define RPMEM_PROTO_MINOR 1 #define RPMEM_SIG_SIZE 8 #define RPMEM_UUID_SIZE 16 #define RPMEM_PROV_SIZE 32 #define RPMEM_USER_SIZE 16 /* * rpmem_msg_type -- type of messages */ enum rpmem_msg_type { RPMEM_MSG_TYPE_CREATE = 1, /* create request */ RPMEM_MSG_TYPE_CREATE_RESP = 2, /* create request response */ RPMEM_MSG_TYPE_OPEN = 3, /* open request */ RPMEM_MSG_TYPE_OPEN_RESP = 4, /* open request response */ RPMEM_MSG_TYPE_CLOSE = 5, /* close request */ RPMEM_MSG_TYPE_CLOSE_RESP = 6, /* close request response */ RPMEM_MSG_TYPE_SET_ATTR = 7, /* set attributes request */ /* set attributes request response */ RPMEM_MSG_TYPE_SET_ATTR_RESP = 8, MAX_RPMEM_MSG_TYPE, }; /* * rpmem_pool_attr_packed -- a packed version */ struct rpmem_pool_attr_packed { char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ uint32_t compat_features; /* mask: compatible "may" features */ uint32_t incompat_features; /* mask: "must support" features */ uint32_t ro_compat_features; /* mask: force RO if unsupported */ unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */ unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */ unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */ } PACKED; /* * rpmem_msg_ibc_attr -- in-band connection attributes * * Used by create request response and open request response. * Contains essential information to proceed with in-band connection * initialization. 
*/ struct rpmem_msg_ibc_attr { uint32_t port; /* RDMA connection port */ uint32_t persist_method; /* persist method */ uint64_t rkey; /* remote key */ uint64_t raddr; /* remote address */ uint32_t nlanes; /* number of lanes */ } PACKED; /* * rpmem_msg_pool_desc -- remote pool descriptor */ struct rpmem_msg_pool_desc { uint32_t size; /* size of pool descriptor */ uint8_t desc[0]; /* pool descriptor, null-terminated string */ } PACKED; /* * rpmem_msg_hdr -- message header which consists of type and size of message * * The type must be one of the rpmem_msg_type values. */ struct rpmem_msg_hdr { uint32_t type; /* type of message */ uint64_t size; /* size of message */ uint8_t body[0]; } PACKED; /* * rpmem_msg_hdr_resp -- message response header which consists of type, size * and status. * * The type must be one of the rpmem_msg_type values. */ struct rpmem_msg_hdr_resp { uint32_t status; /* response status */ uint32_t type; /* type of message */ uint64_t size; /* size of message */ } PACKED; /* * rpmem_msg_common -- common fields for open/create messages */ struct rpmem_msg_common { uint16_t major; /* protocol version major number */ uint16_t minor; /* protocol version minor number */ uint64_t pool_size; /* minimum required size of a pool */ uint32_t nlanes; /* number of lanes used by initiator */ uint32_t provider; /* provider */ uint64_t buff_size; /* buffer size for inline persist */ } PACKED; /* * rpmem_msg_create -- create request message * * The type of message must be set to RPMEM_MSG_TYPE_CREATE. * The size of message must be set to * sizeof(struct rpmem_msg_create) + pool_desc_size */ struct rpmem_msg_create { struct rpmem_msg_hdr hdr; /* message header */ struct rpmem_msg_common c; struct rpmem_pool_attr_packed pool_attr; /* pool attributes */ struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */ } PACKED; /* * rpmem_msg_create_resp -- create request response message * * The type of message must be set to RPMEM_MSG_TYPE_CREATE_RESP. 
* The size of message must be set to sizeof(struct rpmem_msg_create_resp). */ struct rpmem_msg_create_resp { struct rpmem_msg_hdr_resp hdr; /* message header */ struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */ } PACKED; /* * rpmem_msg_open -- open request message * * The type of message must be set to RPMEM_MSG_TYPE_OPEN. * The size of message must be set to * sizeof(struct rpmem_msg_open) + pool_desc_size */ struct rpmem_msg_open { struct rpmem_msg_hdr hdr; /* message header */ struct rpmem_msg_common c; struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */ } PACKED; /* * rpmem_msg_open_resp -- open request response message * * The type of message must be set to RPMEM_MSG_TYPE_OPEN_RESP. * The size of message must be set to sizeof(struct rpmem_msg_open_resp) */ struct rpmem_msg_open_resp { struct rpmem_msg_hdr_resp hdr; /* message header */ struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */ struct rpmem_pool_attr_packed pool_attr; /* pool attributes */ } PACKED; /* * rpmem_msg_close -- close request message * * The type of message must be set to RPMEM_MSG_TYPE_CLOSE * The size of message must be set to sizeof(struct rpmem_msg_close) */ struct rpmem_msg_close { struct rpmem_msg_hdr hdr; /* message header */ uint32_t flags; /* flags */ } PACKED; /* * rpmem_msg_close_resp -- close request response message * * The type of message must be set to RPMEM_MSG_TYPE_CLOSE_RESP * The size of message must be set to sizeof(struct rpmem_msg_close_resp) */ struct rpmem_msg_close_resp { struct rpmem_msg_hdr_resp hdr; /* message header */ /* no more fields */ } PACKED; #define RPMEM_PERSIST_WRITE 0U /* persist using RDMA WRITE */ #define RPMEM_DEEP_PERSIST 1U /* deep persist operation */ #define RPMEM_PERSIST_SEND 2U /* persist using RDMA SEND */ #define RPMEM_PERSIST_MAX 2U /* maximum valid value */ /* * the two least significant bits * are reserved for mode of persist */ #define RPMEM_PERSIST_MASK 0x3U /* * rpmem_msg_persist -- remote 
persist message */ struct rpmem_msg_persist { uint32_t flags; /* lane flags */ uint32_t lane; /* lane identifier */ uint64_t addr; /* remote memory address */ uint64_t size; /* remote memory size */ uint8_t data[]; }; /* * rpmem_msg_persist_resp -- remote persist response message */ struct rpmem_msg_persist_resp { uint32_t flags; /* lane flags */ uint32_t lane; /* lane identifier */ }; /* * rpmem_msg_set_attr -- set attributes request message * * The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR. * The size of message must be set to sizeof(struct rpmem_msg_set_attr) */ struct rpmem_msg_set_attr { struct rpmem_msg_hdr hdr; /* message header */ struct rpmem_pool_attr_packed pool_attr; /* pool attributes */ } PACKED; /* * rpmem_msg_set_attr_resp -- set attributes request response message * * The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR_RESP. * The size of message must be set to sizeof(struct rpmem_msg_set_attr_resp). */ struct rpmem_msg_set_attr_resp { struct rpmem_msg_hdr_resp hdr; /* message header */ } PACKED; /* * XXX Begin: Suppress gcc conversion warnings for FreeBSD be*toh macros. 
*/ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" /* * rpmem_ntoh_msg_ibc_attr -- convert rpmem_msg_ibc attr to host byte order */ static inline void rpmem_ntoh_msg_ibc_attr(struct rpmem_msg_ibc_attr *ibc) { ibc->port = be32toh(ibc->port); ibc->persist_method = be32toh(ibc->persist_method); ibc->rkey = be64toh(ibc->rkey); ibc->raddr = be64toh(ibc->raddr); } /* * rpmem_ntoh_msg_pool_desc -- convert rpmem_msg_pool_desc to host byte order */ static inline void rpmem_ntoh_msg_pool_desc(struct rpmem_msg_pool_desc *pool_desc) { pool_desc->size = be32toh(pool_desc->size); } /* * rpmem_ntoh_pool_attr -- convert rpmem_pool_attr to host byte order */ static inline void rpmem_ntoh_pool_attr(struct rpmem_pool_attr_packed *attr) { attr->major = be32toh(attr->major); attr->ro_compat_features = be32toh(attr->ro_compat_features); attr->incompat_features = be32toh(attr->incompat_features); attr->compat_features = be32toh(attr->compat_features); } /* * rpmem_ntoh_msg_hdr -- convert rpmem_msg_hdr to host byte order */ static inline void rpmem_ntoh_msg_hdr(struct rpmem_msg_hdr *hdrp) { hdrp->type = be32toh(hdrp->type); hdrp->size = be64toh(hdrp->size); } /* * rpmem_hton_msg_hdr -- convert rpmem_msg_hdr to network byte order */ static inline void rpmem_hton_msg_hdr(struct rpmem_msg_hdr *hdrp) { rpmem_ntoh_msg_hdr(hdrp); } /* * rpmem_ntoh_msg_hdr_resp -- convert rpmem_msg_hdr_resp to host byte order */ static inline void rpmem_ntoh_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp) { hdrp->status = be32toh(hdrp->status); hdrp->type = be32toh(hdrp->type); hdrp->size = be64toh(hdrp->size); } /* * rpmem_hton_msg_hdr_resp -- convert rpmem_msg_hdr_resp to network byte order */ static inline void rpmem_hton_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp) { rpmem_ntoh_msg_hdr_resp(hdrp); } /* * rpmem_ntoh_msg_common -- convert rpmem_msg_common to host byte order */ static inline void rpmem_ntoh_msg_common(struct rpmem_msg_common *msg) { msg->major = be16toh(msg->major); 
msg->minor = be16toh(msg->minor); msg->pool_size = be64toh(msg->pool_size); msg->nlanes = be32toh(msg->nlanes); msg->provider = be32toh(msg->provider); msg->buff_size = be64toh(msg->buff_size); } /* * rpmem_hton_msg_common -- convert rpmem_msg_common to network byte order */ static inline void rpmem_hton_msg_common(struct rpmem_msg_common *msg) { rpmem_ntoh_msg_common(msg); } /* * rpmem_ntoh_msg_create -- convert rpmem_msg_create to host byte order */ static inline void rpmem_ntoh_msg_create(struct rpmem_msg_create *msg) { rpmem_ntoh_msg_hdr(&msg->hdr); rpmem_ntoh_msg_common(&msg->c); rpmem_ntoh_pool_attr(&msg->pool_attr); rpmem_ntoh_msg_pool_desc(&msg->pool_desc); } /* * rpmem_hton_msg_create -- convert rpmem_msg_create to network byte order */ static inline void rpmem_hton_msg_create(struct rpmem_msg_create *msg) { rpmem_ntoh_msg_create(msg); } /* * rpmem_ntoh_msg_create_resp -- convert rpmem_msg_create_resp to host byte * order */ static inline void rpmem_ntoh_msg_create_resp(struct rpmem_msg_create_resp *msg) { rpmem_ntoh_msg_hdr_resp(&msg->hdr); rpmem_ntoh_msg_ibc_attr(&msg->ibc); } /* * rpmem_hton_msg_create_resp -- convert rpmem_msg_create_resp to network byte * order */ static inline void rpmem_hton_msg_create_resp(struct rpmem_msg_create_resp *msg) { rpmem_ntoh_msg_create_resp(msg); } /* * rpmem_ntoh_msg_open -- convert rpmem_msg_open to host byte order */ static inline void rpmem_ntoh_msg_open(struct rpmem_msg_open *msg) { rpmem_ntoh_msg_hdr(&msg->hdr); rpmem_ntoh_msg_common(&msg->c); rpmem_ntoh_msg_pool_desc(&msg->pool_desc); } /* * XXX End: Suppress gcc conversion warnings for FreeBSD be*toh macros */ #pragma GCC diagnostic pop /* * rpmem_hton_msg_open -- convert rpmem_msg_open to network byte order */ static inline void rpmem_hton_msg_open(struct rpmem_msg_open *msg) { rpmem_ntoh_msg_open(msg); } /* * rpmem_ntoh_msg_open_resp -- convert rpmem_msg_open_resp to host byte order */ static inline void rpmem_ntoh_msg_open_resp(struct rpmem_msg_open_resp 
*msg) { rpmem_ntoh_msg_hdr_resp(&msg->hdr); rpmem_ntoh_msg_ibc_attr(&msg->ibc); rpmem_ntoh_pool_attr(&msg->pool_attr); } /* * rpmem_hton_msg_open_resp -- convert rpmem_msg_open_resp to network byte order */ static inline void rpmem_hton_msg_open_resp(struct rpmem_msg_open_resp *msg) { rpmem_ntoh_msg_open_resp(msg); } /* * rpmem_ntoh_msg_set_attr -- convert rpmem_msg_set_attr to host byte order */ static inline void rpmem_ntoh_msg_set_attr(struct rpmem_msg_set_attr *msg) { rpmem_ntoh_msg_hdr(&msg->hdr); rpmem_ntoh_pool_attr(&msg->pool_attr); } /* * rpmem_hton_msg_set_attr -- convert rpmem_msg_set_attr to network byte order */ static inline void rpmem_hton_msg_set_attr(struct rpmem_msg_set_attr *msg) { rpmem_ntoh_msg_set_attr(msg); } /* * rpmem_ntoh_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to host byte * order */ static inline void rpmem_ntoh_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg) { rpmem_ntoh_msg_hdr_resp(&msg->hdr); } /* * rpmem_hton_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to network * byte order */ static inline void rpmem_hton_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg) { rpmem_hton_msg_hdr_resp(&msg->hdr); } /* * rpmem_ntoh_msg_close -- convert rpmem_msg_close to host byte order */ static inline void rpmem_ntoh_msg_close(struct rpmem_msg_close *msg) { rpmem_ntoh_msg_hdr(&msg->hdr); } /* * rpmem_hton_msg_close -- convert rpmem_msg_close to network byte order */ static inline void rpmem_hton_msg_close(struct rpmem_msg_close *msg) { rpmem_ntoh_msg_close(msg); } /* * rpmem_ntoh_msg_close_resp -- convert rpmem_msg_close_resp to host byte order */ static inline void rpmem_ntoh_msg_close_resp(struct rpmem_msg_close_resp *msg) { rpmem_ntoh_msg_hdr_resp(&msg->hdr); } /* * rpmem_hton_msg_close_resp -- convert rpmem_msg_close_resp to network byte * order */ static inline void rpmem_hton_msg_close_resp(struct rpmem_msg_close_resp *msg) { rpmem_ntoh_msg_close_resp(msg); } /* * pack_rpmem_pool_attr -- copy pool attributes to 
a packed structure */ static inline void pack_rpmem_pool_attr(const struct rpmem_pool_attr *src, struct rpmem_pool_attr_packed *dst) { memcpy(dst->signature, src->signature, sizeof(src->signature)); dst->major = src->major; dst->compat_features = src->compat_features; dst->incompat_features = src->incompat_features; dst->ro_compat_features = src->ro_compat_features; memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid)); memcpy(dst->uuid, src->uuid, sizeof(dst->uuid)); memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid)); memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid)); memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags)); } /* * unpack_rpmem_pool_attr -- copy pool attributes to an unpacked structure */ static inline void unpack_rpmem_pool_attr(const struct rpmem_pool_attr_packed *src, struct rpmem_pool_attr *dst) { memcpy(dst->signature, src->signature, sizeof(src->signature)); dst->major = src->major; dst->compat_features = src->compat_features; dst->incompat_features = src->incompat_features; dst->ro_compat_features = src->ro_compat_features; memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid)); memcpy(dst->uuid, src->uuid, sizeof(dst->uuid)); memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid)); memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid)); memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags)); } #ifdef __cplusplus } #endif #endif
16,448
27.507799
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_fip_lane.h
/* * Copyright 2016-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * rpmem_fip_lane.h -- rpmem fabric provider lane definition */ #include <sched.h> #include <stdint.h> #include "sys_util.h" /* * rpmem_fip_lane -- basic lane structure * * This structure consist of a synchronization object and a return value. * It is possible to wait on the lane for specified event. The event can be * signalled by another thread which can pass the return value if required. 
* * The sync variable can store up to 64 different events, each event on * separate bit. */ struct rpmem_fip_lane { os_spinlock_t lock; int ret; uint64_t sync; }; /* * rpmem_fip_lane_init -- initialize basic lane structure */ static inline int rpmem_fip_lane_init(struct rpmem_fip_lane *lanep) { lanep->ret = 0; lanep->sync = 0; return util_spin_init(&lanep->lock, PTHREAD_PROCESS_PRIVATE); } /* * rpmem_fip_lane_fini -- deinitialize basic lane structure */ static inline void rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep) { util_spin_destroy(&lanep->lock); } /* * rpmem_fip_lane_busy -- return true if lane has pending events */ static inline int rpmem_fip_lane_busy(struct rpmem_fip_lane *lanep) { util_spin_lock(&lanep->lock); int ret = lanep->sync != 0; util_spin_unlock(&lanep->lock); return ret; } /* * rpmem_fip_lane_begin -- begin waiting for specified event(s) */ static inline void rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t sig) { util_spin_lock(&lanep->lock); lanep->ret = 0; lanep->sync |= sig; util_spin_unlock(&lanep->lock); } static inline int rpmem_fip_lane_is_busy(struct rpmem_fip_lane *lanep, uint64_t sig) { util_spin_lock(&lanep->lock); int ret = (lanep->sync & sig) != 0; util_spin_unlock(&lanep->lock); return ret; } static inline int rpmem_fip_lane_ret(struct rpmem_fip_lane *lanep) { util_spin_lock(&lanep->lock); int ret = lanep->ret; util_spin_unlock(&lanep->lock); return ret; } /* * rpmem_fip_lane_wait -- wait for specified event(s) */ static inline int rpmem_fip_lane_wait(struct rpmem_fip_lane *lanep, uint64_t sig) { while (rpmem_fip_lane_is_busy(lanep, sig)) sched_yield(); return rpmem_fip_lane_ret(lanep); } /* * rpmem_fip_lane_signal -- signal lane about specified event */ static inline void rpmem_fip_lane_signal(struct rpmem_fip_lane *lanep, uint64_t sig) { util_spin_lock(&lanep->lock); lanep->sync &= ~sig; util_spin_unlock(&lanep->lock); } /* * rpmem_fip_lane_signal -- signal lane about specified event and store * return value */ 
static inline void rpmem_fip_lane_sigret(struct rpmem_fip_lane *lanep, uint64_t sig, int ret) { util_spin_lock(&lanep->lock); lanep->ret = ret; lanep->sync &= ~sig; util_spin_unlock(&lanep->lock); }
4,269
26.197452
75
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/rpmem_common/rpmem_fip_msg.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * rpmem_fip_msg.h -- simple wrappers for fi_rma(3) and fi_msg(3) functions */ #ifndef RPMEM_FIP_MSG_H #define RPMEM_FIP_MSG_H 1 #include <rdma/fi_rma.h> #ifdef __cplusplus extern "C" { #endif /* * rpmem_fip_rma -- helper struct for RMA operation */ struct rpmem_fip_rma { struct fi_msg_rma msg; /* message structure */ struct iovec msg_iov; /* IO vector buffer */ struct fi_rma_iov rma_iov; /* RMA IO vector buffer */ void *desc; /* local memory descriptor */ uint64_t flags; /* RMA operation flags */ }; /* * rpmem_fip_msg -- helper struct for MSG operation */ struct rpmem_fip_msg { struct fi_msg msg; /* message structure */ struct iovec iov; /* IO vector buffer */ void *desc; /* local memory descriptor */ uint64_t flags; /* MSG operation flags */ }; /* * rpmem_fip_rma_init -- initialize RMA helper struct */ static inline void rpmem_fip_rma_init(struct rpmem_fip_rma *rma, void *desc, fi_addr_t addr, uint64_t rkey, void *context, uint64_t flags) { memset(rma, 0, sizeof(*rma)); rma->desc = desc; rma->flags = flags; rma->rma_iov.key = rkey; rma->msg.context = context; rma->msg.addr = addr; rma->msg.desc = &rma->desc; rma->msg.rma_iov = &rma->rma_iov; rma->msg.rma_iov_count = 1; rma->msg.msg_iov = &rma->msg_iov; rma->msg.iov_count = 1; } /* * rpmem_fip_msg_init -- initialize MSG helper struct */ static inline void rpmem_fip_msg_init(struct rpmem_fip_msg *msg, void *desc, fi_addr_t addr, void *context, void *buff, size_t len, uint64_t flags) { memset(msg, 0, sizeof(*msg)); msg->desc = desc; msg->flags = flags; msg->iov.iov_base = buff; msg->iov.iov_len = len; msg->msg.context = context; msg->msg.addr = addr; msg->msg.desc = &msg->desc; msg->msg.msg_iov = &msg->iov; msg->msg.iov_count = 1; } /* * rpmem_fip_writemsg -- wrapper for fi_writemsg */ static inline int rpmem_fip_writemsg(struct fid_ep *ep, struct rpmem_fip_rma *rma, const void *buff, size_t len, uint64_t addr) { rma->rma_iov.addr = addr; rma->rma_iov.len = len; rma->msg_iov.iov_base = (void *)buff; 
rma->msg_iov.iov_len = len; return (int)fi_writemsg(ep, &rma->msg, rma->flags); } /* * rpmem_fip_readmsg -- wrapper for fi_readmsg */ static inline int rpmem_fip_readmsg(struct fid_ep *ep, struct rpmem_fip_rma *rma, void *buff, size_t len, uint64_t addr) { rma->rma_iov.addr = addr; rma->rma_iov.len = len; rma->msg_iov.iov_base = buff; rma->msg_iov.iov_len = len; return (int)fi_readmsg(ep, &rma->msg, rma->flags); } /* * rpmem_fip_sendmsg -- wrapper for fi_sendmsg */ static inline int rpmem_fip_sendmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg, size_t len) { msg->iov.iov_len = len; return (int)fi_sendmsg(ep, &msg->msg, msg->flags); } /* * rpmem_fip_recvmsg -- wrapper for fi_recvmsg */ static inline int rpmem_fip_recvmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg) { return (int)fi_recvmsg(ep, &msg->msg, msg->flags); } /* * rpmem_fip_msg_get_pmsg -- returns message buffer as a persist message */ static inline struct rpmem_msg_persist * rpmem_fip_msg_get_pmsg(struct rpmem_fip_msg *msg) { return (struct rpmem_msg_persist *)msg->iov.iov_base; } /* * rpmem_fip_msg_get_pres -- returns message buffer as a persist response */ static inline struct rpmem_msg_persist_resp * rpmem_fip_msg_get_pres(struct rpmem_fip_msg *msg) { return (struct rpmem_msg_persist_resp *)msg->iov.iov_base; } #ifdef __cplusplus } #endif #endif
5,009
27.465909
75
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/replica.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * replica.h -- module for synchronizing and transforming poolset */ #ifndef REPLICA_H #define REPLICA_H #include "libpmempool.h" #include "pool.h" #include "os_badblock.h" #ifdef __cplusplus extern "C" { #endif #define UNDEF_REPLICA UINT_MAX #define UNDEF_PART UINT_MAX /* * A part marked as broken does not exist or is damaged so that * it cannot be opened and has to be recreated. 
*/ #define IS_BROKEN (1U << 0) /* * A replica marked as inconsistent exists but has inconsistent metadata * (e.g. inconsistent parts or replicas linkage) */ #define IS_INCONSISTENT (1U << 1) /* * A part or replica marked in this way has bad blocks inside. */ #define HAS_BAD_BLOCKS (1U << 2) /* * A part marked in this way has bad blocks in the header */ #define HAS_CORRUPTED_HEADER (1U << 3) /* * A flag which can be passed to sync_replica() to indicate that the function is * called by pmempool_transform */ #define IS_TRANSFORMED (1U << 10) /* * Number of lanes utilized when working with remote replicas */ #define REMOTE_NLANES 1 /* * Helping structures for storing part's health status */ struct part_health_status { unsigned flags; struct badblocks bbs; /* structure with bad blocks */ char *recovery_file_name; /* name of bad block recovery file */ int recovery_file_exists; /* bad block recovery file exists */ }; /* * Helping structures for storing replica and poolset's health status */ struct replica_health_status { unsigned nparts; unsigned nhdrs; /* a flag for the replica */ unsigned flags; /* effective size of a pool, valid only for healthy replica */ size_t pool_size; /* flags for each part */ struct part_health_status part[]; }; struct poolset_health_status { unsigned nreplicas; /* a flag for the poolset */ unsigned flags; /* health statuses for each replica */ struct replica_health_status *replica[]; }; /* get index of the (r)th replica health status */ static inline unsigned REP_HEALTHidx(struct poolset_health_status *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (set->nreplicas + r) % set->nreplicas; } /* get index of the (r + 1)th replica health status */ static inline unsigned REPN_HEALTHidx(struct poolset_health_status *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (set->nreplicas + r + 1) % set->nreplicas; } /* get (p)th part health status */ static inline unsigned PART_HEALTHidx(struct replica_health_status *rep, unsigned p) { 
ASSERTne(rep->nparts, 0); return (rep->nparts + p) % rep->nparts; } /* get (r)th replica health status */ static inline struct replica_health_status * REP_HEALTH(struct poolset_health_status *set, unsigned r) { return set->replica[REP_HEALTHidx(set, r)]; } /* get (p)th part health status */ static inline unsigned PART_HEALTH(struct replica_health_status *rep, unsigned p) { return rep->part[PART_HEALTHidx(rep, p)].flags; } uint64_t replica_get_part_offset(struct pool_set *set, unsigned repn, unsigned partn); void replica_align_badblock_offset_length(size_t *offset, size_t *length, struct pool_set *set_in, unsigned repn, unsigned partn); size_t replica_get_part_data_len(struct pool_set *set_in, unsigned repn, unsigned partn); uint64_t replica_get_part_data_offset(struct pool_set *set_in, unsigned repn, unsigned part); /* * is_dry_run -- (internal) check whether only verification mode is enabled */ static inline bool is_dry_run(unsigned flags) { /* * PMEMPOOL_SYNC_DRY_RUN and PMEMPOOL_TRANSFORM_DRY_RUN * have to have the same value in order to use this common function. 
	 */
	ASSERT_COMPILE_ERROR_ON(PMEMPOOL_SYNC_DRY_RUN !=
			PMEMPOOL_TRANSFORM_DRY_RUN);
	return flags & PMEMPOOL_SYNC_DRY_RUN;
}

/*
 * fix_bad_blocks -- (internal) fix bad blocks - it causes reading or creating
 *                   bad blocks recovery files
 *                   (depending on if they exist or not)
 */
static inline bool
fix_bad_blocks(unsigned flags)
{
	return flags & PMEMPOOL_SYNC_FIX_BAD_BLOCKS;
}

/* recovery-file management */
int replica_remove_all_recovery_files(struct poolset_health_status *set_hs);
int replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
		int fix_bad_blocks);

/* poolset health status: life-cycle and queries */
int replica_create_poolset_health_status(struct pool_set *set,
		struct poolset_health_status **set_hsp);
void replica_free_poolset_health_status(struct poolset_health_status *set_s);
int replica_check_poolset_health(struct pool_set *set,
		struct poolset_health_status **set_hs,
		int called_from_sync, unsigned flags);
int replica_is_part_broken(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
int replica_has_bad_blocks(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_part_has_bad_blocks(struct part_health_status *phs);
int replica_part_has_corrupted_header(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
unsigned replica_find_unbroken_part(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_broken(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_consistent(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_healthy(unsigned repn,
		struct poolset_health_status *set_hs);
unsigned replica_find_healthy_replica(
		struct poolset_health_status *set_hs);
unsigned replica_find_replica_healthy_header(
		struct poolset_health_status *set_hs);
int replica_is_poolset_healthy(struct poolset_health_status *set_hs);
int replica_is_poolset_transformed(unsigned flags);

/* size and layout checks */
ssize_t replica_get_pool_size(struct pool_set *set, unsigned repn);
int replica_check_part_sizes(struct pool_set *set, size_t min_size);
int replica_check_part_dirs(struct pool_set *set);
int replica_check_local_part_dir(struct pool_set *set, unsigned repn,
		unsigned partn);

/* part-file helpers */
int replica_open_replica_part_files(struct pool_set *set, unsigned repn);
int replica_open_poolset_part_files(struct pool_set *set);

/* top-level sync/transform entry points */
int replica_sync(struct pool_set *set_in,
		struct poolset_health_status *set_hs, unsigned flags);
int replica_transform(struct pool_set *set_in, struct pool_set *set_out,
		unsigned flags);

#ifdef __cplusplus
}
#endif

#endif
7,734
30.96281
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * check.h -- internal definitions for logic performing check */ #ifndef CHECK_H #define CHECK_H #ifdef __cplusplus extern "C" { #endif int check_init(PMEMpoolcheck *ppc); struct check_status *check_step(PMEMpoolcheck *ppc); void check_fini(PMEMpoolcheck *ppc); int check_is_end(struct check_data *data); struct pmempool_check_status *check_status_get(struct check_status *status); #ifdef _WIN32 void convert_status_cache(PMEMpoolcheck *ppc, char *buf, size_t size); #endif #ifdef __cplusplus } #endif #endif
2,122
34.383333
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_util.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * check_util.h -- internal definitions check util */ #ifndef CHECK_UTIL_H #define CHECK_UTIL_H #include <time.h> #include <limits.h> #include <sys/param.h> #ifdef __cplusplus extern "C" { #endif #define CHECK_STEP_COMPLETE UINT_MAX #define CHECK_INVALID_QUESTION UINT_MAX #define REQUIRE_ADVANCED "the following error can be fixed using " \ "PMEMPOOL_CHECK_ADVANCED flag" #ifndef min #define min(a, b) ((a) < (b) ? 
(a) : (b))
#endif

/* check control context */
struct check_data;
struct arena;

/* queue of check statuses */
struct check_status;

#define PREFIX_MAX_SIZE 30

/* container storing state of all check steps */
typedef struct {
	unsigned init_done;
	unsigned step;

	unsigned replica;
	unsigned part;
	int single_repl;
	int single_part;

	struct pool_set *set;
	int is_dev_dax;

	struct pool_hdr *hdrp;
	/* copy of the pool header in host byte order */
	struct pool_hdr hdr;
	int hdr_valid;
	/*
	 * If pool header has been modified this field indicates that
	 * the pool parameters structure requires refresh.
	 */
	int pool_hdr_modified;

	unsigned healthy_replicas;

	struct pool_hdr *next_part_hdrp;
	struct pool_hdr *prev_part_hdrp;
	struct pool_hdr *next_repl_hdrp;
	struct pool_hdr *prev_repl_hdrp;

	int next_part_hdr_valid;
	int prev_part_hdr_valid;
	int next_repl_hdr_valid;
	int prev_repl_hdr_valid;

	/* valid poolset uuid */
	uuid_t *valid_puuid;
	/* valid part uuid */
	uuid_t *valid_uuid;

	/* valid part pool header */
	struct pool_hdr *valid_part_hdrp;
	int valid_part_done;
	unsigned valid_part_replica;

	char prefix[PREFIX_MAX_SIZE];

	struct arena *arenap;
	uint64_t offset;
	uint32_t narena;

	uint8_t *bitmap;
	uint8_t *dup_bitmap;
	uint8_t *fbitmap;

	struct list *list_inval;
	struct list *list_flog_inval;
	struct list *list_unmap;

	struct {
		int btti_header;
		int btti_backup;
	} valid;

	struct {
		struct btt_info btti;
		uint64_t btti_offset;
	} pool_valid;
} location;

/* check steps */
void check_bad_blocks(PMEMpoolcheck *ppc);
void check_backup(PMEMpoolcheck *ppc);
void check_pool_hdr(PMEMpoolcheck *ppc);
void check_pool_hdr_uuids(PMEMpoolcheck *ppc);
void check_sds(PMEMpoolcheck *ppc);
void check_log(PMEMpoolcheck *ppc);
void check_blk(PMEMpoolcheck *ppc);
void check_cto(PMEMpoolcheck *ppc);
void check_btt_info(PMEMpoolcheck *ppc);
void check_btt_map_flog(PMEMpoolcheck *ppc);
void check_write(PMEMpoolcheck *ppc);

/* check-data life-cycle and step bookkeeping */
struct check_data *check_data_alloc(void);
void check_data_free(struct check_data *data);
uint32_t check_step_get(struct check_data *data);
void check_step_inc(struct check_data *data);
location *check_get_step_data(struct check_data *data);
void check_end(struct check_data *data);
int check_is_end_util(struct check_data *data);

/* status creation and queue management */
int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
	uint32_t arg, const char *fmt, ...) FORMAT_PRINTF(4, 5);
void check_status_release(PMEMpoolcheck *ppc, struct check_status *status);
void check_clear_status_cache(struct check_data *data);
struct check_status *check_pop_question(struct check_data *data);
struct check_status *check_pop_error(struct check_data *data);
struct check_status *check_pop_info(struct check_data *data);
bool check_has_error(struct check_data *data);
bool check_has_answer(struct check_data *data);
int check_push_answer(PMEMpoolcheck *ppc);

struct pmempool_check_status *check_status_get_util(
	struct check_status *status);
int check_status_is(struct check_status *status,
	enum pmempool_check_msg_type type);

/* create info status */
#define CHECK_INFO(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO, 0, __VA_ARGS__)

/* create info status and append error message based on errno */
#define CHECK_INFO_ERRNO(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO,\
		(uint32_t)errno, __VA_ARGS__)

/* create error status */
#define CHECK_ERR(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_ERROR, 0, __VA_ARGS__)

/* create question status */
#define CHECK_ASK(ppc, question, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, question,\
		__VA_ARGS__)

/* true while the current step still has a check or fix callback */
#define CHECK_NOT_COMPLETE(loc, steps)\
	((loc)->step != CHECK_STEP_COMPLETE &&\
	((steps)[(loc)->step].check != NULL ||\
		(steps)[(loc)->step].fix != NULL))

int check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx,
	int fail_on_no,
	int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx));
int check_questions_sequence_validate(PMEMpoolcheck *ppc);

/* string-formatting helpers */
const char *check_get_time_str(time_t time);
const char *check_get_uuid_str(uuid_t uuid);
const char *check_get_pool_type_str(enum pool_type type);

void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap);

#ifdef _WIN32
void cache_to_utf8(struct check_data *data, char *buf, size_t size);
#endif

/* tests against the user-supplied check flags */
#define CHECK_IS(ppc, flag)\
	util_flag_isset((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_IS_NOT(ppc, flag)\
	util_flag_isclr((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_WITHOUT_FIXING(ppc)\
	CHECK_IS_NOT(ppc, REPAIR) || CHECK_IS(ppc, DRY_RUN)

#ifdef __cplusplus
}
#endif

#endif
6,694
28.493392
78
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/pmempool.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pmempool.h -- internal definitions for libpmempool */ #ifndef PMEMPOOL_H #define PMEMPOOL_H #ifdef __cplusplus extern "C" { #endif #define PMEMPOOL_LOG_PREFIX "libpmempool" #define PMEMPOOL_LOG_LEVEL_VAR "PMEMPOOL_LOG_LEVEL" #define PMEMPOOL_LOG_FILE_VAR "PMEMPOOL_LOG_FILE" enum check_result { CHECK_RESULT_CONSISTENT, CHECK_RESULT_NOT_CONSISTENT, CHECK_RESULT_ASK_QUESTIONS, CHECK_RESULT_PROCESS_ANSWERS, CHECK_RESULT_REPAIRED, CHECK_RESULT_CANNOT_REPAIR, CHECK_RESULT_ERROR, CHECK_RESULT_INTERNAL_ERROR }; /* * pmempool_check_ctx -- context and arguments for check command */ struct pmempool_check_ctx { struct pmempool_check_args args; char *path; char *backup_path; struct check_data *data; struct pool_data *pool; enum check_result result; unsigned sync_required; }; #ifdef __cplusplus } #endif #endif
2,442
30.320513
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/pool.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * pool.h -- internal definitions for pool processing functions
 */

#ifndef POOL_H
#define POOL_H

#include <stdbool.h>
#include <sys/types.h>

#include "libpmemobj.h"
#include "libpmemcto.h"

#include "queue.h"
#include "set.h"
#include "log.h"
#include "blk.h"
#include "btt_layout.h"
#include "cto.h"

#ifdef __cplusplus
extern "C" {
#endif

/* pool types are bit flags so they can be combined and masked */
enum pool_type {
	POOL_TYPE_UNKNOWN = (1 << 0),
	POOL_TYPE_LOG = (1 << 1),
	POOL_TYPE_BLK = (1 << 2),
	POOL_TYPE_OBJ = (1 << 3),
	POOL_TYPE_BTT = (1 << 4),
	POOL_TYPE_CTO = (1 << 5),

	POOL_TYPE_ANY = POOL_TYPE_UNKNOWN | POOL_TYPE_LOG | POOL_TYPE_BLK |
		POOL_TYPE_OBJ | POOL_TYPE_BTT | POOL_TYPE_CTO,
};

/* basic pool parameters (can be filled in via pool_params_from_header) */
struct pool_params {
	enum pool_type type;
	char signature[POOL_HDR_SIG_LEN];
	features_t features;
	size_t size;
	mode_t mode;
	int is_poolset;
	int is_part;
	int is_dev_dax;
	int is_pmem;
	/* type-specific parameters */
	union {
		struct {
			uint64_t bsize;
		} blk;
		struct {
			char layout[PMEMOBJ_MAX_LAYOUT];
		} obj;
		struct {
			char layout[PMEMCTO_MAX_LAYOUT];
		} cto;
	};
};

struct pool_set_file {
	int fd;
	char *fname;
	void *addr;
	size_t size;
	struct pool_set *poolset;
	time_t mtime;
	mode_t mode;
};

/* one BTT arena together with its flog and map buffers */
struct arena {
	TAILQ_ENTRY(arena) next;
	struct btt_info btt_info;
	uint32_t id;
	bool valid;
	bool zeroed;
	uint64_t offset;
	uint8_t *flog;
	size_t flogsize;
	uint32_t *map;
	size_t mapsize;
};

/* all state describing the pool being processed */
struct pool_data {
	struct pool_params params;
	struct pool_set_file *set_file;
	int blk_no_layout;
	union {
		struct pool_hdr pool;
		struct pmemlog log;
		struct pmemblk blk;
		struct pmemcto cto;
	} hdr;
	enum {
		UUID_NOP = 0,
		UUID_FROM_BTT,
		UUID_NOT_FROM_BTT,
	} uuid_op;
	struct arena bttc;
	TAILQ_HEAD(arenashead, arena) arenas;
	uint32_t narenas;
};

/* pool data life-cycle */
struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc);
void pool_data_free(struct pool_data *pool);
void pool_params_from_header(struct pool_params *params,
	const struct pool_hdr *hdr);

int pool_set_parse(struct pool_set **setp, const char *path);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);

/* raw pool I/O helpers */
int pool_read(struct pool_data *pool, void *buff, size_t nbytes,
	uint64_t off);
int pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
	uint64_t off);
int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite);
int pool_set_part_copy(struct pool_set_part *dpart,
	struct pool_set_part *spart, int overwrite);
int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count);

unsigned pool_set_files_count(struct pool_set_file *file);
int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv);
void pool_set_file_unmap_headers(struct pool_set_file *file);

/* pool header and type helpers */
void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp);
enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp);
enum pool_type pool_set_type(struct pool_set *set);
const char *pool_get_pool_type_str(enum pool_type type);

/* BTT helpers */
int pool_btt_info_valid(struct btt_info *infop);
int pool_blk_get_first_valid_arena(struct pool_data *pool,
	struct arena *arenap);
int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize);
uint64_t pool_next_arena_offset(struct pool_data *pool,
	uint64_t header_offset);
uint64_t pool_get_first_valid_btt(struct pool_data *pool,
	struct btt_info *infop, uint64_t offset, bool *zeroed);
size_t pool_get_min_size(enum pool_type);

#ifdef __cplusplus
}
#endif

#endif
4,963
27.365714
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pmem.h -- internal definitions for libpmem */ #ifndef PMEM_H #define PMEM_H #include <stddef.h> #include "libpmem.h" #include "util.h" #ifdef __cplusplus extern "C" { #endif #define PMEM_LOG_PREFIX "libpmem" #define PMEM_LOG_LEVEL_VAR "PMEM_LOG_LEVEL" #define PMEM_LOG_FILE_VAR "PMEM_LOG_FILE" typedef void (*predrain_fence_func)(void); typedef void (*flush_func)(const void *, size_t); typedef int (*is_pmem_func)(const void *addr, size_t len); typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len, unsigned flags); struct pmem_funcs { predrain_fence_func predrain_fence; flush_func flush; is_pmem_func is_pmem; memmove_nodrain_func memmove_nodrain; memset_nodrain_func memset_nodrain; flush_func deep_flush; }; void pmem_init(void); void pmem_os_init(void); void pmem_init_funcs(struct pmem_funcs *funcs); int is_pmem_detect(const void *addr, size_t len); void *pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax); /* * flush_empty_nolog -- (internal) do not flush the CPU cache */ static force_inline void flush_empty_nolog(const void *addr, size_t len) { /* NOP */ } /* * flush64b_empty -- (internal) do not flush the CPU cache */ static force_inline void flush64b_empty(const char *addr) { } /* * pmem_flush_flags -- internal wrapper around pmem_flush */ static inline void pmem_flush_flags(const void *addr, size_t len, unsigned flags) { if (!(flags & PMEM_F_MEM_NOFLUSH)) pmem_flush(addr, len); } void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len, unsigned flags); void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags); #ifdef __cplusplus } #endif #endif
3,394
29.585586
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/flush.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef X86_64_FLUSH_H #define X86_64_FLUSH_H #include <emmintrin.h> #include <stddef.h> #include <stdint.h> #include "util.h" #define FLUSH_ALIGN ((uintptr_t)64) #ifdef _MSC_VER #define pmem_clflushopt _mm_clflushopt #define pmem_clwb _mm_clwb #else /* * The x86 memory instructions are new enough that the compiler * intrinsic functions are not always available. The intrinsic * functions are defined here in terms of asm statements for now. 
*/ #define pmem_clflushopt(addr)\ asm volatile(".byte 0x66; clflush %0" : "+m" \ (*(volatile char *)(addr))); #define pmem_clwb(addr)\ asm volatile(".byte 0x66; xsaveopt %0" : "+m" \ (*(volatile char *)(addr))); #endif /* _MSC_VER */ /* * flush_clflush_nolog -- flush the CPU cache, using clflush */ static force_inline void flush_clflush_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) _mm_clflush((char *)uptr); } /* * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt */ static force_inline void flush_clflushopt_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clflushopt((char *)uptr); } } /* * flush_clwb_nolog -- flush the CPU cache, using clwb */ static force_inline void flush_clwb_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clwb((char *)uptr); } } #endif
3,520
29.885965
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/cpu.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMDK_CPU_H #define PMDK_CPU_H 1 /* * cpu.h -- definitions for "cpu" module */ int is_cpu_genuine_intel(void); int is_cpu_clflush_present(void); int is_cpu_clflushopt_present(void); int is_cpu_clwb_present(void); int is_cpu_avx_present(void); int is_cpu_avx512f_present(void); #endif
1,898
38.5625
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_AVX_H #define PMEM_AVX_H #include <immintrin.h> #include "util.h" /* * avx_zeroupper -- _mm256_zeroupper wrapper * * _mm256_zeroupper clears upper parts of avx registers. * * It's needed for 2 reasons: * - it improves performance of non-avx code after avx * - it works around problem discovered by Valgrind * * In optimized builds gcc inserts VZEROUPPER automatically before * calling non-avx code (or at the end of the function). 
But in release * builds it doesn't, so if we don't do this by ourselves, then when * someone memcpy'ies uninitialized data, Valgrind complains whenever * someone reads those registers. * * One notable example is loader, which tries to detect whether it * needs to save whole ymm registers by looking at their current * (possibly uninitialized) value. * * Valgrind complains like that: * Conditional jump or move depends on uninitialised value(s) * at 0x4015CC9: _dl_runtime_resolve_avx_slow * (in /lib/x86_64-linux-gnu/ld-2.24.so) * by 0x10B531: test_realloc_api (obj_basic_integration.c:185) * by 0x10F1EE: main (obj_basic_integration.c:594) * * Note: We have to be careful to not read AVX registers after this * intrinsic, because of this stupid gcc bug: * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735 */ static force_inline void avx_zeroupper(void) { _mm256_zeroupper(); } static force_inline __m128i m256_get16b(__m256i ymm) { return _mm256_extractf128_si256(ymm, 0); } #ifdef _MSC_VER static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)m256_get8b(ymm); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)m256_get8b(ymm); } #else static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm256_extract_epi64(ymm, 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)_mm256_extract_epi32(ymm, 0); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)_mm256_extract_epi16(ymm, 0); } #endif #endif
3,753
31.362069
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy_memset.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef MEMCPY_MEMSET_H #define MEMCPY_MEMSET_H #include <stddef.h> #include <xmmintrin.h> #include "pmem.h" static inline void barrier_after_ntstores(void) { /* * In this configuration pmem_drain does not contain sfence, so we have * to serialize non-temporal store instructions. 
	 */
	_mm_sfence();
}

static inline void
no_barrier_after_ntstores(void)
{
	/*
	 * In this configuration pmem_drain contains sfence, so we don't have
	 * to serialize non-temporal store instructions
	 */
}

#ifndef AVX512F_AVAILABLE
/* XXX not supported in MSVC version we currently use */
#ifdef _MSC_VER
#define AVX512F_AVAILABLE 0
#else
#define AVX512F_AVAILABLE 1
#endif
#endif

#ifndef AVX_AVAILABLE
#define AVX_AVAILABLE 1
#endif

#ifndef SSE2_AVAILABLE
#define SSE2_AVAILABLE 1
#endif

/*
 * The variants below are named <op>_<store kind>_<ISA>_<flush method>:
 * "mov" uses regular stores, "movnt" non-temporal stores; the suffix
 * selects how the destination range is flushed (clflush/clflushopt/clwb)
 * or "empty" for no flushing.
 */
#if SSE2_AVAILABLE
void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_mov_sse2_empty(char *dest, const char *src, size_t len);

void memmove_movnt_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_empty(char *dest, const char *src, size_t len);

void memset_mov_sse2_clflush(char *dest, int c, size_t len);
void memset_mov_sse2_clflushopt(char *dest, int c, size_t len);
void memset_mov_sse2_clwb(char *dest, int c, size_t len);
void memset_mov_sse2_empty(char *dest, int c, size_t len);

void memset_movnt_sse2_clflush(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb(char *dest, int c, size_t len);
void memset_movnt_sse2_empty(char *dest, int c, size_t len);
#endif

#if AVX_AVAILABLE
void memmove_mov_avx_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx_empty(char *dest, const char *src, size_t len);

void memmove_movnt_avx_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx_empty(char *dest, const char *src, size_t len);

void memset_mov_avx_clflush(char *dest, int c, size_t len);
void memset_mov_avx_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx_clwb(char *dest, int c, size_t len);
void memset_mov_avx_empty(char *dest, int c, size_t len);

void memset_movnt_avx_clflush(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx_clwb(char *dest, int c, size_t len);
void memset_movnt_avx_empty(char *dest, int c, size_t len);
#endif

#if AVX512F_AVAILABLE
void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len);

void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflushopt(char *dest,
		const char *src, size_t len);
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len);

void memset_mov_avx512f_clflush(char *dest, int c, size_t len);
void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx512f_clwb(char *dest, int c, size_t len);
void memset_mov_avx512f_empty(char *dest, int c, size_t len);

void memset_movnt_avx512f_clflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx512f_clwb(char *dest, int c, size_t len);
void memset_movnt_avx512f_empty(char *dest, int c, size_t len);
#endif

/*
 * NOTE(review): presumably the size threshold selecting between the mov
 * and movnt variants; defined and tuned elsewhere -- confirm.
 */
extern size_t Movnt_threshold;

#endif
5,754
41.316176
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_MEMSET_AVX512F_H #define PMEM_MEMSET_AVX512F_H #include <stddef.h> #include "memset_avx.h" static force_inline void memset_small_avx512f(char *dest, __m256i ymm, size_t len) { /* We can't do better than AVX here. */ memset_small_avx(dest, ymm, len); } #endif
1,880
38.1875
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "libpmem.h" #include "memcpy_memset.h" #include "memset_sse2.h" #include "out.h" #include "valgrind_internal.h" static force_inline void memset_movnt4x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); _mm_stream_si128((__m128i *)dest + 4, xmm); _mm_stream_si128((__m128i *)dest + 5, xmm); _mm_stream_si128((__m128i *)dest + 6, xmm); _mm_stream_si128((__m128i *)dest + 7, xmm); _mm_stream_si128((__m128i *)dest + 8, xmm); _mm_stream_si128((__m128i *)dest + 9, xmm); _mm_stream_si128((__m128i *)dest + 10, xmm); _mm_stream_si128((__m128i *)dest + 11, xmm); _mm_stream_si128((__m128i *)dest + 12, xmm); _mm_stream_si128((__m128i *)dest + 13, xmm); _mm_stream_si128((__m128i *)dest + 14, xmm); _mm_stream_si128((__m128i *)dest + 15, xmm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); _mm_stream_si128((__m128i *)dest + 4, xmm); _mm_stream_si128((__m128i *)dest + 5, xmm); _mm_stream_si128((__m128i *)dest + 6, xmm); _mm_stream_si128((__m128i *)dest + 7, xmm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void 
memset_movnt1x16b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest, xmm); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memset_movnt1x8b(char *dest, __m128i xmm) { uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m128i xmm) { uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt); dest += cnt; len -= cnt; } while (len >= 4 * 64) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, xmm); else if (len == 16) memset_movnt1x16b(dest, xmm); else if (len == 8) memset_movnt1x8b(dest, xmm); else if (len == 4) memset_movnt1x4b(dest, xmm); else goto nonnt; goto end; } nonnt: memset_small_sse2(dest, xmm, len); end: maybe_barrier(); }
5,136
25.755208
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" static force_inline void memset_mov4x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); _mm_store_si128((__m128i *)dest + 4, xmm); _mm_store_si128((__m128i *)dest + 5, xmm); _mm_store_si128((__m128i *)dest + 6, xmm); _mm_store_si128((__m128i *)dest + 7, xmm); _mm_store_si128((__m128i *)dest + 8, xmm); _mm_store_si128((__m128i *)dest + 9, xmm); _mm_store_si128((__m128i *)dest + 10, xmm); _mm_store_si128((__m128i *)dest + 11, xmm); _mm_store_si128((__m128i *)dest + 12, xmm); _mm_store_si128((__m128i *)dest + 13, xmm); _mm_store_si128((__m128i *)dest + 14, xmm); _mm_store_si128((__m128i *)dest + 15, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); _mm_store_si128((__m128i *)dest + 4, xmm); _mm_store_si128((__m128i *)dest + 5, xmm); _mm_store_si128((__m128i *)dest + 6, xmm); _mm_store_si128((__m128i *)dest + 7, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt); dest += cnt; len -= cnt; } 
while (len >= 4 * 64) { memset_mov4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_sse2(dest, xmm, len); }
3,985
28.525926
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "libpmem.h" #include "memset_avx.h" #include "memcpy_memset.h" #include "out.h" #include "valgrind_internal.h" static force_inline void memset_movnt8x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); _mm256_stream_si256((__m256i *)dest + 4, ymm); _mm256_stream_si256((__m256i *)dest + 5, ymm); _mm256_stream_si256((__m256i *)dest + 6, ymm); _mm256_stream_si256((__m256i *)dest + 7, ymm); _mm256_stream_si256((__m256i *)dest + 8, ymm); _mm256_stream_si256((__m256i *)dest + 9, ymm); _mm256_stream_si256((__m256i *)dest + 10, ymm); _mm256_stream_si256((__m256i *)dest + 11, ymm); _mm256_stream_si256((__m256i *)dest + 12, ymm); _mm256_stream_si256((__m256i *)dest + 13, ymm); _mm256_stream_si256((__m256i *)dest + 14, ymm); _mm256_stream_si256((__m256i *)dest + 15, ymm); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memset_movnt4x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); _mm256_stream_si256((__m256i *)dest + 4, ymm); _mm256_stream_si256((__m256i *)dest + 5, ymm); _mm256_stream_si256((__m256i *)dest + 6, ymm); _mm256_stream_si256((__m256i *)dest + 7, ymm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); 
_mm256_stream_si256((__m256i *)dest + 1, ymm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm0 = m256_get16b(ymm); _mm_stream_si128((__m128i *)dest, xmm0); VALGRIND_DO_FLUSH(dest - 16, 16); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx(dest, ymm, len); end: avx_zeroupper(); maybe_barrier(); }
5,514
25.137441
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMSET_AVX_H #define PMEM_MEMSET_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "avx.h" #include "libpmem.h" #include "out.h" static force_inline void memset_small_avx_noflush(char *dest, __m256i ymm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ _mm256_storeu_si256((__m256i *)dest, ymm); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm = m256_get16b(ymm); _mm_storeu_si128((__m128i *)dest, xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = m256_get8b(ymm); *(uint64_t *)dest = d8; *(uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d = m256_get4b(ymm); *(uint32_t *)dest = d; *(uint32_t *)(dest + len - 4) = d; return; } /* 3..4 */ uint16_t d2 = m256_get2b(ymm); *(uint16_t *)dest = d2; *(uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = m256_get2b(ymm); *(uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)m256_get2b(ymm); } static force_inline void memset_small_avx(char *dest, __m256i ymm, size_t len) { memset_small_avx_noflush(dest, ymm, len); flush(dest, len); } #endif
2,975
24.655172
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memset_avx.h" #include "memcpy_memset.h" static force_inline void memset_mov8x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); _mm256_store_si256((__m256i *)dest + 4, ymm); _mm256_store_si256((__m256i *)dest + 5, ymm); _mm256_store_si256((__m256i *)dest + 6, ymm); _mm256_store_si256((__m256i *)dest + 7, ymm); _mm256_store_si256((__m256i *)dest + 8, ymm); _mm256_store_si256((__m256i *)dest + 9, ymm); _mm256_store_si256((__m256i *)dest + 10, ymm); _mm256_store_si256((__m256i *)dest + 11, ymm); _mm256_store_si256((__m256i *)dest + 12, ymm); _mm256_store_si256((__m256i *)dest + 13, ymm); _mm256_store_si256((__m256i *)dest + 14, ymm); _mm256_store_si256((__m256i *)dest + 15, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); _mm256_store_si256((__m256i *)dest + 4, ymm); _mm256_store_si256((__m256i *)dest + 5, ymm); _mm256_store_si256((__m256i *)dest + 6, ymm); _mm256_store_si256((__m256i *)dest + 7, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); flush64b(dest + 0 * 64); 
flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_mov8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx(dest, ymm, len); avx_zeroupper(); }
4,570
27.56875
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memset_avx512f.h" #include "memcpy_memset.h" static force_inline void memset_mov32x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); _mm512_store_si512((__m512i *)dest + 8, zmm); _mm512_store_si512((__m512i *)dest + 9, zmm); _mm512_store_si512((__m512i *)dest + 10, zmm); _mm512_store_si512((__m512i *)dest + 11, zmm); _mm512_store_si512((__m512i *)dest + 12, zmm); _mm512_store_si512((__m512i *)dest + 13, zmm); _mm512_store_si512((__m512i *)dest + 14, zmm); _mm512_store_si512((__m512i *)dest + 15, zmm); _mm512_store_si512((__m512i *)dest + 16, zmm); _mm512_store_si512((__m512i *)dest + 17, zmm); _mm512_store_si512((__m512i *)dest + 18, zmm); _mm512_store_si512((__m512i *)dest + 19, zmm); _mm512_store_si512((__m512i *)dest + 20, zmm); _mm512_store_si512((__m512i *)dest + 21, zmm); _mm512_store_si512((__m512i *)dest + 22, zmm); _mm512_store_si512((__m512i *)dest + 23, zmm); _mm512_store_si512((__m512i *)dest + 24, zmm); _mm512_store_si512((__m512i *)dest + 25, zmm); _mm512_store_si512((__m512i *)dest + 26, zmm); _mm512_store_si512((__m512i *)dest + 27, zmm); _mm512_store_si512((__m512i *)dest + 28, zmm); _mm512_store_si512((__m512i *)dest + 29, zmm); _mm512_store_si512((__m512i *)dest + 30, zmm); _mm512_store_si512((__m512i *)dest + 31, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); 
flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memset_mov16x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); _mm512_store_si512((__m512i *)dest + 8, zmm); _mm512_store_si512((__m512i *)dest + 9, zmm); _mm512_store_si512((__m512i *)dest + 10, zmm); _mm512_store_si512((__m512i *)dest + 11, zmm); _mm512_store_si512((__m512i *)dest + 12, zmm); _mm512_store_si512((__m512i *)dest + 13, zmm); _mm512_store_si512((__m512i *)dest + 14, zmm); _mm512_store_si512((__m512i *)dest + 15, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memset_mov8x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); 
_mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m512i zmm = _mm512_set1_epi8((char)c); /* See comment in memset_movnt_avx512f */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_mov32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_mov16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_mov8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len) 
memset_small_avx512f(dest, ymm, len); avx_zeroupper(); }
7,852
28.411985
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMSET_SSE2_H #define PMEM_MEMSET_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "libpmem.h" #include "out.h" static force_inline void memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + 32), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 33..48 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; le32: if (len > 16) { /* 17..32 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm); *(uint64_t *)dest = d8; *(uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm); *(uint32_t *)dest = d4; *(uint32_t *)(dest + len - 4) = d4; return; } /* 3..4 */ uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(uint16_t *)dest = d2; *(uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm); } static force_inline void memset_small_sse2(char *dest, __m128i xmm, size_t len) { memset_small_sse2_noflush(dest, xmm, len); flush(dest, len); } #endif
3,327
26.056911
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "libpmem.h" #include "memcpy_memset.h" #include "memset_avx512f.h" #include "out.h" #include "util.h" #include "valgrind_internal.h" static force_inline void memset_movnt32x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); _mm512_stream_si512((__m512i *)dest + 8, zmm); _mm512_stream_si512((__m512i *)dest + 9, zmm); _mm512_stream_si512((__m512i *)dest + 10, zmm); _mm512_stream_si512((__m512i *)dest + 11, zmm); _mm512_stream_si512((__m512i *)dest + 12, zmm); _mm512_stream_si512((__m512i *)dest + 13, zmm); _mm512_stream_si512((__m512i *)dest + 14, zmm); _mm512_stream_si512((__m512i *)dest + 15, zmm); _mm512_stream_si512((__m512i *)dest + 16, zmm); _mm512_stream_si512((__m512i *)dest + 17, zmm); _mm512_stream_si512((__m512i *)dest + 18, zmm); _mm512_stream_si512((__m512i *)dest + 19, zmm); _mm512_stream_si512((__m512i *)dest + 20, zmm); _mm512_stream_si512((__m512i *)dest + 21, zmm); _mm512_stream_si512((__m512i *)dest + 22, zmm); _mm512_stream_si512((__m512i *)dest + 23, zmm); _mm512_stream_si512((__m512i *)dest + 24, zmm); _mm512_stream_si512((__m512i *)dest + 25, zmm); _mm512_stream_si512((__m512i *)dest + 26, zmm); _mm512_stream_si512((__m512i *)dest + 27, zmm); _mm512_stream_si512((__m512i *)dest + 28, zmm); _mm512_stream_si512((__m512i *)dest + 29, zmm); _mm512_stream_si512((__m512i *)dest + 30, zmm); _mm512_stream_si512((__m512i *)dest + 31, zmm); VALGRIND_DO_FLUSH(dest, 32 * 64); } static force_inline void memset_movnt16x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i 
*)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); _mm512_stream_si512((__m512i *)dest + 8, zmm); _mm512_stream_si512((__m512i *)dest + 9, zmm); _mm512_stream_si512((__m512i *)dest + 10, zmm); _mm512_stream_si512((__m512i *)dest + 11, zmm); _mm512_stream_si512((__m512i *)dest + 12, zmm); _mm512_stream_si512((__m512i *)dest + 13, zmm); _mm512_stream_si512((__m512i *)dest + 14, zmm); _mm512_stream_si512((__m512i *)dest + 15, zmm); VALGRIND_DO_FLUSH(dest, 16 * 64); } static force_inline void memset_movnt8x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memset_movnt4x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); 
VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm = _mm256_extracti128_si256(ymm, 0); _mm_stream_si128((__m128i *)dest, xmm); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m512i zmm = _mm512_set1_epi8((char)c); /* * Can't use _mm512_extracti64x4_epi64, because some versions of gcc * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887 */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_movnt32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_movnt16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_movnt8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx512f(dest, ymm, len); end: avx_zeroupper(); maybe_barrier(); }
7,756
27.105072
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMCPY_AVX_H #define PMEM_MEMCPY_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "libpmem.h" #include "out.h" static force_inline void memmove_small_avx_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); __m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32)); _mm256_storeu_si256((__m256i *)dest, ymm0); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(uint64_t *)src; uint64_t d81 = *(uint64_t *)(src + len - 8); *(uint64_t *)dest = d80; *(uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(uint32_t *)src; uint32_t d41 = *(uint32_t *)(src + len - 4); *(uint32_t *)dest = d40; *(uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(uint16_t *)src; uint16_t d21 = *(uint16_t *)(src + len - 2); *(uint16_t *)dest = d20; *(uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(uint16_t *)dest = *(uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_avx(char *dest, const char *src, size_t len) { memmove_small_avx_noflush(dest, src, len); flush(dest, len); } #endif
3,275
26.529412
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMCPY_SSE2_H #define PMEM_MEMCPY_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include "libpmem.h" #include "out.h" static force_inline void memmove_small_sse2_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32)); __m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + 32), xmm2); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm3); return; } /* 33..48 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm2); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(uint64_t *)src; uint64_t d81 = *(uint64_t *)(src + len - 8); *(uint64_t *)dest = d80; *(uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(uint32_t *)src; uint32_t d41 = *(uint32_t *)(src + len - 4); *(uint32_t *)dest = d40; *(uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(uint16_t *)src; uint16_t d21 = *(uint16_t *)(src + len - 2); *(uint16_t *)dest = d20; *(uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(uint16_t *)dest = *(uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline 
void memmove_small_sse2(char *dest, const char *src, size_t len) { memmove_small_sse2_noflush(dest, src, len); flush(dest, len); } #endif
3,846
27.496296
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); __m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8); __m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9); __m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10); __m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11); __m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12); __m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13); __m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14); __m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); _mm_store_si128((__m128i *)dest + 4, xmm4); _mm_store_si128((__m128i *)dest + 5, xmm5); _mm_store_si128((__m128i *)dest + 6, xmm6); _mm_store_si128((__m128i *)dest + 7, xmm7); _mm_store_si128((__m128i *)dest + 8, xmm8); _mm_store_si128((__m128i *)dest + 9, xmm9); _mm_store_si128((__m128i *)dest + 10, xmm10); _mm_store_si128((__m128i *)dest + 11, xmm11); _mm_store_si128((__m128i *)dest + 12, xmm12); _mm_store_si128((__m128i *)dest + 13, xmm13); _mm_store_si128((__m128i *)dest + 14, xmm14); _mm_store_si128((__m128i *)dest + 15, xmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m128i xmm0 = 
_mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); _mm_store_si128((__m128i *)dest + 4, xmm4); _mm_store_si128((__m128i *)dest + 5, xmm5); _mm_store_si128((__m128i *)dest + 6, xmm6); _mm_store_si128((__m128i *)dest + 7, xmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_sse_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_sse2(dest, src, len); } static force_inline void memmove_mov_sse_bw(char *dest, const char *src, size_t len) { dest += len; src += len; 
size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_sse2(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_sse_fw(dest, src, len); else memmove_mov_sse_bw(dest, src, len); }
6,467
28.534247
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx512f.h" #include "memcpy_memset.h" static force_inline void memmove_mov32x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); __m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16); __m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17); __m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18); __m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19); __m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20); __m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21); __m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22); __m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23); __m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24); __m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25); __m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26); __m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27); __m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28); __m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29); __m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30); 
__m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); _mm512_store_si512((__m512i *)dest + 8, zmm8); _mm512_store_si512((__m512i *)dest + 9, zmm9); _mm512_store_si512((__m512i *)dest + 10, zmm10); _mm512_store_si512((__m512i *)dest + 11, zmm11); _mm512_store_si512((__m512i *)dest + 12, zmm12); _mm512_store_si512((__m512i *)dest + 13, zmm13); _mm512_store_si512((__m512i *)dest + 14, zmm14); _mm512_store_si512((__m512i *)dest + 15, zmm15); _mm512_store_si512((__m512i *)dest + 16, zmm16); _mm512_store_si512((__m512i *)dest + 17, zmm17); _mm512_store_si512((__m512i *)dest + 18, zmm18); _mm512_store_si512((__m512i *)dest + 19, zmm19); _mm512_store_si512((__m512i *)dest + 20, zmm20); _mm512_store_si512((__m512i *)dest + 21, zmm21); _mm512_store_si512((__m512i *)dest + 22, zmm22); _mm512_store_si512((__m512i *)dest + 23, zmm23); _mm512_store_si512((__m512i *)dest + 24, zmm24); _mm512_store_si512((__m512i *)dest + 25, zmm25); _mm512_store_si512((__m512i *)dest + 26, zmm26); _mm512_store_si512((__m512i *)dest + 27, zmm27); _mm512_store_si512((__m512i *)dest + 28, zmm28); _mm512_store_si512((__m512i *)dest + 29, zmm29); _mm512_store_si512((__m512i *)dest + 30, zmm30); _mm512_store_si512((__m512i *)dest + 31, zmm31); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest 
+ 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memmove_mov16x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); _mm512_store_si512((__m512i *)dest + 8, zmm8); _mm512_store_si512((__m512i *)dest + 9, zmm9); _mm512_store_si512((__m512i *)dest + 10, zmm10); _mm512_store_si512((__m512i *)dest + 11, zmm11); _mm512_store_si512((__m512i *)dest 
+ 12, zmm12); _mm512_store_si512((__m512i *)dest + 13, zmm13); _mm512_store_si512((__m512i *)dest + 14, zmm14); _mm512_store_si512((__m512i *)dest + 15, zmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memmove_mov8x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); _mm512_store_si512((__m512i *)dest + 0, zmm0); 
_mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); _mm512_store_si512((__m512i *)dest + 0, zmm0); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx512f_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_mov32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_mov16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_mov8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx512f(dest, src, len); } static force_inline void memmove_mov_avx512f_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt); } while (len >= 32 * 64) { dest -= 32 * 64; src 
-= 32 * 64; len -= 32 * 64; memmove_mov32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_mov16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_avx512f(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx512f_fw(dest, src, len); else memmove_mov_avx512f_bw(dest, src, len); avx_zeroupper(); }
12,825
30.131068
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx.h" #include "memcpy_memset.h" static force_inline void memmove_mov8x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); __m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8); __m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9); __m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10); __m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11); __m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12); __m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13); __m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14); __m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); _mm256_store_si256((__m256i *)dest + 4, ymm4); _mm256_store_si256((__m256i *)dest + 5, ymm5); _mm256_store_si256((__m256i *)dest + 6, ymm6); _mm256_store_si256((__m256i *)dest + 7, ymm7); _mm256_store_si256((__m256i *)dest + 8, ymm8); _mm256_store_si256((__m256i *)dest + 9, ymm9); _mm256_store_si256((__m256i *)dest + 10, ymm10); _mm256_store_si256((__m256i *)dest + 11, ymm11); _mm256_store_si256((__m256i *)dest + 12, ymm12); _mm256_store_si256((__m256i *)dest + 13, ymm13); _mm256_store_si256((__m256i *)dest + 14, ymm14); _mm256_store_si256((__m256i *)dest + 15, ymm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); 
flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); _mm256_store_si256((__m256i *)dest + 4, ymm4); _mm256_store_si256((__m256i *)dest + 5, ymm5); _mm256_store_si256((__m256i *)dest + 6, ymm6); _mm256_store_si256((__m256i *)dest + 7, ymm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx_fw(char *dest, const char 
*src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_mov8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx(dest, src, len); } static force_inline void memmove_mov_avx_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_avx(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx_fw(dest, src, len); else memmove_mov_avx_bw(dest, src, len); avx_zeroupper(); }
7,378
27.937255
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_MEMCPY_AVX512F_H #define PMEM_MEMCPY_AVX512F_H #include <stddef.h> #include "memcpy_avx.h" static force_inline void memmove_small_avx512f(char *dest, const char *src, size_t len) { /* We can't do better than AVX here. */ memmove_small_avx(dest, src, len); } #endif
1,886
38.3125
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx512f.h" #include "memcpy_memset.h" #include "libpmem.h" #include "valgrind_internal.h" static force_inline void memmove_movnt32x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); __m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16); __m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17); __m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18); __m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19); __m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20); __m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21); __m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22); __m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23); __m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24); __m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25); __m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26); __m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27); __m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28); __m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29); 
__m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30); __m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); _mm512_stream_si512((__m512i *)dest + 8, zmm8); _mm512_stream_si512((__m512i *)dest + 9, zmm9); _mm512_stream_si512((__m512i *)dest + 10, zmm10); _mm512_stream_si512((__m512i *)dest + 11, zmm11); _mm512_stream_si512((__m512i *)dest + 12, zmm12); _mm512_stream_si512((__m512i *)dest + 13, zmm13); _mm512_stream_si512((__m512i *)dest + 14, zmm14); _mm512_stream_si512((__m512i *)dest + 15, zmm15); _mm512_stream_si512((__m512i *)dest + 16, zmm16); _mm512_stream_si512((__m512i *)dest + 17, zmm17); _mm512_stream_si512((__m512i *)dest + 18, zmm18); _mm512_stream_si512((__m512i *)dest + 19, zmm19); _mm512_stream_si512((__m512i *)dest + 20, zmm20); _mm512_stream_si512((__m512i *)dest + 21, zmm21); _mm512_stream_si512((__m512i *)dest + 22, zmm22); _mm512_stream_si512((__m512i *)dest + 23, zmm23); _mm512_stream_si512((__m512i *)dest + 24, zmm24); _mm512_stream_si512((__m512i *)dest + 25, zmm25); _mm512_stream_si512((__m512i *)dest + 26, zmm26); _mm512_stream_si512((__m512i *)dest + 27, zmm27); _mm512_stream_si512((__m512i *)dest + 28, zmm28); _mm512_stream_si512((__m512i *)dest + 29, zmm29); _mm512_stream_si512((__m512i *)dest + 30, zmm30); _mm512_stream_si512((__m512i *)dest + 31, zmm31); VALGRIND_DO_FLUSH(dest, 32 * 64); } static force_inline void memmove_movnt16x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = 
_mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); _mm512_stream_si512((__m512i *)dest + 8, zmm8); _mm512_stream_si512((__m512i *)dest + 9, zmm9); _mm512_stream_si512((__m512i *)dest + 10, zmm10); _mm512_stream_si512((__m512i *)dest + 11, zmm11); _mm512_stream_si512((__m512i *)dest + 12, zmm12); _mm512_stream_si512((__m512i *)dest + 13, zmm13); _mm512_stream_si512((__m512i *)dest + 14, zmm14); _mm512_stream_si512((__m512i *)dest + 15, zmm15); VALGRIND_DO_FLUSH(dest, 16 * 64); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i 
zmm7 = _mm512_loadu_si512((__m512i *)src + 7); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); _mm512_stream_si512((__m512i *)dest + 0, zmm0); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i zmm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, zmm0); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i ymm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, ymm0); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { 
_mm_stream_si64((long long *)dest, *(long long *)src); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); VALGRIND_DO_FLUSH(dest, 4); } static force_inline void memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_movnt32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_movnt16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx512f(dest, src, len); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_movnt32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_movnt16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx512f(dest, src, len); end: avx_zeroupper(); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx512f_fw(dest, src, len); else memmove_movnt_avx512f_bw(dest, src, len); maybe_barrier(); }
13,191
28.446429
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "valgrind_internal.h" static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); __m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8); __m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9); __m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10); __m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11); __m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12); __m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13); __m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14); __m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); _mm_stream_si128((__m128i *)dest + 4, xmm4); _mm_stream_si128((__m128i *)dest + 5, xmm5); _mm_stream_si128((__m128i *)dest + 6, xmm6); _mm_stream_si128((__m128i *)dest + 7, xmm7); _mm_stream_si128((__m128i *)dest + 8, xmm8); _mm_stream_si128((__m128i *)dest + 9, xmm9); _mm_stream_si128((__m128i *)dest + 10, xmm10); _mm_stream_si128((__m128i *)dest + 11, xmm11); _mm_stream_si128((__m128i *)dest + 12, xmm12); _mm_stream_si128((__m128i *)dest + 13, xmm13); _mm_stream_si128((__m128i *)dest + 14, xmm14); _mm_stream_si128((__m128i *)dest + 15, xmm15); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); 
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); _mm_stream_si128((__m128i *)dest + 4, xmm4); _mm_stream_si128((__m128i *)dest + 5, xmm5); _mm_stream_si128((__m128i *)dest + 6, xmm6); _mm_stream_si128((__m128i *)dest + 7, xmm7); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { 
_mm_stream_si32((int *)dest, *(int *)src); VALGRIND_DO_FLUSH(dest, 4); } static force_inline void memmove_movnt_sse_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; return; } nonnt: memmove_small_sse2(dest, src, len); } static force_inline void memmove_movnt_sse_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } return; } nonnt: dest -= len; src -= len; memmove_small_sse2(dest, src, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_sse_fw(dest, src, len); else memmove_movnt_sse_bw(dest, src, len); maybe_barrier(); }
8,204
25.813725
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * memcpy_nt_avx.h -- AVX variant of libpmem's non-temporal memmove.
 * Streams data past the CPU cache with 256-bit movnt stores, draining
 * the length in decreasing block sizes: 8/4/2/1 cache lines, then a
 * single 32/16/8/4-byte NT store, falling back to a small cached copy
 * for any remainder.
 */

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
#include "valgrind_internal.h"

/* copy 8 cache lines (512 B) with sixteen non-temporal 256-bit stores */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	/* load everything first so stores can't clobber an overlapping src */
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
	__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
	__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
	__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
	__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
	__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
	__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
	__m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8);
	__m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9);
	__m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10);
	__m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11);
	__m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12);
	__m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13);
	__m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14);
	__m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15);

	_mm256_stream_si256((__m256i *)dest + 0, ymm0);
	_mm256_stream_si256((__m256i *)dest + 1, ymm1);
	_mm256_stream_si256((__m256i *)dest + 2, ymm2);
	_mm256_stream_si256((__m256i *)dest + 3, ymm3);
	_mm256_stream_si256((__m256i *)dest + 4, ymm4);
	_mm256_stream_si256((__m256i *)dest + 5, ymm5);
	_mm256_stream_si256((__m256i *)dest + 6, ymm6);
	_mm256_stream_si256((__m256i *)dest + 7, ymm7);
	_mm256_stream_si256((__m256i *)dest + 8, ymm8);
	_mm256_stream_si256((__m256i *)dest + 9, ymm9);
	_mm256_stream_si256((__m256i *)dest + 10, ymm10);
	_mm256_stream_si256((__m256i *)dest + 11, ymm11);
	_mm256_stream_si256((__m256i *)dest + 12, ymm12);
	_mm256_stream_si256((__m256i *)dest + 13, ymm13);
	_mm256_stream_si256((__m256i *)dest + 14, ymm14);
	_mm256_stream_si256((__m256i *)dest + 15, ymm15);

	/* tell Valgrind's pmemcheck these bytes were flushed by the NT store */
	VALGRIND_DO_FLUSH(dest, 8 * 64);
}

/* copy 4 cache lines (256 B) with non-temporal 256-bit stores */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
	__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
	__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
	__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
	__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
	__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
	__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);

	_mm256_stream_si256((__m256i *)dest + 0, ymm0);
	_mm256_stream_si256((__m256i *)dest + 1, ymm1);
	_mm256_stream_si256((__m256i *)dest + 2, ymm2);
	_mm256_stream_si256((__m256i *)dest + 3, ymm3);
	_mm256_stream_si256((__m256i *)dest + 4, ymm4);
	_mm256_stream_si256((__m256i *)dest + 5, ymm5);
	_mm256_stream_si256((__m256i *)dest + 6, ymm6);
	_mm256_stream_si256((__m256i *)dest + 7, ymm7);

	VALGRIND_DO_FLUSH(dest, 4 * 64);
}

/* copy 2 cache lines (128 B) with non-temporal 256-bit stores */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
	__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
	__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);

	_mm256_stream_si256((__m256i *)dest + 0, ymm0);
	_mm256_stream_si256((__m256i *)dest + 1, ymm1);
	_mm256_stream_si256((__m256i *)dest + 2, ymm2);
	_mm256_stream_si256((__m256i *)dest + 3, ymm3);

	VALGRIND_DO_FLUSH(dest, 2 * 64);
}

/* copy 1 cache line (64 B) with non-temporal 256-bit stores */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);

	_mm256_stream_si256((__m256i *)dest + 0, ymm0);
	_mm256_stream_si256((__m256i *)dest + 1, ymm1);

	VALGRIND_DO_FLUSH(dest, 64);
}

/* copy 32 B with a single non-temporal 256-bit store */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);

	_mm256_stream_si256((__m256i *)dest, ymm0);

	VALGRIND_DO_FLUSH(dest, 32);
}

/* copy 16 B with a single non-temporal 128-bit store */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);

	_mm_stream_si128((__m128i *)dest, xmm0);

	VALGRIND_DO_FLUSH(dest, 16);
}

/* copy 8 B with a single non-temporal 64-bit store */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);

	VALGRIND_DO_FLUSH(dest, 8);
}

/* copy 4 B with a single non-temporal 32-bit store */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);

	VALGRIND_DO_FLUSH(dest, 4);
}

/*
 * memmove_movnt_avx_fw -- forward (ascending address) movnt copy,
 * safe when dest <= src or the ranges don't overlap.
 */
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len)
{
	/* head: cached copy up to the next 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;

		if (cnt > len)
			cnt = len;

		memmove_small_avx(dest, src, cnt);

		dest += cnt;
		src += cnt;
		len -= cnt;
	}

	/* bulk: largest NT blocks first */
	while (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}

	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}

	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}

	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);

		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}

	if (len == 0)
		goto end;

	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;

		goto end;
	}

nonnt:
	/* tail not expressible as one NT store: cached copy */
	memmove_small_avx(dest, src, len);
end:
	avx_zeroupper();
}

/*
 * memmove_movnt_avx_bw -- backward (descending address) movnt copy,
 * used when the ranges overlap with dest > src.
 */
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len)
{
	/* start past the end and work down */
	dest += len;
	src += len;

	/* head: cached copy down to a 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;

		dest -= cnt;
		src -= cnt;
		len -= cnt;

		memmove_small_avx(dest, src, cnt);
	}

	while (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}

	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}

	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}

	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}

	if (len == 0)
		goto end;

	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}

		goto end;
	}

nonnt:
	dest -= len;
	src -= len;
	memmove_small_avx(dest, src, len);
end:
	avx_zeroupper();
}

/*
 * EXPORTED_SYMBOL -- entry point (name chosen by the including .c file).
 * The unsigned subtraction makes one test cover both "dest below src"
 * and "no overlap" cases; only true overlap with dest > src goes backward.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx_fw(dest, src, len);
	else
		memmove_movnt_avx_bw(dest, src, len);

	maybe_barrier();
}
8,883
25.519403
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/flush.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef ARM64_FLUSH_H
#define ARM64_FLUSH_H

#include <stdint.h>
#include "arm_cacheops.h"
#include "util.h"

/* cache-line granularity assumed by the flush loops below */
#define FLUSH_ALIGN ((uintptr_t)64)

/*
 * flush_dcache_invalidate_opt_nolog -- flush the CPU cache, using
 * arm_clean_and_invalidate_va_to_poc (see arm_cacheops.h) {DC CIVAC}
 */
static force_inline void
flush_dcache_invalidate_opt_nolog(const void *addr, size_t len)
{
	uintptr_t uptr;

	/* barriers order the clean+invalidate against surrounding accesses */
	arm_data_memory_barrier();
	for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
		uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
		arm_clean_and_invalidate_va_to_poc((char *)uptr);
	}
	arm_data_memory_barrier();
}

/*
 * flush_dcache_nolog -- flush the CPU cache, using DC CVAC
 */
static force_inline void
flush_dcache_nolog(const void *addr, size_t len)
{
	uintptr_t uptr;

	/*
	 * Loop through cache-line-size (typically 64B) aligned chunks
	 * covering the given range.
	 */
	for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
		uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
		arm_clean_va_to_poc((char *)uptr);
	}
}

#endif
2,631
32.74359
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/arm_cacheops.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * ARM inline assembly to flush and invalidate caches
 * clwb => dc cvac
 * clflush | clflushopt => dc civac
 * fence => dmb ish
 */

#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H

#include <stdlib.h>

/* clean one cache line to the point of coherency (DC CVAC) */
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}

/* full-system data memory barrier (DMB ISH), inner-shareable domain */
static inline void
arm_data_memory_barrier(void)
{
	asm volatile("dmb ish" : : : "memory");
}

/* clean and invalidate one cache line to the point of coherency (DC CIVAC) */
static inline void
arm_clean_and_invalidate_va_to_poc(const void *addr)
{
	asm volatile("dc civac, %0" : : "r" (addr) : "memory");
}
#endif
2,185
34.258065
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * vmem.h -- internal definitions for libvmem
 */
#ifndef VMEM_H
#define VMEM_H 1

#include <stddef.h>
#include "pool_hdr.h"

#ifdef __cplusplus
extern "C" {
#endif

#define VMEM_LOG_PREFIX "libvmem"
#define VMEM_LOG_LEVEL_VAR "VMEM_LOG_LEVEL"
#define VMEM_LOG_FILE_VAR "VMEM_LOG_FILE"

/* attributes of the vmem memory pool format for the pool header */
/*
 * NOTE(review): upstream pads the signature with trailing spaces so the
 * literal is exactly 8 bytes including '\0' -- verify the padding below
 * survived extraction.
 */
#define VMEM_HDR_SIG "VMEM " /* must be 8 bytes including '\0' */
#define VMEM_FORMAT_MAJOR 1

/* runtime state of one vmem pool */
struct vmem {
	struct pool_hdr hdr;	/* memory pool header */
	void *addr;		/* mapped region */
	size_t size;		/* size of mapped region */
	int caller_mapped;	/* nonzero if the caller supplied the mapping */
};

/* library constructor hook, called once at load time */
void vmem_construct(void);

#ifdef __cplusplus
}
#endif

#endif
2,284
31.183099
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.h
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H

#if defined(__cplusplus)
extern "C" {
#endif

/* values for struct option.has_arg, as in POSIX getopt_long() */
#define no_argument 0
#define required_argument 1
#define optional_argument 2

/* global parser state shared with callers, mirroring POSIX getopt() */
extern char* optarg;
extern int optind, opterr, optopt;

/* descriptor for one long option accepted by getopt_long() */
struct option {
  const char* name;
  int has_arg;
  int* flag;
  int val;
};

int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
  const char* optstring, const struct option* longopts, int* longindex);

#if defined(__cplusplus)
}
#endif

#endif // INCLUDED_GETOPT_PORT_H
2,137
35.237288
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h
/* ./../windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ /* Defined if __attribute__((...)) syntax is supported. */ /* #undef JEMALLOC_HAVE_ATTR */ /* Defined if alloc_size attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */ /* Defined if format(gnu_printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ /* Defined if format(printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */ /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ /* #undef JEMALLOC_OVERRIDE_MEMALIGN */ /* #undef JEMALLOC_OVERRIDE_VALLOC */ /* * At least Linux omits the "const" in: * * size_t malloc_usable_size(const void *ptr); * * Match the operating system's prototype. */ #define JEMALLOC_USABLE_SIZE_CONST const /* * If defined, specify throw() for the public function prototypes when compiling * with C++. The only justification for this is to match the prototypes that * glibc defines. */ /* #undef JEMALLOC_USE_CXX_THROW */ #ifdef _MSC_VER # ifdef _WIN64 # define LG_SIZEOF_PTR_WIN 3 # else # define LG_SIZEOF_PTR_WIN 2 # endif #endif /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ #define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
1,327
27.255319
115
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle_jet.h
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
/* map the plain API names onto the jet_-prefixed (test) symbols */
#  define pool_create jet_pool_create
#  define pool_delete jet_pool_delete
#  define pool_malloc jet_pool_malloc
#  define pool_calloc jet_pool_calloc
#  define pool_ralloc jet_pool_ralloc
#  define pool_aligned_alloc jet_pool_aligned_alloc
#  define pool_free jet_pool_free
#  define pool_malloc_usable_size jet_pool_malloc_usable_size
#  define pool_malloc_stats_print jet_pool_malloc_stats_print
#  define pool_extend jet_pool_extend
#  define pool_set_alloc_funcs jet_pool_set_alloc_funcs
#  define pool_check jet_pool_check
#  define malloc_conf jet_malloc_conf
#  define malloc_message jet_malloc_message
#  define malloc jet_malloc
#  define calloc jet_calloc
#  define posix_memalign jet_posix_memalign
#  define aligned_alloc jet_aligned_alloc
#  define realloc jet_realloc
#  define free jet_free
#  define mallocx jet_mallocx
#  define rallocx jet_rallocx
#  define xallocx jet_xallocx
#  define sallocx jet_sallocx
#  define dallocx jet_dallocx
#  define nallocx jet_nallocx
#  define mallctl jet_mallctl
#  define mallctlnametomib jet_mallctlnametomib
#  define mallctlbymib jet_mallctlbymib
#  define navsnprintf jet_navsnprintf
#  define malloc_stats_print jet_malloc_stats_print
#  define malloc_usable_size jet_malloc_usable_size
#endif

/*
 * The jet_* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
#  undef jet_pool_create
#  undef jet_pool_delete
#  undef jet_pool_malloc
#  undef jet_pool_calloc
#  undef jet_pool_ralloc
#  undef jet_pool_aligned_alloc
#  undef jet_pool_free
#  undef jet_pool_malloc_usable_size
#  undef jet_pool_malloc_stats_print
#  undef jet_pool_extend
#  undef jet_pool_set_alloc_funcs
#  undef jet_pool_check
#  undef jet_malloc_conf
#  undef jet_malloc_message
#  undef jet_malloc
#  undef jet_calloc
#  undef jet_posix_memalign
#  undef jet_aligned_alloc
#  undef jet_realloc
#  undef jet_free
#  undef jet_mallocx
#  undef jet_rallocx
#  undef jet_xallocx
#  undef jet_sallocx
#  undef jet_dallocx
#  undef jet_nallocx
#  undef jet_mallctl
#  undef jet_mallctlnametomib
#  undef jet_mallctlbymib
#  undef jet_navsnprintf
#  undef jet_malloc_stats_print
#  undef jet_malloc_usable_size
#endif
2,939
32.793103
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos_jet.h
/*
 * The jet_ prefix on the following public symbol declarations is an artifact
 * of namespace management, and should be omitted in application code unless
 * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
 */
extern JEMALLOC_EXPORT const char	*jet_malloc_conf;
extern JEMALLOC_EXPORT void		(*jet_malloc_message)(void *cbopaque,
    const char *s);

typedef struct pool_s pool_t;

/* pool lifecycle: create/extend/delete a jemalloc pool over caller memory */
JEMALLOC_EXPORT pool_t	*jet_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int	jet_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t	jet_pool_extend(pool_t *pool, void *addr,
    size_t size, int zeroed);

/* per-pool allocation entry points */
JEMALLOC_EXPORT void	*jet_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void	*jet_pool_calloc(pool_t *pool, size_t nmemb,
    size_t size);
JEMALLOC_EXPORT void	*jet_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void	*jet_pool_aligned_alloc(pool_t *pool, size_t alignment,
    size_t size);
JEMALLOC_EXPORT void	jet_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t	jet_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void	jet_pool_malloc_stats_print(pool_t *pool,
    void (*write_cb)(void *, const char *), void *cbopaque, const char *opts);
JEMALLOC_EXPORT void	jet_pool_set_alloc_funcs(
    void *(*malloc_func)(size_t), void (*free_func)(void *));
JEMALLOC_EXPORT int	jet_pool_check(pool_t *pool);

/* standard allocator API (jet_-mangled) */
JEMALLOC_EXPORT void	*jet_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void	*jet_calloc(size_t num, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int	jet_posix_memalign(void **memptr, size_t alignment,
    size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void	*jet_aligned_alloc(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void	*jet_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void	jet_free(void *ptr);

/* extended (x-suffixed) API */
JEMALLOC_EXPORT void	*jet_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void	*jet_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t	jet_xallocx(void *ptr, size_t size, size_t extra,
    int flags);
JEMALLOC_EXPORT size_t	jet_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void	jet_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t	jet_nallocx(size_t size, int flags);

/* introspection / control */
JEMALLOC_EXPORT int	jet_mallctl(const char *name, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int	jet_mallctlnametomib(const char *name, size_t *mibp,
    size_t *miblenp);
JEMALLOC_EXPORT int	jet_mallctlbymib(const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void	jet_malloc_stats_print(
    void (*write_cb)(void *, const char *), void *jet_cbopaque,
    const char *opts);
JEMALLOC_EXPORT size_t	jet_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int	jet_navsnprintf(char *str, size_t size,
    const char *format, va_list ap);

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void *	jet_memalign(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void *	jet_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
3,176
45.043478
91
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_rename.h
/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
/* map every je_ API symbol onto its je_vmem_-prefixed exported name */
#  define je_pool_create je_vmem_pool_create
#  define je_pool_delete je_vmem_pool_delete
#  define je_pool_malloc je_vmem_pool_malloc
#  define je_pool_calloc je_vmem_pool_calloc
#  define je_pool_ralloc je_vmem_pool_ralloc
#  define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
#  define je_pool_free je_vmem_pool_free
#  define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
#  define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
#  define je_pool_extend je_vmem_pool_extend
#  define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
#  define je_pool_check je_vmem_pool_check
#  define je_malloc_conf je_vmem_malloc_conf
#  define je_malloc_message je_vmem_malloc_message
#  define je_malloc je_vmem_malloc
#  define je_calloc je_vmem_calloc
#  define je_posix_memalign je_vmem_posix_memalign
#  define je_aligned_alloc je_vmem_aligned_alloc
#  define je_realloc je_vmem_realloc
#  define je_free je_vmem_free
#  define je_mallocx je_vmem_mallocx
#  define je_rallocx je_vmem_rallocx
#  define je_xallocx je_vmem_xallocx
#  define je_sallocx je_vmem_sallocx
#  define je_dallocx je_vmem_dallocx
#  define je_nallocx je_vmem_nallocx
#  define je_mallctl je_vmem_mallctl
#  define je_mallctlnametomib je_vmem_mallctlnametomib
#  define je_mallctlbymib je_vmem_mallctlbymib
#  define je_navsnprintf je_vmem_navsnprintf
#  define je_malloc_stats_print je_vmem_malloc_stats_print
#  define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
1,694
41.375
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle.h
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
/* map the plain API names onto the je_-prefixed symbols */
#  define pool_create je_pool_create
#  define pool_delete je_pool_delete
#  define pool_malloc je_pool_malloc
#  define pool_calloc je_pool_calloc
#  define pool_ralloc je_pool_ralloc
#  define pool_aligned_alloc je_pool_aligned_alloc
#  define pool_free je_pool_free
#  define pool_malloc_usable_size je_pool_malloc_usable_size
#  define pool_malloc_stats_print je_pool_malloc_stats_print
#  define pool_extend je_pool_extend
#  define pool_set_alloc_funcs je_pool_set_alloc_funcs
#  define pool_check je_pool_check
#  define malloc_conf je_malloc_conf
#  define malloc_message je_malloc_message
#  define malloc je_malloc
#  define calloc je_calloc
#  define posix_memalign je_posix_memalign
#  define aligned_alloc je_aligned_alloc
#  define realloc je_realloc
#  define free je_free
#  define mallocx je_mallocx
#  define rallocx je_rallocx
#  define xallocx je_xallocx
#  define sallocx je_sallocx
#  define dallocx je_dallocx
#  define nallocx je_nallocx
#  define mallctl je_mallctl
#  define mallctlnametomib je_mallctlnametomib
#  define mallctlbymib je_mallctlbymib
#  define navsnprintf je_navsnprintf
#  define malloc_stats_print je_malloc_stats_print
#  define malloc_usable_size je_malloc_usable_size
#endif

/*
 * The je_* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
#  undef je_pool_create
#  undef je_pool_delete
#  undef je_pool_malloc
#  undef je_pool_calloc
#  undef je_pool_ralloc
#  undef je_pool_aligned_alloc
#  undef je_pool_free
#  undef je_pool_malloc_usable_size
#  undef je_pool_malloc_stats_print
#  undef je_pool_extend
#  undef je_pool_set_alloc_funcs
#  undef je_pool_check
#  undef je_malloc_conf
#  undef je_malloc_message
#  undef je_malloc
#  undef je_calloc
#  undef je_posix_memalign
#  undef je_aligned_alloc
#  undef je_realloc
#  undef je_free
#  undef je_mallocx
#  undef je_rallocx
#  undef je_xallocx
#  undef je_sallocx
#  undef je_dallocx
#  undef je_nallocx
#  undef je_mallctl
#  undef je_mallctlnametomib
#  undef je_mallctlbymib
#  undef je_navsnprintf
#  undef je_malloc_stats_print
#  undef je_malloc_usable_size
#endif
2,874
32.045977
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc.h
#ifndef JEMALLOC_H_ #define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif /* Defined if __attribute__((...)) syntax is supported. */ /* #undef JEMALLOC_HAVE_ATTR */ /* Defined if alloc_size attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */ /* Defined if format(gnu_printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ /* Defined if format(printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */ /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ /* #undef JEMALLOC_OVERRIDE_MEMALIGN */ /* #undef JEMALLOC_OVERRIDE_VALLOC */ /* * At least Linux omits the "const" in: * * size_t malloc_usable_size(const void *ptr); * * Match the operating system's prototype. */ #define JEMALLOC_USABLE_SIZE_CONST const /* * If defined, specify throw() for the public function prototypes when compiling * with C++. The only justification for this is to match the prototypes that * glibc defines. */ /* #undef JEMALLOC_USE_CXX_THROW */ #ifdef _MSC_VER # ifdef _WIN64 # define LG_SIZEOF_PTR_WIN 3 # else # define LG_SIZEOF_PTR_WIN 2 # endif #endif /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ #define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN /* * Name mangling for public symbols is controlled by --with-mangling and * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by * these macro definitions. 
*/ #ifndef JEMALLOC_NO_RENAME # define je_pool_create je_vmem_pool_create # define je_pool_delete je_vmem_pool_delete # define je_pool_malloc je_vmem_pool_malloc # define je_pool_calloc je_vmem_pool_calloc # define je_pool_ralloc je_vmem_pool_ralloc # define je_pool_aligned_alloc je_vmem_pool_aligned_alloc # define je_pool_free je_vmem_pool_free # define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size # define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print # define je_pool_extend je_vmem_pool_extend # define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs # define je_pool_check je_vmem_pool_check # define je_malloc_conf je_vmem_malloc_conf # define je_malloc_message je_vmem_malloc_message # define je_malloc je_vmem_malloc # define je_calloc je_vmem_calloc # define je_posix_memalign je_vmem_posix_memalign # define je_aligned_alloc je_vmem_aligned_alloc # define je_realloc je_vmem_realloc # define je_free je_vmem_free # define je_mallocx je_vmem_mallocx # define je_rallocx je_vmem_rallocx # define je_xallocx je_vmem_xallocx # define je_sallocx je_vmem_sallocx # define je_dallocx je_vmem_dallocx # define je_nallocx je_vmem_nallocx # define je_mallctl je_vmem_mallctl # define je_mallctlnametomib je_vmem_mallctlnametomib # define je_mallctlbymib je_vmem_mallctlbymib # define je_navsnprintf je_vmem_navsnprintf # define je_malloc_stats_print je_vmem_malloc_stats_print # define je_malloc_usable_size je_vmem_malloc_usable_size #endif #include <limits.h> #include <strings.h> #include <stdbool.h> #include <stdarg.h> #define JEMALLOC_VERSION "" #define JEMALLOC_VERSION_MAJOR #define JEMALLOC_VERSION_MINOR #define JEMALLOC_VERSION_BUGFIX #define JEMALLOC_VERSION_NREV #define JEMALLOC_VERSION_GID "" # define MALLOCX_LG_ALIGN(la) (la) # if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) (ffs(a)-1) # else # define MALLOCX_ALIGN(a) \ (((a) < (size_t)INT_MAX) ? 
ffs(a)-1 : ffs((a)>>32)+31) # endif # define MALLOCX_ZERO ((int)0x40) /* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ # define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) #ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) #elif _MSC_VER # define JEMALLOC_ATTR(s) # ifndef JEMALLOC_EXPORT # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_NOINLINE __declspec(noinline) #else # define JEMALLOC_ATTR(s) # define JEMALLOC_EXPORT # define JEMALLOC_ALIGNED(s) # define JEMALLOC_SECTION(s) # define JEMALLOC_NOINLINE #endif /* * The je_ prefix on the following public symbol declarations is an artifact * of namespace management, and should be omitted in application code unless * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
*/ extern JEMALLOC_EXPORT const char *je_malloc_conf; extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); typedef struct pool_s pool_t; JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed, int empty); JEMALLOC_EXPORT int je_pool_delete(pool_t *pool); JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr, size_t size, int zeroed); JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size); JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size); JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size); JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size); JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr); JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr); JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque, const char *opts); JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t), void (*free_func)(void *)); JEMALLOC_EXPORT int je_pool_check(pool_t *pool); JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, size_t size) JEMALLOC_ATTR(nonnull(1)); JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); JEMALLOC_EXPORT void je_free(void *ptr); JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags); JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags); JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra, int flags); JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags); JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags); JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags); JEMALLOC_EXPORT int 
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, const char *), void *je_cbopaque, const char *opts); JEMALLOC_EXPORT size_t je_malloc_usable_size( JEMALLOC_USABLE_SIZE_CONST void *ptr); JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size, const char *format, va_list ap); #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); #endif typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *); typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *); /* * By default application code must explicitly refer to mangled symbol names, * so that it is possible to use jemalloc in conjunction with another allocator * in the same application. Define JEMALLOC_MANGLE in order to cause automatic * name mangling that matches the API prefixing that happened as a result of * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
*/ #ifdef JEMALLOC_MANGLE # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif # define pool_create je_pool_create # define pool_delete je_pool_delete # define pool_malloc je_pool_malloc # define pool_calloc je_pool_calloc # define pool_ralloc je_pool_ralloc # define pool_aligned_alloc je_pool_aligned_alloc # define pool_free je_pool_free # define pool_malloc_usable_size je_pool_malloc_usable_size # define pool_malloc_stats_print je_pool_malloc_stats_print # define pool_extend je_pool_extend # define pool_set_alloc_funcs je_pool_set_alloc_funcs # define pool_check je_pool_check # define malloc_conf je_malloc_conf # define malloc_message je_malloc_message # define malloc je_malloc # define calloc je_calloc # define posix_memalign je_posix_memalign # define aligned_alloc je_aligned_alloc # define realloc je_realloc # define free je_free # define mallocx je_mallocx # define rallocx je_rallocx # define xallocx je_xallocx # define sallocx je_sallocx # define dallocx je_dallocx # define nallocx je_nallocx # define mallctl je_mallctl # define mallctlnametomib je_mallctlnametomib # define mallctlbymib je_mallctlbymib # define navsnprintf je_navsnprintf # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size #endif /* * The je_* macros can be used as stable alternative names for the * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily * meant for use in jemalloc itself, but it can be used by application code to * provide isolation from the name mangling specified via --with-mangling * and/or --with-jemalloc-prefix. 
*/ #ifndef JEMALLOC_NO_DEMANGLE # undef je_pool_create # undef je_pool_delete # undef je_pool_malloc # undef je_pool_calloc # undef je_pool_ralloc # undef je_pool_aligned_alloc # undef je_pool_free # undef je_pool_malloc_usable_size # undef je_pool_malloc_stats_print # undef je_pool_extend # undef je_pool_set_alloc_funcs # undef je_pool_check # undef je_malloc_conf # undef je_malloc_message # undef je_malloc # undef je_calloc # undef je_posix_memalign # undef je_aligned_alloc # undef je_realloc # undef je_free # undef je_mallocx # undef je_rallocx # undef je_xallocx # undef je_sallocx # undef je_dallocx # undef je_nallocx # undef je_mallctl # undef je_mallctlnametomib # undef je_mallctlbymib # undef je_navsnprintf # undef je_malloc_stats_print # undef je_malloc_usable_size #endif #ifdef __cplusplus } #endif #endif /* JEMALLOC_H_ */
10,674
34
90
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos.h
/* * The je_ prefix on the following public symbol declarations is an artifact * of namespace management, and should be omitted in application code unless * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). */ extern JEMALLOC_EXPORT const char *je_malloc_conf; extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); typedef struct pool_s pool_t; JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed); JEMALLOC_EXPORT int je_pool_delete(pool_t *pool); JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr, size_t size, int zeroed); JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size); JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size); JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size); JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size); JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr); JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr); JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque, const char *opts); JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t), void (*free_func)(void *)); JEMALLOC_EXPORT int je_pool_check(pool_t *pool); JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, size_t size) JEMALLOC_ATTR(nonnull(1)); JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); JEMALLOC_EXPORT void je_free(void *ptr); JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags); JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags); JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra, int flags); 
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags); JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags); JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags); JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, const char *), void *je_cbopaque, const char *opts); JEMALLOC_EXPORT size_t je_malloc_usable_size( JEMALLOC_USABLE_SIZE_CONST void *ptr); JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size, const char *format, va_list ap); #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); #endif
3,124
44.289855
90
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_typedefs.h
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *); typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
150
49.333333
82
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_macros.h
#include <limits.h> #include <strings.h> #include <stdbool.h> #include <stdarg.h> #define JEMALLOC_VERSION "" #define JEMALLOC_VERSION_MAJOR #define JEMALLOC_VERSION_MINOR #define JEMALLOC_VERSION_BUGFIX #define JEMALLOC_VERSION_NREV #define JEMALLOC_VERSION_GID "" # define MALLOCX_LG_ALIGN(la) (la) # if LG_SIZEOF_PTR == 2 # define MALLOCX_ALIGN(a) (ffs(a)-1) # else # define MALLOCX_ALIGN(a) \ (((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31) # endif # define MALLOCX_ZERO ((int)0x40) /* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ # define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) #ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) #elif _MSC_VER # define JEMALLOC_ATTR(s) # ifdef DLLEXPORT # define JEMALLOC_EXPORT __declspec(dllexport) # else # define JEMALLOC_EXPORT __declspec(dllimport) # endif # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_SECTION(s) __declspec(allocate(s)) # define JEMALLOC_NOINLINE __declspec(noinline) #else # define JEMALLOC_ATTR(s) # define JEMALLOC_EXPORT # define JEMALLOC_ALIGNED(s) # define JEMALLOC_SECTION(s) # define JEMALLOC_NOINLINE #endif
1,426
29.361702
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/size_classes.h
/* This file was automatically generated by size_classes.sh. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to * be defined prior to inclusion, and it in turn defines: * * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. * SIZE_CLASSES: Complete table of * SC(index, lg_delta, size, bin, lg_delta_lookup) tuples. * index: Size class index. * lg_grp: Lg group base size (no deltas added). * lg_delta: Lg delta to previous size class. * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta * bin: 'yes' if a small bin size class, 'no' otherwise. * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' * otherwise. * NTBINS: Number of tiny bins. * NLBINS: Number of bins supported by the lookup table. * NBINS: Number of small size class bins. * LG_TINY_MAXCLASS: Lg of maximum tiny size class. * LOOKUP_MAXCLASS: Maximum size class included in lookup table. * SMALL_MAXCLASS: Maximum small size class. 
*/ #define LG_SIZE_CLASS_GROUP 2 #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 9, 4, no, 9) \ \ SC( 32, 12, 10, 1, no, no) \ SC( 33, 12, 10, 2, no, no) \ SC( 34, 12, 10, 3, no, no) \ SC( 35, 12, 10, 4, no, no) \ \ SC( 36, 13, 11, 1, no, no) \ SC( 37, 13, 11, 2, no, no) \ SC( 38, 13, 11, 3, no, no) \ SC( 39, 13, 11, 4, no, no) \ \ SC( 40, 14, 12, 1, no, no) \ SC( 41, 14, 12, 2, no, no) \ SC( 42, 14, 12, 3, no, no) \ SC( 43, 14, 12, 4, no, no) \ \ SC( 44, 15, 13, 1, no, no) \ SC( 45, 15, 13, 2, no, no) \ SC( 46, 15, 13, 3, no, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) \ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 
19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 31 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, 
yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 9, 4, yes, 9) \ \ SC( 32, 12, 10, 1, yes, no) \ SC( 33, 12, 10, 2, yes, no) \ SC( 34, 12, 10, 3, yes, no) \ SC( 35, 12, 10, 4, no, no) \ \ SC( 36, 13, 11, 1, no, no) \ SC( 37, 13, 11, 2, no, no) \ SC( 38, 13, 11, 3, no, no) \ SC( 39, 13, 11, 4, no, no) \ \ SC( 40, 14, 12, 1, no, no) \ SC( 41, 14, 12, 2, no, no) \ SC( 42, 14, 12, 3, no, no) \ SC( 43, 14, 12, 4, no, no) \ \ SC( 44, 15, 13, 1, no, no) \ SC( 45, 15, 13, 2, no, no) \ SC( 46, 15, 13, 3, no, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) \ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, 
no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 35 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ 
SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 9, 4, yes, 9) \ \ SC( 32, 12, 10, 1, yes, no) \ SC( 33, 12, 10, 2, yes, no) \ SC( 34, 12, 10, 3, yes, no) \ SC( 35, 12, 10, 4, yes, no) \ \ SC( 36, 13, 11, 1, yes, no) \ SC( 37, 13, 11, 2, yes, no) \ SC( 38, 13, 11, 3, yes, no) \ SC( 39, 13, 11, 4, yes, no) \ \ SC( 40, 14, 12, 1, yes, no) \ SC( 41, 14, 12, 2, yes, no) \ SC( 42, 14, 12, 3, yes, no) \ SC( 43, 14, 12, 4, yes, no) \ \ SC( 44, 15, 13, 1, yes, no) \ SC( 45, 15, 13, 2, yes, no) \ SC( 46, 15, 13, 3, yes, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) \ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, 
no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 47 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 4, no, 9) \ \ SC( 29, 12, 10, 1, no, no) \ SC( 30, 12, 10, 2, no, no) \ SC( 31, 12, 10, 3, no, no) \ SC( 32, 12, 
10, 4, no, no) \ \ SC( 33, 13, 11, 1, no, no) \ SC( 34, 13, 11, 2, no, no) \ SC( 35, 13, 11, 3, no, no) \ SC( 36, 13, 11, 4, no, no) \ \ SC( 37, 14, 12, 1, no, no) \ SC( 38, 14, 12, 2, no, no) \ SC( 39, 14, 12, 3, no, no) \ SC( 40, 14, 12, 4, no, no) \ \ SC( 41, 15, 13, 1, no, no) \ SC( 42, 15, 13, 2, no, no) \ SC( 43, 15, 13, 3, no, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ 
SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 28 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 4, yes, 9) \ \ SC( 29, 12, 10, 1, yes, no) \ SC( 30, 12, 10, 2, yes, no) \ SC( 31, 12, 10, 3, yes, no) \ SC( 32, 12, 10, 4, no, no) \ \ SC( 33, 13, 11, 1, no, no) \ SC( 34, 13, 11, 2, no, no) \ SC( 35, 13, 11, 3, no, no) \ SC( 36, 13, 11, 4, no, no) \ \ SC( 37, 14, 12, 1, no, no) \ SC( 38, 14, 12, 2, no, no) \ SC( 39, 14, 12, 3, no, no) \ SC( 40, 14, 12, 4, no, no) \ \ SC( 41, 15, 13, 1, no, no) \ SC( 42, 15, 13, 2, no, no) \ SC( 43, 15, 13, 3, no, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, 
no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 32 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) 
+ (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 4, yes, 9) \ \ SC( 29, 12, 10, 1, yes, no) \ SC( 30, 12, 10, 2, yes, no) \ SC( 31, 12, 10, 3, yes, no) \ SC( 32, 12, 10, 4, yes, no) \ \ SC( 33, 13, 11, 1, yes, no) \ SC( 34, 13, 11, 2, yes, no) \ SC( 35, 13, 11, 3, yes, no) \ SC( 36, 13, 11, 4, yes, no) \ \ SC( 37, 14, 12, 1, yes, no) \ SC( 38, 14, 12, 2, yes, no) \ SC( 39, 14, 12, 3, yes, no) \ SC( 40, 14, 12, 4, yes, no) \ \ SC( 41, 15, 13, 1, yes, no) \ SC( 42, 15, 13, 2, yes, no) \ SC( 43, 15, 13, 3, yes, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, 
no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 44 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 
5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, yes, 9) \ SC( 27, 11, 9, 4, no, 9) \ \ SC( 28, 12, 10, 1, no, no) \ SC( 29, 12, 10, 2, no, no) \ SC( 30, 12, 10, 3, no, no) \ SC( 31, 12, 10, 4, no, no) \ \ SC( 32, 13, 11, 1, no, no) \ SC( 33, 13, 11, 2, no, no) \ SC( 34, 13, 11, 3, no, no) \ SC( 35, 13, 11, 4, no, no) \ \ SC( 36, 14, 12, 1, no, no) \ SC( 37, 14, 12, 2, no, no) \ SC( 38, 14, 12, 3, no, no) \ SC( 39, 14, 12, 4, no, no) \ \ SC( 40, 15, 13, 1, no, no) \ SC( 41, 15, 13, 2, no, no) \ SC( 42, 15, 13, 3, no, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 
78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 27 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, 
yes, 9) \ SC( 27, 11, 9, 4, yes, 9) \ \ SC( 28, 12, 10, 1, yes, no) \ SC( 29, 12, 10, 2, yes, no) \ SC( 30, 12, 10, 3, yes, no) \ SC( 31, 12, 10, 4, no, no) \ \ SC( 32, 13, 11, 1, no, no) \ SC( 33, 13, 11, 2, no, no) \ SC( 34, 13, 11, 3, no, no) \ SC( 35, 13, 11, 4, no, no) \ \ SC( 36, 14, 12, 1, no, no) \ SC( 37, 14, 12, 2, no, no) \ SC( 38, 14, 12, 3, no, no) \ SC( 39, 14, 12, 4, no, no) \ \ SC( 40, 15, 13, 1, no, no) \ SC( 41, 15, 13, 2, no, no) \ SC( 42, 15, 13, 3, no, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 
28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 31 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, yes, 9) \ SC( 27, 11, 9, 4, yes, 9) \ \ SC( 28, 12, 10, 1, yes, no) \ SC( 29, 12, 10, 2, yes, no) \ SC( 30, 12, 10, 3, yes, no) \ SC( 31, 12, 10, 4, yes, no) \ \ SC( 32, 13, 11, 1, yes, no) \ SC( 33, 13, 11, 2, yes, no) \ SC( 34, 13, 11, 3, yes, no) \ SC( 35, 13, 11, 4, yes, no) \ \ SC( 36, 14, 12, 1, yes, no) \ SC( 37, 14, 12, 2, yes, no) \ SC( 38, 14, 12, 3, yes, no) \ SC( 39, 14, 12, 4, yes, no) \ \ SC( 40, 15, 13, 1, yes, no) \ SC( 41, 15, 13, 2, yes, no) \ SC( 42, 15, 
13, 3, yes, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 43 #define 
LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 9, 4, no, 9) \ \ SC( 32, 12, 10, 1, no, no) \ SC( 33, 12, 10, 2, no, no) \ SC( 34, 12, 10, 3, no, no) \ SC( 35, 12, 10, 4, no, no) \ \ SC( 36, 13, 11, 1, no, no) \ SC( 37, 13, 11, 2, no, no) \ SC( 38, 13, 11, 3, no, no) \ SC( 39, 13, 11, 4, no, no) \ \ SC( 40, 14, 12, 1, no, no) \ SC( 41, 14, 12, 2, no, no) \ SC( 42, 14, 12, 3, no, no) \ SC( 43, 14, 12, 4, no, no) \ \ SC( 44, 15, 13, 1, no, no) \ SC( 45, 15, 13, 2, no, no) \ SC( 46, 15, 13, 3, no, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) 
\ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ SC(111, 31, 29, 4, no, no) \ \ SC(112, 32, 30, 1, no, no) \ SC(113, 32, 30, 2, no, no) \ SC(114, 32, 30, 3, no, no) \ SC(115, 32, 30, 4, no, no) \ \ SC(116, 33, 31, 1, no, no) \ SC(117, 33, 31, 2, no, no) \ SC(118, 33, 31, 3, no, no) \ SC(119, 33, 31, 4, no, no) \ \ SC(120, 34, 32, 1, no, no) \ SC(121, 34, 32, 2, no, no) \ SC(122, 34, 32, 3, no, no) \ SC(123, 34, 32, 4, no, no) \ \ SC(124, 35, 33, 1, no, no) \ SC(125, 35, 33, 2, no, no) \ SC(126, 35, 33, 3, 
no, no) \ SC(127, 35, 33, 4, no, no) \ \ SC(128, 36, 34, 1, no, no) \ SC(129, 36, 34, 2, no, no) \ SC(130, 36, 34, 3, no, no) \ SC(131, 36, 34, 4, no, no) \ \ SC(132, 37, 35, 1, no, no) \ SC(133, 37, 35, 2, no, no) \ SC(134, 37, 35, 3, no, no) \ SC(135, 37, 35, 4, no, no) \ \ SC(136, 38, 36, 1, no, no) \ SC(137, 38, 36, 2, no, no) \ SC(138, 38, 36, 3, no, no) \ SC(139, 38, 36, 4, no, no) \ \ SC(140, 39, 37, 1, no, no) \ SC(141, 39, 37, 2, no, no) \ SC(142, 39, 37, 3, no, no) \ SC(143, 39, 37, 4, no, no) \ \ SC(144, 40, 38, 1, no, no) \ SC(145, 40, 38, 2, no, no) \ SC(146, 40, 38, 3, no, no) \ SC(147, 40, 38, 4, no, no) \ \ SC(148, 41, 39, 1, no, no) \ SC(149, 41, 39, 2, no, no) \ SC(150, 41, 39, 3, no, no) \ SC(151, 41, 39, 4, no, no) \ \ SC(152, 42, 40, 1, no, no) \ SC(153, 42, 40, 2, no, no) \ SC(154, 42, 40, 3, no, no) \ SC(155, 42, 40, 4, no, no) \ \ SC(156, 43, 41, 1, no, no) \ SC(157, 43, 41, 2, no, no) \ SC(158, 43, 41, 3, no, no) \ SC(159, 43, 41, 4, no, no) \ \ SC(160, 44, 42, 1, no, no) \ SC(161, 44, 42, 2, no, no) \ SC(162, 44, 42, 3, no, no) \ SC(163, 44, 42, 4, no, no) \ \ SC(164, 45, 43, 1, no, no) \ SC(165, 45, 43, 2, no, no) \ SC(166, 45, 43, 3, no, no) \ SC(167, 45, 43, 4, no, no) \ \ SC(168, 46, 44, 1, no, no) \ SC(169, 46, 44, 2, no, no) \ SC(170, 46, 44, 3, no, no) \ SC(171, 46, 44, 4, no, no) \ \ SC(172, 47, 45, 1, no, no) \ SC(173, 47, 45, 2, no, no) \ SC(174, 47, 45, 3, no, no) \ SC(175, 47, 45, 4, no, no) \ \ SC(176, 48, 46, 1, no, no) \ SC(177, 48, 46, 2, no, no) \ SC(178, 48, 46, 3, no, no) \ SC(179, 48, 46, 4, no, no) \ \ SC(180, 49, 47, 1, no, no) \ SC(181, 49, 47, 2, no, no) \ SC(182, 49, 47, 3, no, no) \ SC(183, 49, 47, 4, no, no) \ \ SC(184, 50, 48, 1, no, no) \ SC(185, 50, 48, 2, no, no) \ SC(186, 50, 48, 3, no, no) \ SC(187, 50, 48, 4, no, no) \ \ SC(188, 51, 49, 1, no, no) \ SC(189, 51, 49, 2, no, no) \ SC(190, 51, 49, 3, no, no) \ SC(191, 51, 49, 4, no, no) \ \ SC(192, 52, 50, 1, no, no) \ SC(193, 52, 50, 2, no, no) \ SC(194, 52, 
50, 3, no, no) \ SC(195, 52, 50, 4, no, no) \ \ SC(196, 53, 51, 1, no, no) \ SC(197, 53, 51, 2, no, no) \ SC(198, 53, 51, 3, no, no) \ SC(199, 53, 51, 4, no, no) \ \ SC(200, 54, 52, 1, no, no) \ SC(201, 54, 52, 2, no, no) \ SC(202, 54, 52, 3, no, no) \ SC(203, 54, 52, 4, no, no) \ \ SC(204, 55, 53, 1, no, no) \ SC(205, 55, 53, 2, no, no) \ SC(206, 55, 53, 3, no, no) \ SC(207, 55, 53, 4, no, no) \ \ SC(208, 56, 54, 1, no, no) \ SC(209, 56, 54, 2, no, no) \ SC(210, 56, 54, 3, no, no) \ SC(211, 56, 54, 4, no, no) \ \ SC(212, 57, 55, 1, no, no) \ SC(213, 57, 55, 2, no, no) \ SC(214, 57, 55, 3, no, no) \ SC(215, 57, 55, 4, no, no) \ \ SC(216, 58, 56, 1, no, no) \ SC(217, 58, 56, 2, no, no) \ SC(218, 58, 56, 3, no, no) \ SC(219, 58, 56, 4, no, no) \ \ SC(220, 59, 57, 1, no, no) \ SC(221, 59, 57, 2, no, no) \ SC(222, 59, 57, 3, no, no) \ SC(223, 59, 57, 4, no, no) \ \ SC(224, 60, 58, 1, no, no) \ SC(225, 60, 58, 2, no, no) \ SC(226, 60, 58, 3, no, no) \ SC(227, 60, 58, 4, no, no) \ \ SC(228, 61, 59, 1, no, no) \ SC(229, 61, 59, 2, no, no) \ SC(230, 61, 59, 3, no, no) \ SC(231, 61, 59, 4, no, no) \ \ SC(232, 62, 60, 1, no, no) \ SC(233, 62, 60, 2, no, no) \ SC(234, 62, 60, 3, no, no) \ SC(235, 62, 60, 4, no, no) \ \ SC(236, 63, 61, 1, no, no) \ SC(237, 63, 61, 2, no, no) \ SC(238, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 31 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 
10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 9, 4, yes, 9) \ \ SC( 32, 12, 10, 1, yes, no) \ SC( 33, 12, 10, 2, yes, no) \ SC( 34, 12, 10, 3, yes, no) \ SC( 35, 12, 10, 4, no, no) \ \ SC( 36, 13, 11, 1, no, no) \ SC( 37, 13, 11, 2, no, no) \ SC( 38, 13, 11, 3, no, no) \ SC( 39, 13, 11, 4, no, no) \ \ SC( 40, 14, 12, 1, no, no) \ SC( 41, 14, 12, 2, no, no) \ SC( 42, 14, 12, 3, no, no) \ SC( 43, 14, 12, 4, no, no) \ \ SC( 44, 15, 13, 1, no, no) \ SC( 45, 15, 13, 2, no, no) \ SC( 46, 15, 13, 3, no, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) \ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ 
SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ SC(111, 31, 29, 4, no, no) \ \ SC(112, 32, 30, 1, no, no) \ SC(113, 32, 30, 2, no, no) \ SC(114, 32, 30, 3, no, no) \ SC(115, 32, 30, 4, no, no) \ \ SC(116, 33, 31, 1, no, no) \ SC(117, 33, 31, 2, no, no) \ SC(118, 33, 31, 3, no, no) \ SC(119, 33, 31, 4, no, no) \ \ SC(120, 34, 32, 1, no, no) \ SC(121, 34, 32, 2, no, no) \ SC(122, 34, 32, 3, no, no) \ SC(123, 34, 32, 4, no, no) \ \ SC(124, 35, 33, 1, no, no) \ SC(125, 35, 33, 2, no, no) \ SC(126, 35, 33, 3, no, no) \ SC(127, 35, 33, 4, no, no) \ \ SC(128, 36, 34, 1, no, no) \ SC(129, 36, 34, 2, no, no) \ SC(130, 36, 34, 3, no, no) \ SC(131, 36, 34, 4, no, no) \ \ SC(132, 37, 35, 1, no, no) \ SC(133, 37, 35, 2, no, no) \ SC(134, 37, 35, 3, no, no) \ SC(135, 37, 35, 4, no, no) \ \ SC(136, 38, 36, 1, no, no) \ SC(137, 38, 36, 2, no, no) \ SC(138, 38, 36, 3, no, no) \ SC(139, 38, 36, 4, no, no) \ \ SC(140, 39, 37, 1, no, no) \ SC(141, 39, 37, 2, no, no) \ SC(142, 39, 37, 3, no, no) \ SC(143, 39, 37, 4, no, no) \ \ SC(144, 40, 38, 1, no, no) \ SC(145, 40, 38, 2, no, no) \ SC(146, 40, 38, 3, no, 
no) \ SC(147, 40, 38, 4, no, no) \ \ SC(148, 41, 39, 1, no, no) \ SC(149, 41, 39, 2, no, no) \ SC(150, 41, 39, 3, no, no) \ SC(151, 41, 39, 4, no, no) \ \ SC(152, 42, 40, 1, no, no) \ SC(153, 42, 40, 2, no, no) \ SC(154, 42, 40, 3, no, no) \ SC(155, 42, 40, 4, no, no) \ \ SC(156, 43, 41, 1, no, no) \ SC(157, 43, 41, 2, no, no) \ SC(158, 43, 41, 3, no, no) \ SC(159, 43, 41, 4, no, no) \ \ SC(160, 44, 42, 1, no, no) \ SC(161, 44, 42, 2, no, no) \ SC(162, 44, 42, 3, no, no) \ SC(163, 44, 42, 4, no, no) \ \ SC(164, 45, 43, 1, no, no) \ SC(165, 45, 43, 2, no, no) \ SC(166, 45, 43, 3, no, no) \ SC(167, 45, 43, 4, no, no) \ \ SC(168, 46, 44, 1, no, no) \ SC(169, 46, 44, 2, no, no) \ SC(170, 46, 44, 3, no, no) \ SC(171, 46, 44, 4, no, no) \ \ SC(172, 47, 45, 1, no, no) \ SC(173, 47, 45, 2, no, no) \ SC(174, 47, 45, 3, no, no) \ SC(175, 47, 45, 4, no, no) \ \ SC(176, 48, 46, 1, no, no) \ SC(177, 48, 46, 2, no, no) \ SC(178, 48, 46, 3, no, no) \ SC(179, 48, 46, 4, no, no) \ \ SC(180, 49, 47, 1, no, no) \ SC(181, 49, 47, 2, no, no) \ SC(182, 49, 47, 3, no, no) \ SC(183, 49, 47, 4, no, no) \ \ SC(184, 50, 48, 1, no, no) \ SC(185, 50, 48, 2, no, no) \ SC(186, 50, 48, 3, no, no) \ SC(187, 50, 48, 4, no, no) \ \ SC(188, 51, 49, 1, no, no) \ SC(189, 51, 49, 2, no, no) \ SC(190, 51, 49, 3, no, no) \ SC(191, 51, 49, 4, no, no) \ \ SC(192, 52, 50, 1, no, no) \ SC(193, 52, 50, 2, no, no) \ SC(194, 52, 50, 3, no, no) \ SC(195, 52, 50, 4, no, no) \ \ SC(196, 53, 51, 1, no, no) \ SC(197, 53, 51, 2, no, no) \ SC(198, 53, 51, 3, no, no) \ SC(199, 53, 51, 4, no, no) \ \ SC(200, 54, 52, 1, no, no) \ SC(201, 54, 52, 2, no, no) \ SC(202, 54, 52, 3, no, no) \ SC(203, 54, 52, 4, no, no) \ \ SC(204, 55, 53, 1, no, no) \ SC(205, 55, 53, 2, no, no) \ SC(206, 55, 53, 3, no, no) \ SC(207, 55, 53, 4, no, no) \ \ SC(208, 56, 54, 1, no, no) \ SC(209, 56, 54, 2, no, no) \ SC(210, 56, 54, 3, no, no) \ SC(211, 56, 54, 4, no, no) \ \ SC(212, 57, 55, 1, no, no) \ SC(213, 57, 55, 2, no, no) \ SC(214, 57, 55, 
3, no, no) \ SC(215, 57, 55, 4, no, no) \ \ SC(216, 58, 56, 1, no, no) \ SC(217, 58, 56, 2, no, no) \ SC(218, 58, 56, 3, no, no) \ SC(219, 58, 56, 4, no, no) \ \ SC(220, 59, 57, 1, no, no) \ SC(221, 59, 57, 2, no, no) \ SC(222, 59, 57, 3, no, no) \ SC(223, 59, 57, 4, no, no) \ \ SC(224, 60, 58, 1, no, no) \ SC(225, 60, 58, 2, no, no) \ SC(226, 60, 58, 3, no, no) \ SC(227, 60, 58, 4, no, no) \ \ SC(228, 61, 59, 1, no, no) \ SC(229, 61, 59, 2, no, no) \ SC(230, 61, 59, 3, no, no) \ SC(231, 61, 59, 4, no, no) \ \ SC(232, 62, 60, 1, no, no) \ SC(233, 62, 60, 2, no, no) \ SC(234, 62, 60, 3, no, no) \ SC(235, 62, 60, 4, no, no) \ \ SC(236, 63, 61, 1, no, no) \ SC(237, 63, 61, 2, no, no) \ SC(238, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 35 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 3, 3, 2, yes, 3) \ SC( 3, 3, 3, 3, yes, 3) \ \ SC( 4, 5, 3, 1, yes, 3) \ SC( 5, 5, 3, 2, yes, 3) \ SC( 6, 5, 3, 3, yes, 3) \ SC( 7, 5, 3, 4, yes, 3) \ \ SC( 8, 6, 4, 1, yes, 4) \ SC( 9, 6, 4, 2, yes, 4) \ SC( 10, 6, 4, 3, yes, 4) \ SC( 11, 6, 4, 4, yes, 4) \ \ SC( 12, 7, 5, 1, yes, 5) \ SC( 13, 7, 5, 2, yes, 5) \ SC( 14, 7, 5, 3, yes, 5) \ SC( 15, 7, 5, 4, yes, 5) \ \ SC( 16, 8, 6, 1, yes, 6) \ SC( 17, 8, 6, 2, yes, 6) \ SC( 18, 8, 6, 3, yes, 6) \ SC( 19, 8, 6, 4, yes, 6) \ \ SC( 20, 9, 7, 1, yes, 7) \ SC( 21, 9, 7, 2, yes, 7) \ SC( 22, 9, 7, 3, yes, 7) \ SC( 23, 9, 7, 4, yes, 7) \ \ SC( 24, 10, 8, 1, yes, 8) \ SC( 25, 10, 8, 2, yes, 8) \ SC( 26, 10, 8, 3, yes, 8) \ SC( 27, 10, 8, 4, yes, 8) \ \ SC( 28, 11, 9, 1, yes, 9) \ SC( 29, 11, 9, 2, yes, 9) \ SC( 30, 11, 9, 3, yes, 9) \ SC( 31, 11, 
9, 4, yes, 9) \ \ SC( 32, 12, 10, 1, yes, no) \ SC( 33, 12, 10, 2, yes, no) \ SC( 34, 12, 10, 3, yes, no) \ SC( 35, 12, 10, 4, yes, no) \ \ SC( 36, 13, 11, 1, yes, no) \ SC( 37, 13, 11, 2, yes, no) \ SC( 38, 13, 11, 3, yes, no) \ SC( 39, 13, 11, 4, yes, no) \ \ SC( 40, 14, 12, 1, yes, no) \ SC( 41, 14, 12, 2, yes, no) \ SC( 42, 14, 12, 3, yes, no) \ SC( 43, 14, 12, 4, yes, no) \ \ SC( 44, 15, 13, 1, yes, no) \ SC( 45, 15, 13, 2, yes, no) \ SC( 46, 15, 13, 3, yes, no) \ SC( 47, 15, 13, 4, no, no) \ \ SC( 48, 16, 14, 1, no, no) \ SC( 49, 16, 14, 2, no, no) \ SC( 50, 16, 14, 3, no, no) \ SC( 51, 16, 14, 4, no, no) \ \ SC( 52, 17, 15, 1, no, no) \ SC( 53, 17, 15, 2, no, no) \ SC( 54, 17, 15, 3, no, no) \ SC( 55, 17, 15, 4, no, no) \ \ SC( 56, 18, 16, 1, no, no) \ SC( 57, 18, 16, 2, no, no) \ SC( 58, 18, 16, 3, no, no) \ SC( 59, 18, 16, 4, no, no) \ \ SC( 60, 19, 17, 1, no, no) \ SC( 61, 19, 17, 2, no, no) \ SC( 62, 19, 17, 3, no, no) \ SC( 63, 19, 17, 4, no, no) \ \ SC( 64, 20, 18, 1, no, no) \ SC( 65, 20, 18, 2, no, no) \ SC( 66, 20, 18, 3, no, no) \ SC( 67, 20, 18, 4, no, no) \ \ SC( 68, 21, 19, 1, no, no) \ SC( 69, 21, 19, 2, no, no) \ SC( 70, 21, 19, 3, no, no) \ SC( 71, 21, 19, 4, no, no) \ \ SC( 72, 22, 20, 1, no, no) \ SC( 73, 22, 20, 2, no, no) \ SC( 74, 22, 20, 3, no, no) \ SC( 75, 22, 20, 4, no, no) \ \ SC( 76, 23, 21, 1, no, no) \ SC( 77, 23, 21, 2, no, no) \ SC( 78, 23, 21, 3, no, no) \ SC( 79, 23, 21, 4, no, no) \ \ SC( 80, 24, 22, 1, no, no) \ SC( 81, 24, 22, 2, no, no) \ SC( 82, 24, 22, 3, no, no) \ SC( 83, 24, 22, 4, no, no) \ \ SC( 84, 25, 23, 1, no, no) \ SC( 85, 25, 23, 2, no, no) \ SC( 86, 25, 23, 3, no, no) \ SC( 87, 25, 23, 4, no, no) \ \ SC( 88, 26, 24, 1, no, no) \ SC( 89, 26, 24, 2, no, no) \ SC( 90, 26, 24, 3, no, no) \ SC( 91, 26, 24, 4, no, no) \ \ SC( 92, 27, 25, 1, no, no) \ SC( 93, 27, 25, 2, no, no) \ SC( 94, 27, 25, 3, no, no) \ SC( 95, 27, 25, 4, no, no) \ \ SC( 96, 28, 26, 1, no, no) \ SC( 97, 28, 26, 2, no, no) \ SC( 98, 28, 26, 3, 
no, no) \ SC( 99, 28, 26, 4, no, no) \ \ SC(100, 29, 27, 1, no, no) \ SC(101, 29, 27, 2, no, no) \ SC(102, 29, 27, 3, no, no) \ SC(103, 29, 27, 4, no, no) \ \ SC(104, 30, 28, 1, no, no) \ SC(105, 30, 28, 2, no, no) \ SC(106, 30, 28, 3, no, no) \ SC(107, 30, 28, 4, no, no) \ \ SC(108, 31, 29, 1, no, no) \ SC(109, 31, 29, 2, no, no) \ SC(110, 31, 29, 3, no, no) \ SC(111, 31, 29, 4, no, no) \ \ SC(112, 32, 30, 1, no, no) \ SC(113, 32, 30, 2, no, no) \ SC(114, 32, 30, 3, no, no) \ SC(115, 32, 30, 4, no, no) \ \ SC(116, 33, 31, 1, no, no) \ SC(117, 33, 31, 2, no, no) \ SC(118, 33, 31, 3, no, no) \ SC(119, 33, 31, 4, no, no) \ \ SC(120, 34, 32, 1, no, no) \ SC(121, 34, 32, 2, no, no) \ SC(122, 34, 32, 3, no, no) \ SC(123, 34, 32, 4, no, no) \ \ SC(124, 35, 33, 1, no, no) \ SC(125, 35, 33, 2, no, no) \ SC(126, 35, 33, 3, no, no) \ SC(127, 35, 33, 4, no, no) \ \ SC(128, 36, 34, 1, no, no) \ SC(129, 36, 34, 2, no, no) \ SC(130, 36, 34, 3, no, no) \ SC(131, 36, 34, 4, no, no) \ \ SC(132, 37, 35, 1, no, no) \ SC(133, 37, 35, 2, no, no) \ SC(134, 37, 35, 3, no, no) \ SC(135, 37, 35, 4, no, no) \ \ SC(136, 38, 36, 1, no, no) \ SC(137, 38, 36, 2, no, no) \ SC(138, 38, 36, 3, no, no) \ SC(139, 38, 36, 4, no, no) \ \ SC(140, 39, 37, 1, no, no) \ SC(141, 39, 37, 2, no, no) \ SC(142, 39, 37, 3, no, no) \ SC(143, 39, 37, 4, no, no) \ \ SC(144, 40, 38, 1, no, no) \ SC(145, 40, 38, 2, no, no) \ SC(146, 40, 38, 3, no, no) \ SC(147, 40, 38, 4, no, no) \ \ SC(148, 41, 39, 1, no, no) \ SC(149, 41, 39, 2, no, no) \ SC(150, 41, 39, 3, no, no) \ SC(151, 41, 39, 4, no, no) \ \ SC(152, 42, 40, 1, no, no) \ SC(153, 42, 40, 2, no, no) \ SC(154, 42, 40, 3, no, no) \ SC(155, 42, 40, 4, no, no) \ \ SC(156, 43, 41, 1, no, no) \ SC(157, 43, 41, 2, no, no) \ SC(158, 43, 41, 3, no, no) \ SC(159, 43, 41, 4, no, no) \ \ SC(160, 44, 42, 1, no, no) \ SC(161, 44, 42, 2, no, no) \ SC(162, 44, 42, 3, no, no) \ SC(163, 44, 42, 4, no, no) \ \ SC(164, 45, 43, 1, no, no) \ SC(165, 45, 43, 2, no, no) \ SC(166, 45, 
43, 3, no, no) \ SC(167, 45, 43, 4, no, no) \ \ SC(168, 46, 44, 1, no, no) \ SC(169, 46, 44, 2, no, no) \ SC(170, 46, 44, 3, no, no) \ SC(171, 46, 44, 4, no, no) \ \ SC(172, 47, 45, 1, no, no) \ SC(173, 47, 45, 2, no, no) \ SC(174, 47, 45, 3, no, no) \ SC(175, 47, 45, 4, no, no) \ \ SC(176, 48, 46, 1, no, no) \ SC(177, 48, 46, 2, no, no) \ SC(178, 48, 46, 3, no, no) \ SC(179, 48, 46, 4, no, no) \ \ SC(180, 49, 47, 1, no, no) \ SC(181, 49, 47, 2, no, no) \ SC(182, 49, 47, 3, no, no) \ SC(183, 49, 47, 4, no, no) \ \ SC(184, 50, 48, 1, no, no) \ SC(185, 50, 48, 2, no, no) \ SC(186, 50, 48, 3, no, no) \ SC(187, 50, 48, 4, no, no) \ \ SC(188, 51, 49, 1, no, no) \ SC(189, 51, 49, 2, no, no) \ SC(190, 51, 49, 3, no, no) \ SC(191, 51, 49, 4, no, no) \ \ SC(192, 52, 50, 1, no, no) \ SC(193, 52, 50, 2, no, no) \ SC(194, 52, 50, 3, no, no) \ SC(195, 52, 50, 4, no, no) \ \ SC(196, 53, 51, 1, no, no) \ SC(197, 53, 51, 2, no, no) \ SC(198, 53, 51, 3, no, no) \ SC(199, 53, 51, 4, no, no) \ \ SC(200, 54, 52, 1, no, no) \ SC(201, 54, 52, 2, no, no) \ SC(202, 54, 52, 3, no, no) \ SC(203, 54, 52, 4, no, no) \ \ SC(204, 55, 53, 1, no, no) \ SC(205, 55, 53, 2, no, no) \ SC(206, 55, 53, 3, no, no) \ SC(207, 55, 53, 4, no, no) \ \ SC(208, 56, 54, 1, no, no) \ SC(209, 56, 54, 2, no, no) \ SC(210, 56, 54, 3, no, no) \ SC(211, 56, 54, 4, no, no) \ \ SC(212, 57, 55, 1, no, no) \ SC(213, 57, 55, 2, no, no) \ SC(214, 57, 55, 3, no, no) \ SC(215, 57, 55, 4, no, no) \ \ SC(216, 58, 56, 1, no, no) \ SC(217, 58, 56, 2, no, no) \ SC(218, 58, 56, 3, no, no) \ SC(219, 58, 56, 4, no, no) \ \ SC(220, 59, 57, 1, no, no) \ SC(221, 59, 57, 2, no, no) \ SC(222, 59, 57, 3, no, no) \ SC(223, 59, 57, 4, no, no) \ \ SC(224, 60, 58, 1, no, no) \ SC(225, 60, 58, 2, no, no) \ SC(226, 60, 58, 3, no, no) \ SC(227, 60, 58, 4, no, no) \ \ SC(228, 61, 59, 1, no, no) \ SC(229, 61, 59, 2, no, no) \ SC(230, 61, 59, 3, no, no) \ SC(231, 61, 59, 4, no, no) \ \ SC(232, 62, 60, 1, no, no) \ SC(233, 62, 60, 2, no, no) \ 
SC(234, 62, 60, 3, no, no) \ SC(235, 62, 60, 4, no, no) \ \ SC(236, 63, 61, 1, no, no) \ SC(237, 63, 61, 2, no, no) \ SC(238, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 32 #define NBINS 47 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 4, no, 9) \ \ SC( 29, 12, 10, 1, no, no) \ SC( 30, 12, 10, 2, no, no) \ SC( 31, 12, 10, 3, no, no) \ SC( 32, 12, 10, 4, no, no) \ \ SC( 33, 13, 11, 1, no, no) \ SC( 34, 13, 11, 2, no, no) \ SC( 35, 13, 11, 3, no, no) \ SC( 36, 13, 11, 4, no, no) \ \ SC( 37, 14, 12, 1, no, no) \ SC( 38, 14, 12, 2, no, no) \ SC( 39, 14, 12, 3, no, no) \ SC( 40, 14, 12, 4, no, no) \ \ SC( 41, 15, 13, 1, no, no) \ SC( 42, 15, 13, 2, no, no) \ SC( 43, 15, 13, 3, no, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, 
no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ SC(108, 31, 29, 4, no, no) \ \ SC(109, 32, 30, 1, no, no) \ SC(110, 32, 30, 2, no, no) \ SC(111, 32, 30, 3, no, no) \ SC(112, 32, 30, 4, no, no) \ \ SC(113, 33, 31, 1, no, no) \ SC(114, 33, 31, 2, no, no) \ SC(115, 33, 31, 3, no, no) \ SC(116, 33, 31, 4, no, no) \ \ SC(117, 34, 32, 1, no, no) \ SC(118, 34, 
32, 2, no, no) \ SC(119, 34, 32, 3, no, no) \ SC(120, 34, 32, 4, no, no) \ \ SC(121, 35, 33, 1, no, no) \ SC(122, 35, 33, 2, no, no) \ SC(123, 35, 33, 3, no, no) \ SC(124, 35, 33, 4, no, no) \ \ SC(125, 36, 34, 1, no, no) \ SC(126, 36, 34, 2, no, no) \ SC(127, 36, 34, 3, no, no) \ SC(128, 36, 34, 4, no, no) \ \ SC(129, 37, 35, 1, no, no) \ SC(130, 37, 35, 2, no, no) \ SC(131, 37, 35, 3, no, no) \ SC(132, 37, 35, 4, no, no) \ \ SC(133, 38, 36, 1, no, no) \ SC(134, 38, 36, 2, no, no) \ SC(135, 38, 36, 3, no, no) \ SC(136, 38, 36, 4, no, no) \ \ SC(137, 39, 37, 1, no, no) \ SC(138, 39, 37, 2, no, no) \ SC(139, 39, 37, 3, no, no) \ SC(140, 39, 37, 4, no, no) \ \ SC(141, 40, 38, 1, no, no) \ SC(142, 40, 38, 2, no, no) \ SC(143, 40, 38, 3, no, no) \ SC(144, 40, 38, 4, no, no) \ \ SC(145, 41, 39, 1, no, no) \ SC(146, 41, 39, 2, no, no) \ SC(147, 41, 39, 3, no, no) \ SC(148, 41, 39, 4, no, no) \ \ SC(149, 42, 40, 1, no, no) \ SC(150, 42, 40, 2, no, no) \ SC(151, 42, 40, 3, no, no) \ SC(152, 42, 40, 4, no, no) \ \ SC(153, 43, 41, 1, no, no) \ SC(154, 43, 41, 2, no, no) \ SC(155, 43, 41, 3, no, no) \ SC(156, 43, 41, 4, no, no) \ \ SC(157, 44, 42, 1, no, no) \ SC(158, 44, 42, 2, no, no) \ SC(159, 44, 42, 3, no, no) \ SC(160, 44, 42, 4, no, no) \ \ SC(161, 45, 43, 1, no, no) \ SC(162, 45, 43, 2, no, no) \ SC(163, 45, 43, 3, no, no) \ SC(164, 45, 43, 4, no, no) \ \ SC(165, 46, 44, 1, no, no) \ SC(166, 46, 44, 2, no, no) \ SC(167, 46, 44, 3, no, no) \ SC(168, 46, 44, 4, no, no) \ \ SC(169, 47, 45, 1, no, no) \ SC(170, 47, 45, 2, no, no) \ SC(171, 47, 45, 3, no, no) \ SC(172, 47, 45, 4, no, no) \ \ SC(173, 48, 46, 1, no, no) \ SC(174, 48, 46, 2, no, no) \ SC(175, 48, 46, 3, no, no) \ SC(176, 48, 46, 4, no, no) \ \ SC(177, 49, 47, 1, no, no) \ SC(178, 49, 47, 2, no, no) \ SC(179, 49, 47, 3, no, no) \ SC(180, 49, 47, 4, no, no) \ \ SC(181, 50, 48, 1, no, no) \ SC(182, 50, 48, 2, no, no) \ SC(183, 50, 48, 3, no, no) \ SC(184, 50, 48, 4, no, no) \ \ SC(185, 51, 49, 1, no, no) \ 
SC(186, 51, 49, 2, no, no) \ SC(187, 51, 49, 3, no, no) \ SC(188, 51, 49, 4, no, no) \ \ SC(189, 52, 50, 1, no, no) \ SC(190, 52, 50, 2, no, no) \ SC(191, 52, 50, 3, no, no) \ SC(192, 52, 50, 4, no, no) \ \ SC(193, 53, 51, 1, no, no) \ SC(194, 53, 51, 2, no, no) \ SC(195, 53, 51, 3, no, no) \ SC(196, 53, 51, 4, no, no) \ \ SC(197, 54, 52, 1, no, no) \ SC(198, 54, 52, 2, no, no) \ SC(199, 54, 52, 3, no, no) \ SC(200, 54, 52, 4, no, no) \ \ SC(201, 55, 53, 1, no, no) \ SC(202, 55, 53, 2, no, no) \ SC(203, 55, 53, 3, no, no) \ SC(204, 55, 53, 4, no, no) \ \ SC(205, 56, 54, 1, no, no) \ SC(206, 56, 54, 2, no, no) \ SC(207, 56, 54, 3, no, no) \ SC(208, 56, 54, 4, no, no) \ \ SC(209, 57, 55, 1, no, no) \ SC(210, 57, 55, 2, no, no) \ SC(211, 57, 55, 3, no, no) \ SC(212, 57, 55, 4, no, no) \ \ SC(213, 58, 56, 1, no, no) \ SC(214, 58, 56, 2, no, no) \ SC(215, 58, 56, 3, no, no) \ SC(216, 58, 56, 4, no, no) \ \ SC(217, 59, 57, 1, no, no) \ SC(218, 59, 57, 2, no, no) \ SC(219, 59, 57, 3, no, no) \ SC(220, 59, 57, 4, no, no) \ \ SC(221, 60, 58, 1, no, no) \ SC(222, 60, 58, 2, no, no) \ SC(223, 60, 58, 3, no, no) \ SC(224, 60, 58, 4, no, no) \ \ SC(225, 61, 59, 1, no, no) \ SC(226, 61, 59, 2, no, no) \ SC(227, 61, 59, 3, no, no) \ SC(228, 61, 59, 4, no, no) \ \ SC(229, 62, 60, 1, no, no) \ SC(230, 62, 60, 2, no, no) \ SC(231, 62, 60, 3, no, no) \ SC(232, 62, 60, 4, no, no) \ \ SC(233, 63, 61, 1, no, no) \ SC(234, 63, 61, 2, no, no) \ SC(235, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 28 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 
4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 4, yes, 9) \ \ SC( 29, 12, 10, 1, yes, no) \ SC( 30, 12, 10, 2, yes, no) \ SC( 31, 12, 10, 3, yes, no) \ SC( 32, 12, 10, 4, no, no) \ \ SC( 33, 13, 11, 1, no, no) \ SC( 34, 13, 11, 2, no, no) \ SC( 35, 13, 11, 3, no, no) \ SC( 36, 13, 11, 4, no, no) \ \ SC( 37, 14, 12, 1, no, no) \ SC( 38, 14, 12, 2, no, no) \ SC( 39, 14, 12, 3, no, no) \ SC( 40, 14, 12, 4, no, no) \ \ SC( 41, 15, 13, 1, no, no) \ SC( 42, 15, 13, 2, no, no) \ SC( 43, 15, 13, 3, no, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 
21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ SC(108, 31, 29, 4, no, no) \ \ SC(109, 32, 30, 1, no, no) \ SC(110, 32, 30, 2, no, no) \ SC(111, 32, 30, 3, no, no) \ SC(112, 32, 30, 4, no, no) \ \ SC(113, 33, 31, 1, no, no) \ SC(114, 33, 31, 2, no, no) \ SC(115, 33, 31, 3, no, no) \ SC(116, 33, 31, 4, no, no) \ \ SC(117, 34, 32, 1, no, no) \ SC(118, 34, 32, 2, no, no) \ SC(119, 34, 32, 3, no, no) \ SC(120, 34, 32, 4, no, no) \ \ SC(121, 35, 33, 1, no, no) \ SC(122, 35, 33, 2, no, no) \ SC(123, 35, 33, 3, no, no) \ SC(124, 35, 33, 4, no, no) \ \ SC(125, 36, 34, 1, no, no) \ SC(126, 36, 34, 2, no, no) \ SC(127, 36, 34, 3, no, no) \ SC(128, 36, 34, 4, no, no) \ \ SC(129, 37, 35, 1, no, no) \ SC(130, 37, 35, 2, no, no) \ SC(131, 37, 35, 3, no, no) \ SC(132, 37, 35, 4, no, no) \ \ SC(133, 38, 36, 1, no, no) \ SC(134, 38, 36, 2, no, no) \ SC(135, 38, 36, 3, no, no) \ SC(136, 38, 36, 4, no, no) \ \ SC(137, 39, 37, 1, no, no) \ SC(138, 39, 37, 2, no, no) \ SC(139, 39, 37, 3, no, no) \ SC(140, 39, 37, 4, no, no) \ \ 
SC(141, 40, 38, 1, no, no) \ SC(142, 40, 38, 2, no, no) \ SC(143, 40, 38, 3, no, no) \ SC(144, 40, 38, 4, no, no) \ \ SC(145, 41, 39, 1, no, no) \ SC(146, 41, 39, 2, no, no) \ SC(147, 41, 39, 3, no, no) \ SC(148, 41, 39, 4, no, no) \ \ SC(149, 42, 40, 1, no, no) \ SC(150, 42, 40, 2, no, no) \ SC(151, 42, 40, 3, no, no) \ SC(152, 42, 40, 4, no, no) \ \ SC(153, 43, 41, 1, no, no) \ SC(154, 43, 41, 2, no, no) \ SC(155, 43, 41, 3, no, no) \ SC(156, 43, 41, 4, no, no) \ \ SC(157, 44, 42, 1, no, no) \ SC(158, 44, 42, 2, no, no) \ SC(159, 44, 42, 3, no, no) \ SC(160, 44, 42, 4, no, no) \ \ SC(161, 45, 43, 1, no, no) \ SC(162, 45, 43, 2, no, no) \ SC(163, 45, 43, 3, no, no) \ SC(164, 45, 43, 4, no, no) \ \ SC(165, 46, 44, 1, no, no) \ SC(166, 46, 44, 2, no, no) \ SC(167, 46, 44, 3, no, no) \ SC(168, 46, 44, 4, no, no) \ \ SC(169, 47, 45, 1, no, no) \ SC(170, 47, 45, 2, no, no) \ SC(171, 47, 45, 3, no, no) \ SC(172, 47, 45, 4, no, no) \ \ SC(173, 48, 46, 1, no, no) \ SC(174, 48, 46, 2, no, no) \ SC(175, 48, 46, 3, no, no) \ SC(176, 48, 46, 4, no, no) \ \ SC(177, 49, 47, 1, no, no) \ SC(178, 49, 47, 2, no, no) \ SC(179, 49, 47, 3, no, no) \ SC(180, 49, 47, 4, no, no) \ \ SC(181, 50, 48, 1, no, no) \ SC(182, 50, 48, 2, no, no) \ SC(183, 50, 48, 3, no, no) \ SC(184, 50, 48, 4, no, no) \ \ SC(185, 51, 49, 1, no, no) \ SC(186, 51, 49, 2, no, no) \ SC(187, 51, 49, 3, no, no) \ SC(188, 51, 49, 4, no, no) \ \ SC(189, 52, 50, 1, no, no) \ SC(190, 52, 50, 2, no, no) \ SC(191, 52, 50, 3, no, no) \ SC(192, 52, 50, 4, no, no) \ \ SC(193, 53, 51, 1, no, no) \ SC(194, 53, 51, 2, no, no) \ SC(195, 53, 51, 3, no, no) \ SC(196, 53, 51, 4, no, no) \ \ SC(197, 54, 52, 1, no, no) \ SC(198, 54, 52, 2, no, no) \ SC(199, 54, 52, 3, no, no) \ SC(200, 54, 52, 4, no, no) \ \ SC(201, 55, 53, 1, no, no) \ SC(202, 55, 53, 2, no, no) \ SC(203, 55, 53, 3, no, no) \ SC(204, 55, 53, 4, no, no) \ \ SC(205, 56, 54, 1, no, no) \ SC(206, 56, 54, 2, no, no) \ SC(207, 56, 54, 3, no, no) \ SC(208, 56, 54, 4, no, 
no) \ \ SC(209, 57, 55, 1, no, no) \ SC(210, 57, 55, 2, no, no) \ SC(211, 57, 55, 3, no, no) \ SC(212, 57, 55, 4, no, no) \ \ SC(213, 58, 56, 1, no, no) \ SC(214, 58, 56, 2, no, no) \ SC(215, 58, 56, 3, no, no) \ SC(216, 58, 56, 4, no, no) \ \ SC(217, 59, 57, 1, no, no) \ SC(218, 59, 57, 2, no, no) \ SC(219, 59, 57, 3, no, no) \ SC(220, 59, 57, 4, no, no) \ \ SC(221, 60, 58, 1, no, no) \ SC(222, 60, 58, 2, no, no) \ SC(223, 60, 58, 3, no, no) \ SC(224, 60, 58, 4, no, no) \ \ SC(225, 61, 59, 1, no, no) \ SC(226, 61, 59, 2, no, no) \ SC(227, 61, 59, 3, no, no) \ SC(228, 61, 59, 4, no, no) \ \ SC(229, 62, 60, 1, no, no) \ SC(230, 62, 60, 2, no, no) \ SC(231, 62, 60, 3, no, no) \ SC(232, 62, 60, 4, no, no) \ \ SC(233, 63, 61, 1, no, no) \ SC(234, 63, 61, 2, no, no) \ SC(235, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 32 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 3, 3, 0, yes, 3) \ \ SC( 1, 3, 3, 1, yes, 3) \ SC( 2, 4, 4, 1, yes, 4) \ SC( 3, 4, 4, 2, yes, 4) \ SC( 4, 4, 4, 3, yes, 4) \ \ SC( 5, 6, 4, 1, yes, 4) \ SC( 6, 6, 4, 2, yes, 4) \ SC( 7, 6, 4, 3, yes, 4) \ SC( 8, 6, 4, 4, yes, 4) \ \ SC( 9, 7, 5, 1, yes, 5) \ SC( 10, 7, 5, 2, yes, 5) \ SC( 11, 7, 5, 3, yes, 5) \ SC( 12, 7, 5, 4, yes, 5) \ \ SC( 13, 8, 6, 1, yes, 6) \ SC( 14, 8, 6, 2, yes, 6) \ SC( 15, 8, 6, 3, yes, 6) \ SC( 16, 8, 6, 4, yes, 6) \ \ SC( 17, 9, 7, 1, yes, 7) \ SC( 18, 9, 7, 2, yes, 7) \ SC( 19, 9, 7, 3, yes, 7) \ SC( 20, 9, 7, 4, yes, 7) \ \ SC( 21, 10, 8, 1, yes, 8) \ SC( 22, 10, 8, 2, yes, 8) \ SC( 23, 10, 8, 3, yes, 8) \ SC( 24, 10, 8, 4, yes, 8) \ \ SC( 25, 11, 9, 1, yes, 9) \ SC( 26, 11, 9, 2, yes, 9) \ SC( 27, 11, 9, 3, yes, 9) \ SC( 28, 11, 9, 
4, yes, 9) \ \ SC( 29, 12, 10, 1, yes, no) \ SC( 30, 12, 10, 2, yes, no) \ SC( 31, 12, 10, 3, yes, no) \ SC( 32, 12, 10, 4, yes, no) \ \ SC( 33, 13, 11, 1, yes, no) \ SC( 34, 13, 11, 2, yes, no) \ SC( 35, 13, 11, 3, yes, no) \ SC( 36, 13, 11, 4, yes, no) \ \ SC( 37, 14, 12, 1, yes, no) \ SC( 38, 14, 12, 2, yes, no) \ SC( 39, 14, 12, 3, yes, no) \ SC( 40, 14, 12, 4, yes, no) \ \ SC( 41, 15, 13, 1, yes, no) \ SC( 42, 15, 13, 2, yes, no) \ SC( 43, 15, 13, 3, yes, no) \ SC( 44, 15, 13, 4, no, no) \ \ SC( 45, 16, 14, 1, no, no) \ SC( 46, 16, 14, 2, no, no) \ SC( 47, 16, 14, 3, no, no) \ SC( 48, 16, 14, 4, no, no) \ \ SC( 49, 17, 15, 1, no, no) \ SC( 50, 17, 15, 2, no, no) \ SC( 51, 17, 15, 3, no, no) \ SC( 52, 17, 15, 4, no, no) \ \ SC( 53, 18, 16, 1, no, no) \ SC( 54, 18, 16, 2, no, no) \ SC( 55, 18, 16, 3, no, no) \ SC( 56, 18, 16, 4, no, no) \ \ SC( 57, 19, 17, 1, no, no) \ SC( 58, 19, 17, 2, no, no) \ SC( 59, 19, 17, 3, no, no) \ SC( 60, 19, 17, 4, no, no) \ \ SC( 61, 20, 18, 1, no, no) \ SC( 62, 20, 18, 2, no, no) \ SC( 63, 20, 18, 3, no, no) \ SC( 64, 20, 18, 4, no, no) \ \ SC( 65, 21, 19, 1, no, no) \ SC( 66, 21, 19, 2, no, no) \ SC( 67, 21, 19, 3, no, no) \ SC( 68, 21, 19, 4, no, no) \ \ SC( 69, 22, 20, 1, no, no) \ SC( 70, 22, 20, 2, no, no) \ SC( 71, 22, 20, 3, no, no) \ SC( 72, 22, 20, 4, no, no) \ \ SC( 73, 23, 21, 1, no, no) \ SC( 74, 23, 21, 2, no, no) \ SC( 75, 23, 21, 3, no, no) \ SC( 76, 23, 21, 4, no, no) \ \ SC( 77, 24, 22, 1, no, no) \ SC( 78, 24, 22, 2, no, no) \ SC( 79, 24, 22, 3, no, no) \ SC( 80, 24, 22, 4, no, no) \ \ SC( 81, 25, 23, 1, no, no) \ SC( 82, 25, 23, 2, no, no) \ SC( 83, 25, 23, 3, no, no) \ SC( 84, 25, 23, 4, no, no) \ \ SC( 85, 26, 24, 1, no, no) \ SC( 86, 26, 24, 2, no, no) \ SC( 87, 26, 24, 3, no, no) \ SC( 88, 26, 24, 4, no, no) \ \ SC( 89, 27, 25, 1, no, no) \ SC( 90, 27, 25, 2, no, no) \ SC( 91, 27, 25, 3, no, no) \ SC( 92, 27, 25, 4, no, no) \ \ SC( 93, 28, 26, 1, no, no) \ SC( 94, 28, 26, 2, no, no) \ SC( 95, 28, 26, 3, no, 
no) \ SC( 96, 28, 26, 4, no, no) \ \ SC( 97, 29, 27, 1, no, no) \ SC( 98, 29, 27, 2, no, no) \ SC( 99, 29, 27, 3, no, no) \ SC(100, 29, 27, 4, no, no) \ \ SC(101, 30, 28, 1, no, no) \ SC(102, 30, 28, 2, no, no) \ SC(103, 30, 28, 3, no, no) \ SC(104, 30, 28, 4, no, no) \ \ SC(105, 31, 29, 1, no, no) \ SC(106, 31, 29, 2, no, no) \ SC(107, 31, 29, 3, no, no) \ SC(108, 31, 29, 4, no, no) \ \ SC(109, 32, 30, 1, no, no) \ SC(110, 32, 30, 2, no, no) \ SC(111, 32, 30, 3, no, no) \ SC(112, 32, 30, 4, no, no) \ \ SC(113, 33, 31, 1, no, no) \ SC(114, 33, 31, 2, no, no) \ SC(115, 33, 31, 3, no, no) \ SC(116, 33, 31, 4, no, no) \ \ SC(117, 34, 32, 1, no, no) \ SC(118, 34, 32, 2, no, no) \ SC(119, 34, 32, 3, no, no) \ SC(120, 34, 32, 4, no, no) \ \ SC(121, 35, 33, 1, no, no) \ SC(122, 35, 33, 2, no, no) \ SC(123, 35, 33, 3, no, no) \ SC(124, 35, 33, 4, no, no) \ \ SC(125, 36, 34, 1, no, no) \ SC(126, 36, 34, 2, no, no) \ SC(127, 36, 34, 3, no, no) \ SC(128, 36, 34, 4, no, no) \ \ SC(129, 37, 35, 1, no, no) \ SC(130, 37, 35, 2, no, no) \ SC(131, 37, 35, 3, no, no) \ SC(132, 37, 35, 4, no, no) \ \ SC(133, 38, 36, 1, no, no) \ SC(134, 38, 36, 2, no, no) \ SC(135, 38, 36, 3, no, no) \ SC(136, 38, 36, 4, no, no) \ \ SC(137, 39, 37, 1, no, no) \ SC(138, 39, 37, 2, no, no) \ SC(139, 39, 37, 3, no, no) \ SC(140, 39, 37, 4, no, no) \ \ SC(141, 40, 38, 1, no, no) \ SC(142, 40, 38, 2, no, no) \ SC(143, 40, 38, 3, no, no) \ SC(144, 40, 38, 4, no, no) \ \ SC(145, 41, 39, 1, no, no) \ SC(146, 41, 39, 2, no, no) \ SC(147, 41, 39, 3, no, no) \ SC(148, 41, 39, 4, no, no) \ \ SC(149, 42, 40, 1, no, no) \ SC(150, 42, 40, 2, no, no) \ SC(151, 42, 40, 3, no, no) \ SC(152, 42, 40, 4, no, no) \ \ SC(153, 43, 41, 1, no, no) \ SC(154, 43, 41, 2, no, no) \ SC(155, 43, 41, 3, no, no) \ SC(156, 43, 41, 4, no, no) \ \ SC(157, 44, 42, 1, no, no) \ SC(158, 44, 42, 2, no, no) \ SC(159, 44, 42, 3, no, no) \ SC(160, 44, 42, 4, no, no) \ \ SC(161, 45, 43, 1, no, no) \ SC(162, 45, 43, 2, no, no) \ SC(163, 45, 43, 
3, no, no) \ SC(164, 45, 43, 4, no, no) \ \ SC(165, 46, 44, 1, no, no) \ SC(166, 46, 44, 2, no, no) \ SC(167, 46, 44, 3, no, no) \ SC(168, 46, 44, 4, no, no) \ \ SC(169, 47, 45, 1, no, no) \ SC(170, 47, 45, 2, no, no) \ SC(171, 47, 45, 3, no, no) \ SC(172, 47, 45, 4, no, no) \ \ SC(173, 48, 46, 1, no, no) \ SC(174, 48, 46, 2, no, no) \ SC(175, 48, 46, 3, no, no) \ SC(176, 48, 46, 4, no, no) \ \ SC(177, 49, 47, 1, no, no) \ SC(178, 49, 47, 2, no, no) \ SC(179, 49, 47, 3, no, no) \ SC(180, 49, 47, 4, no, no) \ \ SC(181, 50, 48, 1, no, no) \ SC(182, 50, 48, 2, no, no) \ SC(183, 50, 48, 3, no, no) \ SC(184, 50, 48, 4, no, no) \ \ SC(185, 51, 49, 1, no, no) \ SC(186, 51, 49, 2, no, no) \ SC(187, 51, 49, 3, no, no) \ SC(188, 51, 49, 4, no, no) \ \ SC(189, 52, 50, 1, no, no) \ SC(190, 52, 50, 2, no, no) \ SC(191, 52, 50, 3, no, no) \ SC(192, 52, 50, 4, no, no) \ \ SC(193, 53, 51, 1, no, no) \ SC(194, 53, 51, 2, no, no) \ SC(195, 53, 51, 3, no, no) \ SC(196, 53, 51, 4, no, no) \ \ SC(197, 54, 52, 1, no, no) \ SC(198, 54, 52, 2, no, no) \ SC(199, 54, 52, 3, no, no) \ SC(200, 54, 52, 4, no, no) \ \ SC(201, 55, 53, 1, no, no) \ SC(202, 55, 53, 2, no, no) \ SC(203, 55, 53, 3, no, no) \ SC(204, 55, 53, 4, no, no) \ \ SC(205, 56, 54, 1, no, no) \ SC(206, 56, 54, 2, no, no) \ SC(207, 56, 54, 3, no, no) \ SC(208, 56, 54, 4, no, no) \ \ SC(209, 57, 55, 1, no, no) \ SC(210, 57, 55, 2, no, no) \ SC(211, 57, 55, 3, no, no) \ SC(212, 57, 55, 4, no, no) \ \ SC(213, 58, 56, 1, no, no) \ SC(214, 58, 56, 2, no, no) \ SC(215, 58, 56, 3, no, no) \ SC(216, 58, 56, 4, no, no) \ \ SC(217, 59, 57, 1, no, no) \ SC(218, 59, 57, 2, no, no) \ SC(219, 59, 57, 3, no, no) \ SC(220, 59, 57, 4, no, no) \ \ SC(221, 60, 58, 1, no, no) \ SC(222, 60, 58, 2, no, no) \ SC(223, 60, 58, 3, no, no) \ SC(224, 60, 58, 4, no, no) \ \ SC(225, 61, 59, 1, no, no) \ SC(226, 61, 59, 2, no, no) \ SC(227, 61, 59, 3, no, no) \ SC(228, 61, 59, 4, no, no) \ \ SC(229, 62, 60, 1, no, no) \ SC(230, 62, 60, 2, no, no) \ SC(231, 
62, 60, 3, no, no) \ SC(232, 62, 60, 4, no, no) \ \ SC(233, 63, 61, 1, no, no) \ SC(234, 63, 61, 2, no, no) \ SC(235, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 1 #define NLBINS 29 #define NBINS 44 #define LG_TINY_MAXCLASS 3 #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, yes, 9) \ SC( 27, 11, 9, 4, no, 9) \ \ SC( 28, 12, 10, 1, no, no) \ SC( 29, 12, 10, 2, no, no) \ SC( 30, 12, 10, 3, no, no) \ SC( 31, 12, 10, 4, no, no) \ \ SC( 32, 13, 11, 1, no, no) \ SC( 33, 13, 11, 2, no, no) \ SC( 34, 13, 11, 3, no, no) \ SC( 35, 13, 11, 4, no, no) \ \ SC( 36, 14, 12, 1, no, no) \ SC( 37, 14, 12, 2, no, no) \ SC( 38, 14, 12, 3, no, no) \ SC( 39, 14, 12, 4, no, no) \ \ SC( 40, 15, 13, 1, no, no) \ SC( 41, 15, 13, 2, no, no) \ SC( 42, 15, 13, 3, no, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ 
SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ SC(107, 31, 29, 4, no, no) \ \ SC(108, 32, 30, 1, no, no) \ SC(109, 32, 30, 2, no, no) \ SC(110, 32, 30, 3, no, no) \ SC(111, 32, 30, 4, no, no) \ \ SC(112, 33, 31, 1, no, no) \ SC(113, 33, 31, 2, no, no) \ SC(114, 33, 31, 3, no, no) \ SC(115, 33, 31, 4, no, no) \ \ SC(116, 34, 32, 1, no, no) \ SC(117, 34, 32, 2, no, no) \ SC(118, 34, 32, 3, no, 
no) \ SC(119, 34, 32, 4, no, no) \ \ SC(120, 35, 33, 1, no, no) \ SC(121, 35, 33, 2, no, no) \ SC(122, 35, 33, 3, no, no) \ SC(123, 35, 33, 4, no, no) \ \ SC(124, 36, 34, 1, no, no) \ SC(125, 36, 34, 2, no, no) \ SC(126, 36, 34, 3, no, no) \ SC(127, 36, 34, 4, no, no) \ \ SC(128, 37, 35, 1, no, no) \ SC(129, 37, 35, 2, no, no) \ SC(130, 37, 35, 3, no, no) \ SC(131, 37, 35, 4, no, no) \ \ SC(132, 38, 36, 1, no, no) \ SC(133, 38, 36, 2, no, no) \ SC(134, 38, 36, 3, no, no) \ SC(135, 38, 36, 4, no, no) \ \ SC(136, 39, 37, 1, no, no) \ SC(137, 39, 37, 2, no, no) \ SC(138, 39, 37, 3, no, no) \ SC(139, 39, 37, 4, no, no) \ \ SC(140, 40, 38, 1, no, no) \ SC(141, 40, 38, 2, no, no) \ SC(142, 40, 38, 3, no, no) \ SC(143, 40, 38, 4, no, no) \ \ SC(144, 41, 39, 1, no, no) \ SC(145, 41, 39, 2, no, no) \ SC(146, 41, 39, 3, no, no) \ SC(147, 41, 39, 4, no, no) \ \ SC(148, 42, 40, 1, no, no) \ SC(149, 42, 40, 2, no, no) \ SC(150, 42, 40, 3, no, no) \ SC(151, 42, 40, 4, no, no) \ \ SC(152, 43, 41, 1, no, no) \ SC(153, 43, 41, 2, no, no) \ SC(154, 43, 41, 3, no, no) \ SC(155, 43, 41, 4, no, no) \ \ SC(156, 44, 42, 1, no, no) \ SC(157, 44, 42, 2, no, no) \ SC(158, 44, 42, 3, no, no) \ SC(159, 44, 42, 4, no, no) \ \ SC(160, 45, 43, 1, no, no) \ SC(161, 45, 43, 2, no, no) \ SC(162, 45, 43, 3, no, no) \ SC(163, 45, 43, 4, no, no) \ \ SC(164, 46, 44, 1, no, no) \ SC(165, 46, 44, 2, no, no) \ SC(166, 46, 44, 3, no, no) \ SC(167, 46, 44, 4, no, no) \ \ SC(168, 47, 45, 1, no, no) \ SC(169, 47, 45, 2, no, no) \ SC(170, 47, 45, 3, no, no) \ SC(171, 47, 45, 4, no, no) \ \ SC(172, 48, 46, 1, no, no) \ SC(173, 48, 46, 2, no, no) \ SC(174, 48, 46, 3, no, no) \ SC(175, 48, 46, 4, no, no) \ \ SC(176, 49, 47, 1, no, no) \ SC(177, 49, 47, 2, no, no) \ SC(178, 49, 47, 3, no, no) \ SC(179, 49, 47, 4, no, no) \ \ SC(180, 50, 48, 1, no, no) \ SC(181, 50, 48, 2, no, no) \ SC(182, 50, 48, 3, no, no) \ SC(183, 50, 48, 4, no, no) \ \ SC(184, 51, 49, 1, no, no) \ SC(185, 51, 49, 2, no, no) \ SC(186, 51, 49, 
3, no, no) \ SC(187, 51, 49, 4, no, no) \ \ SC(188, 52, 50, 1, no, no) \ SC(189, 52, 50, 2, no, no) \ SC(190, 52, 50, 3, no, no) \ SC(191, 52, 50, 4, no, no) \ \ SC(192, 53, 51, 1, no, no) \ SC(193, 53, 51, 2, no, no) \ SC(194, 53, 51, 3, no, no) \ SC(195, 53, 51, 4, no, no) \ \ SC(196, 54, 52, 1, no, no) \ SC(197, 54, 52, 2, no, no) \ SC(198, 54, 52, 3, no, no) \ SC(199, 54, 52, 4, no, no) \ \ SC(200, 55, 53, 1, no, no) \ SC(201, 55, 53, 2, no, no) \ SC(202, 55, 53, 3, no, no) \ SC(203, 55, 53, 4, no, no) \ \ SC(204, 56, 54, 1, no, no) \ SC(205, 56, 54, 2, no, no) \ SC(206, 56, 54, 3, no, no) \ SC(207, 56, 54, 4, no, no) \ \ SC(208, 57, 55, 1, no, no) \ SC(209, 57, 55, 2, no, no) \ SC(210, 57, 55, 3, no, no) \ SC(211, 57, 55, 4, no, no) \ \ SC(212, 58, 56, 1, no, no) \ SC(213, 58, 56, 2, no, no) \ SC(214, 58, 56, 3, no, no) \ SC(215, 58, 56, 4, no, no) \ \ SC(216, 59, 57, 1, no, no) \ SC(217, 59, 57, 2, no, no) \ SC(218, 59, 57, 3, no, no) \ SC(219, 59, 57, 4, no, no) \ \ SC(220, 60, 58, 1, no, no) \ SC(221, 60, 58, 2, no, no) \ SC(222, 60, 58, 3, no, no) \ SC(223, 60, 58, 4, no, no) \ \ SC(224, 61, 59, 1, no, no) \ SC(225, 61, 59, 2, no, no) \ SC(226, 61, 59, 3, no, no) \ SC(227, 61, 59, 4, no, no) \ \ SC(228, 62, 60, 1, no, no) \ SC(229, 62, 60, 2, no, no) \ SC(230, 62, 60, 3, no, no) \ SC(231, 62, 60, 4, no, no) \ \ SC(232, 63, 61, 1, no, no) \ SC(233, 63, 61, 2, no, no) \ SC(234, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 27 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 
4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, yes, 9) \ SC( 27, 11, 9, 4, yes, 9) \ \ SC( 28, 12, 10, 1, yes, no) \ SC( 29, 12, 10, 2, yes, no) \ SC( 30, 12, 10, 3, yes, no) \ SC( 31, 12, 10, 4, no, no) \ \ SC( 32, 13, 11, 1, no, no) \ SC( 33, 13, 11, 2, no, no) \ SC( 34, 13, 11, 3, no, no) \ SC( 35, 13, 11, 4, no, no) \ \ SC( 36, 14, 12, 1, no, no) \ SC( 37, 14, 12, 2, no, no) \ SC( 38, 14, 12, 3, no, no) \ SC( 39, 14, 12, 4, no, no) \ \ SC( 40, 15, 13, 1, no, no) \ SC( 41, 15, 13, 2, no, no) \ SC( 42, 15, 13, 3, no, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, 
no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ SC(107, 31, 29, 4, no, no) \ \ SC(108, 32, 30, 1, no, no) \ SC(109, 32, 30, 2, no, no) \ SC(110, 32, 30, 3, no, no) \ SC(111, 32, 30, 4, no, no) \ \ SC(112, 33, 31, 1, no, no) \ SC(113, 33, 31, 2, no, no) \ SC(114, 33, 31, 3, no, no) \ SC(115, 33, 31, 4, no, no) \ \ SC(116, 34, 32, 1, no, no) \ SC(117, 34, 32, 2, no, no) \ SC(118, 34, 32, 3, no, no) \ SC(119, 34, 32, 4, no, no) \ \ SC(120, 35, 33, 1, no, no) \ SC(121, 35, 33, 2, no, no) \ SC(122, 35, 33, 3, no, no) \ SC(123, 35, 33, 4, no, no) \ \ SC(124, 36, 34, 1, no, no) \ SC(125, 36, 34, 2, no, no) \ SC(126, 36, 34, 3, no, no) \ SC(127, 36, 34, 4, no, no) \ \ SC(128, 37, 35, 1, no, no) \ SC(129, 37, 35, 2, no, no) \ SC(130, 37, 35, 3, no, no) \ SC(131, 37, 35, 4, no, no) \ \ SC(132, 38, 36, 1, no, no) \ SC(133, 38, 36, 2, no, no) \ SC(134, 38, 36, 3, no, no) \ SC(135, 38, 36, 4, no, no) \ \ SC(136, 39, 37, 1, no, no) \ SC(137, 39, 37, 2, no, no) \ SC(138, 39, 37, 3, no, no) \ SC(139, 39, 37, 4, no, no) \ \ SC(140, 40, 38, 1, no, no) \ SC(141, 40, 38, 2, no, no) \ SC(142, 40, 38, 
3, no, no) \ SC(143, 40, 38, 4, no, no) \ \ SC(144, 41, 39, 1, no, no) \ SC(145, 41, 39, 2, no, no) \ SC(146, 41, 39, 3, no, no) \ SC(147, 41, 39, 4, no, no) \ \ SC(148, 42, 40, 1, no, no) \ SC(149, 42, 40, 2, no, no) \ SC(150, 42, 40, 3, no, no) \ SC(151, 42, 40, 4, no, no) \ \ SC(152, 43, 41, 1, no, no) \ SC(153, 43, 41, 2, no, no) \ SC(154, 43, 41, 3, no, no) \ SC(155, 43, 41, 4, no, no) \ \ SC(156, 44, 42, 1, no, no) \ SC(157, 44, 42, 2, no, no) \ SC(158, 44, 42, 3, no, no) \ SC(159, 44, 42, 4, no, no) \ \ SC(160, 45, 43, 1, no, no) \ SC(161, 45, 43, 2, no, no) \ SC(162, 45, 43, 3, no, no) \ SC(163, 45, 43, 4, no, no) \ \ SC(164, 46, 44, 1, no, no) \ SC(165, 46, 44, 2, no, no) \ SC(166, 46, 44, 3, no, no) \ SC(167, 46, 44, 4, no, no) \ \ SC(168, 47, 45, 1, no, no) \ SC(169, 47, 45, 2, no, no) \ SC(170, 47, 45, 3, no, no) \ SC(171, 47, 45, 4, no, no) \ \ SC(172, 48, 46, 1, no, no) \ SC(173, 48, 46, 2, no, no) \ SC(174, 48, 46, 3, no, no) \ SC(175, 48, 46, 4, no, no) \ \ SC(176, 49, 47, 1, no, no) \ SC(177, 49, 47, 2, no, no) \ SC(178, 49, 47, 3, no, no) \ SC(179, 49, 47, 4, no, no) \ \ SC(180, 50, 48, 1, no, no) \ SC(181, 50, 48, 2, no, no) \ SC(182, 50, 48, 3, no, no) \ SC(183, 50, 48, 4, no, no) \ \ SC(184, 51, 49, 1, no, no) \ SC(185, 51, 49, 2, no, no) \ SC(186, 51, 49, 3, no, no) \ SC(187, 51, 49, 4, no, no) \ \ SC(188, 52, 50, 1, no, no) \ SC(189, 52, 50, 2, no, no) \ SC(190, 52, 50, 3, no, no) \ SC(191, 52, 50, 4, no, no) \ \ SC(192, 53, 51, 1, no, no) \ SC(193, 53, 51, 2, no, no) \ SC(194, 53, 51, 3, no, no) \ SC(195, 53, 51, 4, no, no) \ \ SC(196, 54, 52, 1, no, no) \ SC(197, 54, 52, 2, no, no) \ SC(198, 54, 52, 3, no, no) \ SC(199, 54, 52, 4, no, no) \ \ SC(200, 55, 53, 1, no, no) \ SC(201, 55, 53, 2, no, no) \ SC(202, 55, 53, 3, no, no) \ SC(203, 55, 53, 4, no, no) \ \ SC(204, 56, 54, 1, no, no) \ SC(205, 56, 54, 2, no, no) \ SC(206, 56, 54, 3, no, no) \ SC(207, 56, 54, 4, no, no) \ \ SC(208, 57, 55, 1, no, no) \ SC(209, 57, 55, 2, no, no) \ SC(210, 
57, 55, 3, no, no) \ SC(211, 57, 55, 4, no, no) \ \ SC(212, 58, 56, 1, no, no) \ SC(213, 58, 56, 2, no, no) \ SC(214, 58, 56, 3, no, no) \ SC(215, 58, 56, 4, no, no) \ \ SC(216, 59, 57, 1, no, no) \ SC(217, 59, 57, 2, no, no) \ SC(218, 59, 57, 3, no, no) \ SC(219, 59, 57, 4, no, no) \ \ SC(220, 60, 58, 1, no, no) \ SC(221, 60, 58, 2, no, no) \ SC(222, 60, 58, 3, no, no) \ SC(223, 60, 58, 4, no, no) \ \ SC(224, 61, 59, 1, no, no) \ SC(225, 61, 59, 2, no, no) \ SC(226, 61, 59, 3, no, no) \ SC(227, 61, 59, 4, no, no) \ \ SC(228, 62, 60, 1, no, no) \ SC(229, 62, 60, 2, no, no) \ SC(230, 62, 60, 3, no, no) \ SC(231, 62, 60, 4, no, no) \ \ SC(232, 63, 61, 1, no, no) \ SC(233, 63, 61, 2, no, no) \ SC(234, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 31 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) #endif #if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16) #define SIZE_CLASSES \ /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \ SC( 0, 4, 4, 0, yes, 4) \ SC( 1, 4, 4, 1, yes, 4) \ SC( 2, 4, 4, 2, yes, 4) \ SC( 3, 4, 4, 3, yes, 4) \ \ SC( 4, 6, 4, 1, yes, 4) \ SC( 5, 6, 4, 2, yes, 4) \ SC( 6, 6, 4, 3, yes, 4) \ SC( 7, 6, 4, 4, yes, 4) \ \ SC( 8, 7, 5, 1, yes, 5) \ SC( 9, 7, 5, 2, yes, 5) \ SC( 10, 7, 5, 3, yes, 5) \ SC( 11, 7, 5, 4, yes, 5) \ \ SC( 12, 8, 6, 1, yes, 6) \ SC( 13, 8, 6, 2, yes, 6) \ SC( 14, 8, 6, 3, yes, 6) \ SC( 15, 8, 6, 4, yes, 6) \ \ SC( 16, 9, 7, 1, yes, 7) \ SC( 17, 9, 7, 2, yes, 7) \ SC( 18, 9, 7, 3, yes, 7) \ SC( 19, 9, 7, 4, yes, 7) \ \ SC( 20, 10, 8, 1, yes, 8) \ SC( 21, 10, 8, 2, yes, 8) \ SC( 22, 10, 8, 3, yes, 8) \ SC( 23, 10, 8, 4, yes, 8) \ \ SC( 24, 11, 9, 1, yes, 9) \ SC( 25, 11, 9, 2, yes, 9) \ SC( 26, 11, 9, 3, yes, 9) \ SC( 27, 11, 9, 4, yes, 9) \ \ SC( 28, 12, 10, 1, yes, no) \ SC( 29, 12, 10, 2, yes, no) \ SC( 30, 12, 10, 3, yes, 
no) \ SC( 31, 12, 10, 4, yes, no) \ \ SC( 32, 13, 11, 1, yes, no) \ SC( 33, 13, 11, 2, yes, no) \ SC( 34, 13, 11, 3, yes, no) \ SC( 35, 13, 11, 4, yes, no) \ \ SC( 36, 14, 12, 1, yes, no) \ SC( 37, 14, 12, 2, yes, no) \ SC( 38, 14, 12, 3, yes, no) \ SC( 39, 14, 12, 4, yes, no) \ \ SC( 40, 15, 13, 1, yes, no) \ SC( 41, 15, 13, 2, yes, no) \ SC( 42, 15, 13, 3, yes, no) \ SC( 43, 15, 13, 4, no, no) \ \ SC( 44, 16, 14, 1, no, no) \ SC( 45, 16, 14, 2, no, no) \ SC( 46, 16, 14, 3, no, no) \ SC( 47, 16, 14, 4, no, no) \ \ SC( 48, 17, 15, 1, no, no) \ SC( 49, 17, 15, 2, no, no) \ SC( 50, 17, 15, 3, no, no) \ SC( 51, 17, 15, 4, no, no) \ \ SC( 52, 18, 16, 1, no, no) \ SC( 53, 18, 16, 2, no, no) \ SC( 54, 18, 16, 3, no, no) \ SC( 55, 18, 16, 4, no, no) \ \ SC( 56, 19, 17, 1, no, no) \ SC( 57, 19, 17, 2, no, no) \ SC( 58, 19, 17, 3, no, no) \ SC( 59, 19, 17, 4, no, no) \ \ SC( 60, 20, 18, 1, no, no) \ SC( 61, 20, 18, 2, no, no) \ SC( 62, 20, 18, 3, no, no) \ SC( 63, 20, 18, 4, no, no) \ \ SC( 64, 21, 19, 1, no, no) \ SC( 65, 21, 19, 2, no, no) \ SC( 66, 21, 19, 3, no, no) \ SC( 67, 21, 19, 4, no, no) \ \ SC( 68, 22, 20, 1, no, no) \ SC( 69, 22, 20, 2, no, no) \ SC( 70, 22, 20, 3, no, no) \ SC( 71, 22, 20, 4, no, no) \ \ SC( 72, 23, 21, 1, no, no) \ SC( 73, 23, 21, 2, no, no) \ SC( 74, 23, 21, 3, no, no) \ SC( 75, 23, 21, 4, no, no) \ \ SC( 76, 24, 22, 1, no, no) \ SC( 77, 24, 22, 2, no, no) \ SC( 78, 24, 22, 3, no, no) \ SC( 79, 24, 22, 4, no, no) \ \ SC( 80, 25, 23, 1, no, no) \ SC( 81, 25, 23, 2, no, no) \ SC( 82, 25, 23, 3, no, no) \ SC( 83, 25, 23, 4, no, no) \ \ SC( 84, 26, 24, 1, no, no) \ SC( 85, 26, 24, 2, no, no) \ SC( 86, 26, 24, 3, no, no) \ SC( 87, 26, 24, 4, no, no) \ \ SC( 88, 27, 25, 1, no, no) \ SC( 89, 27, 25, 2, no, no) \ SC( 90, 27, 25, 3, no, no) \ SC( 91, 27, 25, 4, no, no) \ \ SC( 92, 28, 26, 1, no, no) \ SC( 93, 28, 26, 2, no, no) \ SC( 94, 28, 26, 3, no, no) \ SC( 95, 28, 26, 4, no, no) \ \ SC( 96, 29, 27, 1, no, no) \ SC( 97, 29, 27, 2, no, no) \ SC( 
98, 29, 27, 3, no, no) \ SC( 99, 29, 27, 4, no, no) \ \ SC(100, 30, 28, 1, no, no) \ SC(101, 30, 28, 2, no, no) \ SC(102, 30, 28, 3, no, no) \ SC(103, 30, 28, 4, no, no) \ \ SC(104, 31, 29, 1, no, no) \ SC(105, 31, 29, 2, no, no) \ SC(106, 31, 29, 3, no, no) \ SC(107, 31, 29, 4, no, no) \ \ SC(108, 32, 30, 1, no, no) \ SC(109, 32, 30, 2, no, no) \ SC(110, 32, 30, 3, no, no) \ SC(111, 32, 30, 4, no, no) \ \ SC(112, 33, 31, 1, no, no) \ SC(113, 33, 31, 2, no, no) \ SC(114, 33, 31, 3, no, no) \ SC(115, 33, 31, 4, no, no) \ \ SC(116, 34, 32, 1, no, no) \ SC(117, 34, 32, 2, no, no) \ SC(118, 34, 32, 3, no, no) \ SC(119, 34, 32, 4, no, no) \ \ SC(120, 35, 33, 1, no, no) \ SC(121, 35, 33, 2, no, no) \ SC(122, 35, 33, 3, no, no) \ SC(123, 35, 33, 4, no, no) \ \ SC(124, 36, 34, 1, no, no) \ SC(125, 36, 34, 2, no, no) \ SC(126, 36, 34, 3, no, no) \ SC(127, 36, 34, 4, no, no) \ \ SC(128, 37, 35, 1, no, no) \ SC(129, 37, 35, 2, no, no) \ SC(130, 37, 35, 3, no, no) \ SC(131, 37, 35, 4, no, no) \ \ SC(132, 38, 36, 1, no, no) \ SC(133, 38, 36, 2, no, no) \ SC(134, 38, 36, 3, no, no) \ SC(135, 38, 36, 4, no, no) \ \ SC(136, 39, 37, 1, no, no) \ SC(137, 39, 37, 2, no, no) \ SC(138, 39, 37, 3, no, no) \ SC(139, 39, 37, 4, no, no) \ \ SC(140, 40, 38, 1, no, no) \ SC(141, 40, 38, 2, no, no) \ SC(142, 40, 38, 3, no, no) \ SC(143, 40, 38, 4, no, no) \ \ SC(144, 41, 39, 1, no, no) \ SC(145, 41, 39, 2, no, no) \ SC(146, 41, 39, 3, no, no) \ SC(147, 41, 39, 4, no, no) \ \ SC(148, 42, 40, 1, no, no) \ SC(149, 42, 40, 2, no, no) \ SC(150, 42, 40, 3, no, no) \ SC(151, 42, 40, 4, no, no) \ \ SC(152, 43, 41, 1, no, no) \ SC(153, 43, 41, 2, no, no) \ SC(154, 43, 41, 3, no, no) \ SC(155, 43, 41, 4, no, no) \ \ SC(156, 44, 42, 1, no, no) \ SC(157, 44, 42, 2, no, no) \ SC(158, 44, 42, 3, no, no) \ SC(159, 44, 42, 4, no, no) \ \ SC(160, 45, 43, 1, no, no) \ SC(161, 45, 43, 2, no, no) \ SC(162, 45, 43, 3, no, no) \ SC(163, 45, 43, 4, no, no) \ \ SC(164, 46, 44, 1, no, no) \ SC(165, 46, 44, 2, no, no) 
\ SC(166, 46, 44, 3, no, no) \ SC(167, 46, 44, 4, no, no) \ \ SC(168, 47, 45, 1, no, no) \ SC(169, 47, 45, 2, no, no) \ SC(170, 47, 45, 3, no, no) \ SC(171, 47, 45, 4, no, no) \ \ SC(172, 48, 46, 1, no, no) \ SC(173, 48, 46, 2, no, no) \ SC(174, 48, 46, 3, no, no) \ SC(175, 48, 46, 4, no, no) \ \ SC(176, 49, 47, 1, no, no) \ SC(177, 49, 47, 2, no, no) \ SC(178, 49, 47, 3, no, no) \ SC(179, 49, 47, 4, no, no) \ \ SC(180, 50, 48, 1, no, no) \ SC(181, 50, 48, 2, no, no) \ SC(182, 50, 48, 3, no, no) \ SC(183, 50, 48, 4, no, no) \ \ SC(184, 51, 49, 1, no, no) \ SC(185, 51, 49, 2, no, no) \ SC(186, 51, 49, 3, no, no) \ SC(187, 51, 49, 4, no, no) \ \ SC(188, 52, 50, 1, no, no) \ SC(189, 52, 50, 2, no, no) \ SC(190, 52, 50, 3, no, no) \ SC(191, 52, 50, 4, no, no) \ \ SC(192, 53, 51, 1, no, no) \ SC(193, 53, 51, 2, no, no) \ SC(194, 53, 51, 3, no, no) \ SC(195, 53, 51, 4, no, no) \ \ SC(196, 54, 52, 1, no, no) \ SC(197, 54, 52, 2, no, no) \ SC(198, 54, 52, 3, no, no) \ SC(199, 54, 52, 4, no, no) \ \ SC(200, 55, 53, 1, no, no) \ SC(201, 55, 53, 2, no, no) \ SC(202, 55, 53, 3, no, no) \ SC(203, 55, 53, 4, no, no) \ \ SC(204, 56, 54, 1, no, no) \ SC(205, 56, 54, 2, no, no) \ SC(206, 56, 54, 3, no, no) \ SC(207, 56, 54, 4, no, no) \ \ SC(208, 57, 55, 1, no, no) \ SC(209, 57, 55, 2, no, no) \ SC(210, 57, 55, 3, no, no) \ SC(211, 57, 55, 4, no, no) \ \ SC(212, 58, 56, 1, no, no) \ SC(213, 58, 56, 2, no, no) \ SC(214, 58, 56, 3, no, no) \ SC(215, 58, 56, 4, no, no) \ \ SC(216, 59, 57, 1, no, no) \ SC(217, 59, 57, 2, no, no) \ SC(218, 59, 57, 3, no, no) \ SC(219, 59, 57, 4, no, no) \ \ SC(220, 60, 58, 1, no, no) \ SC(221, 60, 58, 2, no, no) \ SC(222, 60, 58, 3, no, no) \ SC(223, 60, 58, 4, no, no) \ \ SC(224, 61, 59, 1, no, no) \ SC(225, 61, 59, 2, no, no) \ SC(226, 61, 59, 3, no, no) \ SC(227, 61, 59, 4, no, no) \ \ SC(228, 62, 60, 1, no, no) \ SC(229, 62, 60, 2, no, no) \ SC(230, 62, 60, 3, no, no) \ SC(231, 62, 60, 4, no, no) \ \ SC(232, 63, 61, 1, no, no) \ SC(233, 63, 61, 2, 
no, no) \ SC(234, 63, 61, 3, no, no) \ #define SIZE_CLASSES_DEFINED #define NTBINS 0 #define NLBINS 28 #define NBINS 43 #define LG_TINY_MAXCLASS "NA" #define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) #define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) #endif #ifndef SIZE_CLASSES_DEFINED # error "No size class definitions match configuration" #endif #undef SIZE_CLASSES_DEFINED /* * The small_size2bin lookup table uses uint8_t to encode each bin index, so we * cannot support more than 256 small size classes. Further constrain NBINS to * 255 since all small size classes, plus a "not small" size class must be * stored in 8 bits of arena_chunk_map_t's bits field. */ #if (NBINS > 255) # error "Too many small size classes" #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
199,875
46.931894
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_namespace.h
#define a0calloc JEMALLOC_N(a0calloc) #define a0free JEMALLOC_N(a0free) #define a0malloc JEMALLOC_N(a0malloc) #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) #define arena_bin_index JEMALLOC_N(arena_bin_index) #define arena_bin_info JEMALLOC_N(arena_bin_info) #define arena_boot JEMALLOC_N(arena_boot) #define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) #define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) #define arena_dalloc JEMALLOC_N(arena_dalloc) #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) #define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) #define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) #define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) #define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) #define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) #define arena_malloc JEMALLOC_N(arena_malloc) #define arena_malloc_large JEMALLOC_N(arena_malloc_large) #define arena_malloc_small JEMALLOC_N(arena_malloc_small) #define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) #define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) #define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) #define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) #define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) #define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) #define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) #define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) #define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) #define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) #define arena_mapbits_unallocated_set 
JEMALLOC_N(arena_mapbits_unallocated_set) #define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) #define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) #define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) #define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) #define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) #define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) #define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) #define arena_mapelm_to_pageind JEMALLOC_N(arena_mapelm_to_pageind) #define arena_mapp_get JEMALLOC_N(arena_mapp_get) #define arena_maxclass JEMALLOC_N(arena_maxclass) #define arena_new JEMALLOC_N(arena_new) #define arena_palloc JEMALLOC_N(arena_palloc) #define arena_postfork_child JEMALLOC_N(arena_postfork_child) #define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) #define arena_prefork JEMALLOC_N(arena_prefork) #define arena_prof_accum JEMALLOC_N(arena_prof_accum) #define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) #define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) #define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) #define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) #define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) #define arena_purge_all JEMALLOC_N(arena_purge_all) #define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) #define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) #define arena_run_regind JEMALLOC_N(arena_run_regind) #define arena_runs_avail_tree_iter JEMALLOC_N(arena_runs_avail_tree_iter) #define arena_salloc JEMALLOC_N(arena_salloc) #define arena_stats_merge 
JEMALLOC_N(arena_stats_merge) #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) #define arenas JEMALLOC_N(arenas) #define pools JEMALLOC_N(pools) #define arenas_booted JEMALLOC_N(arenas_booted) #define arenas_cleanup JEMALLOC_N(arenas_cleanup) #define arenas_extend JEMALLOC_N(arenas_extend) #define arenas_initialized JEMALLOC_N(arenas_initialized) #define arenas_lock JEMALLOC_N(arenas_lock) #define arenas_tls JEMALLOC_N(arenas_tls) #define arenas_tsd JEMALLOC_N(arenas_tsd) #define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) #define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) #define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) #define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper) #define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head) #define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) #define atomic_add_u JEMALLOC_N(atomic_add_u) #define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) #define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) #define atomic_add_z JEMALLOC_N(atomic_add_z) #define atomic_sub_u JEMALLOC_N(atomic_sub_u) #define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) #define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) #define atomic_sub_z JEMALLOC_N(atomic_sub_z) #define base_alloc JEMALLOC_N(base_alloc) #define base_boot JEMALLOC_N(base_boot) #define base_calloc JEMALLOC_N(base_calloc) #define base_free_fn JEMALLOC_N(base_free_fn) #define base_malloc_fn JEMALLOC_N(base_malloc_fn) #define base_node_alloc JEMALLOC_N(base_node_alloc) #define base_node_dalloc JEMALLOC_N(base_node_dalloc) #define base_pool JEMALLOC_N(base_pool) #define base_postfork_child JEMALLOC_N(base_postfork_child) #define base_postfork_parent JEMALLOC_N(base_postfork_parent) #define base_prefork JEMALLOC_N(base_prefork) #define bitmap_full JEMALLOC_N(bitmap_full) #define bitmap_get JEMALLOC_N(bitmap_get) #define bitmap_info_init JEMALLOC_N(bitmap_info_init) #define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) 
#define bitmap_init JEMALLOC_N(bitmap_init) #define bitmap_set JEMALLOC_N(bitmap_set) #define bitmap_sfu JEMALLOC_N(bitmap_sfu) #define bitmap_size JEMALLOC_N(bitmap_size) #define bitmap_unset JEMALLOC_N(bitmap_unset) #define bt_init JEMALLOC_N(bt_init) #define buferror JEMALLOC_N(buferror) #define choose_arena JEMALLOC_N(choose_arena) #define choose_arena_hard JEMALLOC_N(choose_arena_hard) #define chunk_alloc_arena JEMALLOC_N(chunk_alloc_arena) #define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) #define chunk_alloc_default JEMALLOC_N(chunk_alloc_default) #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) #define chunk_global_boot JEMALLOC_N(chunk_global_boot) #define chunk_boot JEMALLOC_N(chunk_boot) #define chunk_dalloc_default JEMALLOC_N(chunk_dalloc_default) #define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) #define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) #define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) #define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) #define chunk_in_dss JEMALLOC_N(chunk_in_dss) #define chunk_npages JEMALLOC_N(chunk_npages) #define chunk_postfork_child JEMALLOC_N(chunk_postfork_child) #define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent) #define chunk_prefork JEMALLOC_N(chunk_prefork) #define chunk_unmap JEMALLOC_N(chunk_unmap) #define chunk_record JEMALLOC_N(chunk_record) #define chunks_mtx JEMALLOC_N(chunks_mtx) #define chunks_rtree JEMALLOC_N(chunks_rtree) #define chunksize JEMALLOC_N(chunksize) #define chunksize_mask JEMALLOC_N(chunksize_mask) #define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) #define ckh_count JEMALLOC_N(ckh_count) #define ckh_delete JEMALLOC_N(ckh_delete) #define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) #define 
ckh_insert JEMALLOC_N(ckh_insert) #define ckh_isearch JEMALLOC_N(ckh_isearch) #define ckh_iter JEMALLOC_N(ckh_iter) #define ckh_new JEMALLOC_N(ckh_new) #define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) #define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) #define ckh_rebuild JEMALLOC_N(ckh_rebuild) #define ckh_remove JEMALLOC_N(ckh_remove) #define ckh_search JEMALLOC_N(ckh_search) #define ckh_string_hash JEMALLOC_N(ckh_string_hash) #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) #define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) #define ckh_try_insert JEMALLOC_N(ckh_try_insert) #define ctl_boot JEMALLOC_N(ctl_boot) #define ctl_bymib JEMALLOC_N(ctl_bymib) #define ctl_byname JEMALLOC_N(ctl_byname) #define ctl_nametomib JEMALLOC_N(ctl_nametomib) #define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) #define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) #define ctl_prefork JEMALLOC_N(ctl_prefork) #define dss_prec_names JEMALLOC_N(dss_prec_names) #define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) #define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) #define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) #define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) #define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) #define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) #define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) #define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) #define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) #define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) #define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) #define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) #define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) #define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) #define extent_tree_ad_reverse_iter_start 
JEMALLOC_N(extent_tree_ad_reverse_iter_start) #define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) #define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) #define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) #define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) #define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) #define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) #define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) #define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) #define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) #define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) #define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) #define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) #define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) #define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) #define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) #define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) #define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) #define get_errno JEMALLOC_N(get_errno) #define hash JEMALLOC_N(hash) #define hash_fmix_32 JEMALLOC_N(hash_fmix_32) #define hash_fmix_64 JEMALLOC_N(hash_fmix_64) #define hash_get_block_32 JEMALLOC_N(hash_get_block_32) #define hash_get_block_64 JEMALLOC_N(hash_get_block_64) #define hash_rotl_32 JEMALLOC_N(hash_rotl_32) #define hash_rotl_64 JEMALLOC_N(hash_rotl_64) #define hash_x64_128 JEMALLOC_N(hash_x64_128) #define hash_x86_128 JEMALLOC_N(hash_x86_128) #define hash_x86_32 JEMALLOC_N(hash_x86_32) #define huge_allocated JEMALLOC_N(huge_allocated) #define huge_boot JEMALLOC_N(huge_boot) #define huge_dalloc JEMALLOC_N(huge_dalloc) #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) #define huge_malloc JEMALLOC_N(huge_malloc) 
#define huge_ndalloc JEMALLOC_N(huge_ndalloc) #define huge_nmalloc JEMALLOC_N(huge_nmalloc) #define huge_palloc JEMALLOC_N(huge_palloc) #define huge_postfork_child JEMALLOC_N(huge_postfork_child) #define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) #define huge_prefork JEMALLOC_N(huge_prefork) #define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) #define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) #define huge_ralloc JEMALLOC_N(huge_ralloc) #define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) #define huge_salloc JEMALLOC_N(huge_salloc) #define icalloc JEMALLOC_N(icalloc) #define icalloct JEMALLOC_N(icalloct) #define idalloc JEMALLOC_N(idalloc) #define idalloct JEMALLOC_N(idalloct) #define imalloc JEMALLOC_N(imalloc) #define imalloct JEMALLOC_N(imalloct) #define in_valgrind JEMALLOC_N(in_valgrind) #define ipalloc JEMALLOC_N(ipalloc) #define ipalloct JEMALLOC_N(ipalloct) #define iqalloc JEMALLOC_N(iqalloc) #define iqalloct JEMALLOC_N(iqalloct) #define iralloc JEMALLOC_N(iralloc) #define iralloct JEMALLOC_N(iralloct) #define iralloct_realign JEMALLOC_N(iralloct_realign) #define isalloc JEMALLOC_N(isalloc) #define isthreaded JEMALLOC_N(isthreaded) #define ivsalloc JEMALLOC_N(ivsalloc) #define ixalloc JEMALLOC_N(ixalloc) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) #define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) #define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) #define lg_floor JEMALLOC_N(lg_floor) #define malloc_cprintf JEMALLOC_N(malloc_cprintf) #define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) #define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) #define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) #define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) #define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) #define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) #define malloc_rwlock_init JEMALLOC_N(malloc_rwlock_init) #define 
malloc_rwlock_postfork_child JEMALLOC_N(malloc_rwlock_postfork_child) #define malloc_rwlock_postfork_parent JEMALLOC_N(malloc_rwlock_postfork_parent) #define malloc_rwlock_prefork JEMALLOC_N(malloc_rwlock_prefork) #define malloc_rwlock_rdlock JEMALLOC_N(malloc_rwlock_rdlock) #define malloc_rwlock_wrlock JEMALLOC_N(malloc_rwlock_wrlock) #define malloc_rwlock_unlock JEMALLOC_N(malloc_rwlock_unlock) #define malloc_rwlock_destroy JEMALLOC_N(malloc_rwlock_destroy) #define malloc_printf JEMALLOC_N(malloc_printf) #define malloc_snprintf JEMALLOC_N(malloc_snprintf) #define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) #define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) #define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) #define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) #define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) #define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) #define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) #define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) #define malloc_write JEMALLOC_N(malloc_write) #define map_bias JEMALLOC_N(map_bias) #define mb_write JEMALLOC_N(mb_write) #define mutex_boot JEMALLOC_N(mutex_boot) #define narenas_auto JEMALLOC_N(narenas_auto) #define narenas_total JEMALLOC_N(narenas_total) #define narenas_total_get JEMALLOC_N(narenas_total_get) #define ncpus JEMALLOC_N(ncpus) #define nhbins JEMALLOC_N(nhbins) #define npools JEMALLOC_N(npools) #define npools_cnt JEMALLOC_N(npools_cnt) #define opt_abort JEMALLOC_N(opt_abort) #define opt_dss JEMALLOC_N(opt_dss) #define opt_junk JEMALLOC_N(opt_junk) #define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) #define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) #define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) #define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) #define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) #define opt_narenas JEMALLOC_N(opt_narenas) #define opt_prof JEMALLOC_N(opt_prof) #define opt_prof_accum 
JEMALLOC_N(opt_prof_accum) #define opt_prof_active JEMALLOC_N(opt_prof_active) #define opt_prof_final JEMALLOC_N(opt_prof_final) #define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) #define opt_prof_leak JEMALLOC_N(opt_prof_leak) #define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) #define opt_quarantine JEMALLOC_N(opt_quarantine) #define opt_redzone JEMALLOC_N(opt_redzone) #define opt_stats_print JEMALLOC_N(opt_stats_print) #define opt_tcache JEMALLOC_N(opt_tcache) #define opt_utrace JEMALLOC_N(opt_utrace) #define opt_xmalloc JEMALLOC_N(opt_xmalloc) #define opt_zero JEMALLOC_N(opt_zero) #define p2rz JEMALLOC_N(p2rz) #define pages_purge JEMALLOC_N(pages_purge) #define pools_shared_data_initialized JEMALLOC_N(pools_shared_data_initialized) #define pow2_ceil JEMALLOC_N(pow2_ceil) #define prof_backtrace JEMALLOC_N(prof_backtrace) #define prof_boot0 JEMALLOC_N(prof_boot0) #define prof_boot1 JEMALLOC_N(prof_boot1) #define prof_boot2 JEMALLOC_N(prof_boot2) #define prof_bt_count JEMALLOC_N(prof_bt_count) #define prof_ctx_get JEMALLOC_N(prof_ctx_get) #define prof_ctx_set JEMALLOC_N(prof_ctx_set) #define prof_dump_open JEMALLOC_N(prof_dump_open) #define prof_free JEMALLOC_N(prof_free) #define prof_gdump JEMALLOC_N(prof_gdump) #define prof_idump JEMALLOC_N(prof_idump) #define prof_interval JEMALLOC_N(prof_interval) #define prof_lookup JEMALLOC_N(prof_lookup) #define prof_malloc JEMALLOC_N(prof_malloc) #define prof_malloc_record_object JEMALLOC_N(prof_malloc_record_object) #define prof_mdump JEMALLOC_N(prof_mdump) #define prof_postfork_child JEMALLOC_N(prof_postfork_child) #define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) #define prof_prefork JEMALLOC_N(prof_prefork) #define prof_realloc JEMALLOC_N(prof_realloc) #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) #define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) #define prof_tdata_booted JEMALLOC_N(prof_tdata_booted) #define prof_tdata_cleanup 
JEMALLOC_N(prof_tdata_cleanup) #define prof_tdata_get JEMALLOC_N(prof_tdata_get) #define prof_tdata_init JEMALLOC_N(prof_tdata_init) #define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) #define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) #define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd) #define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) #define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) #define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) #define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper) #define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head) #define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) #define quarantine JEMALLOC_N(quarantine) #define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) #define quarantine_boot JEMALLOC_N(quarantine_boot) #define quarantine_booted JEMALLOC_N(quarantine_booted) #define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) #define quarantine_init JEMALLOC_N(quarantine_init) #define quarantine_tls JEMALLOC_N(quarantine_tls) #define quarantine_tsd JEMALLOC_N(quarantine_tsd) #define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) #define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) #define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) #define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper) #define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head) #define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) #define register_zone JEMALLOC_N(register_zone) #define rtree_delete JEMALLOC_N(rtree_delete) #define rtree_get JEMALLOC_N(rtree_get) #define rtree_get_locked JEMALLOC_N(rtree_get_locked) #define rtree_new JEMALLOC_N(rtree_new) #define rtree_postfork_child JEMALLOC_N(rtree_postfork_child) #define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent) #define rtree_prefork JEMALLOC_N(rtree_prefork) #define rtree_set JEMALLOC_N(rtree_set) #define s2u JEMALLOC_N(s2u) 
#define sa2u JEMALLOC_N(sa2u) #define set_errno JEMALLOC_N(set_errno) #define small_bin2size JEMALLOC_N(small_bin2size) #define small_bin2size_compute JEMALLOC_N(small_bin2size_compute) #define small_bin2size_lookup JEMALLOC_N(small_bin2size_lookup) #define small_bin2size_tab JEMALLOC_N(small_bin2size_tab) #define small_s2u JEMALLOC_N(small_s2u) #define small_s2u_compute JEMALLOC_N(small_s2u_compute) #define small_s2u_lookup JEMALLOC_N(small_s2u_lookup) #define small_size2bin JEMALLOC_N(small_size2bin) #define small_size2bin_compute JEMALLOC_N(small_size2bin_compute) #define small_size2bin_lookup JEMALLOC_N(small_size2bin_lookup) #define small_size2bin_tab JEMALLOC_N(small_size2bin_tab) #define stats_cactive JEMALLOC_N(stats_cactive) #define stats_cactive_add JEMALLOC_N(stats_cactive_add) #define stats_cactive_get JEMALLOC_N(stats_cactive_get) #define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) #define stats_chunks JEMALLOC_N(stats_chunks) #define stats_print JEMALLOC_N(stats_print) #define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) #define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) #define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) #define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) #define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) #define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) #define tcache_bin_info JEMALLOC_N(tcache_bin_info) #define tcache_boot0 JEMALLOC_N(tcache_boot0) #define tcache_boot1 JEMALLOC_N(tcache_boot1) #define tcache_booted JEMALLOC_N(tcache_booted) #define tcache_create JEMALLOC_N(tcache_create) #define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) #define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) #define tcache_destroy JEMALLOC_N(tcache_destroy) #define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) #define 
tcache_enabled_get JEMALLOC_N(tcache_enabled_get) #define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) #define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) #define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) #define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd) #define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) #define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) #define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) #define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper) #define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head) #define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) #define tcache_event JEMALLOC_N(tcache_event) #define tcache_event_hard JEMALLOC_N(tcache_event_hard) #define tcache_flush JEMALLOC_N(tcache_flush) #define tcache_get JEMALLOC_N(tcache_get) #define tcache_get_hard JEMALLOC_N(tcache_get_hard) #define tcache_initialized JEMALLOC_N(tcache_initialized) #define tcache_maxclass JEMALLOC_N(tcache_maxclass) #define tcache_salloc JEMALLOC_N(tcache_salloc) #define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) #define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) #define tcache_tls JEMALLOC_N(tcache_tls) #define tcache_tsd JEMALLOC_N(tcache_tsd) #define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) #define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) #define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) #define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper) #define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head) #define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) #define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) #define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) #define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) #define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd) #define 
thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) #define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) #define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) #define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper) #define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head) #define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) #define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) #define tsd_init_finish JEMALLOC_N(tsd_init_finish) #define u2rz JEMALLOC_N(u2rz) #define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) #define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) #define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) #define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) #define pool_new JEMALLOC_N(pool_new) #define pool_destroy JEMALLOC_N(pool_destroy) #define pools_lock JEMALLOC_N(pools_lock) #define pool_base_lock JEMALLOC_N(pool_base_lock) #define pool_prefork JEMALLOC_N(pool_prefork) #define pool_postfork_parent JEMALLOC_N(pool_postfork_parent) #define pool_postfork_child JEMALLOC_N(pool_postfork_child) #define pool_alloc JEMALLOC_N(pool_alloc) #define vec_get JEMALLOC_N(vec_get) #define vec_set JEMALLOC_N(vec_set) #define vec_delete JEMALLOC_N(vec_delete)
25,252
53.778742
95
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h
/* ./../windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ #ifndef JEMALLOC_INTERNAL_DEFS_H_ #define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. */ #define JEMALLOC_PREFIX "je_vmem_" #define JEMALLOC_CPREFIX "JE_VMEM_" /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. */ #define JEMALLOC_PRIVATE_NAMESPACE je_vmem_je_ /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #define CPU_SPINWAIT /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ /* #undef JEMALLOC_ATOMIC9 */ /* * Defined if OSAtomic*() functions are available, as provided by Darwin, and * documented in the atomic(3) manual page. */ /* #undef JEMALLOC_OSATOMIC */ /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the * functions are defined in libgcc instead of being inlines) */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ /* * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the * functions are defined in libgcc instead of being inlines) */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ /* * Defined if __builtin_clz() and __builtin_clzl() are available. */ /* #undef JEMALLOC_HAVE_BUILTIN_CLZ */ /* * Defined if madvise(2) is available. 
*/ /* #undef JEMALLOC_HAVE_MADVISE */ /* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ /* #undef JEMALLOC_OSSPIN */ /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ /* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ /* * Defined if threaded initialization is known to be safe on this platform. * Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ /* #undef JEMALLOC_THREADED_INIT */ /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ /* #undef JEMALLOC_MUTEX_INIT_CB */ /* Non-empty if the tls_model attribute is supported. */ #define JEMALLOC_TLS_MODEL /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ #define JEMALLOC_CC_SILENCE /* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ /* #undef JEMALLOC_CODE_COVERAGE */ /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ /* #undef JEMALLOC_DEBUG */ /* JEMALLOC_STATS enables statistics calculation. */ #define JEMALLOC_STATS /* JEMALLOC_PROF enables allocation profiling. */ /* #undef JEMALLOC_PROF */ /* Use libunwind for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBUNWIND */ /* Use libgcc for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBGCC */ /* Use gcc intrinsics for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_GCC */ /* * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. 
* This makes it possible to allocate/deallocate objects without any locking * when the cache is in the steady state. */ #define JEMALLOC_TCACHE /* * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage * segment (DSS). */ /* #undef JEMALLOC_DSS */ /* Support memory filling (junk/zero/quarantine/redzone). */ #define JEMALLOC_FILL /* Support utrace(2)-based tracing. */ /* #undef JEMALLOC_UTRACE */ /* Support Valgrind. */ /* #undef JEMALLOC_VALGRIND */ /* Support optional abort() on OOM. */ /* #undef JEMALLOC_XMALLOC */ /* Support lazy locking (avoid locking unless a second thread is launched). */ /* #undef JEMALLOC_LAZY_LOCK */ /* One page is 2^STATIC_PAGE_SHIFT bytes. */ #define STATIC_PAGE_SHIFT 12 /* * If defined, use munmap() to unmap freed chunks, rather than storing them for * later reuse. This is disabled by default on Linux because common sequences * of mmap()/munmap() calls will cause virtual memory map holes. */ /* #undef JEMALLOC_MUNMAP */ /* TLS is used to map arenas and magazine caches to threads. */ /* #undef JEMALLOC_TLS */ /* * ffs()/ffsl() functions to use for bitmapping. Don't use these directly; * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h. */ #define JEMALLOC_INTERNAL_FFSL ffsl #define JEMALLOC_INTERNAL_FFS ffs /* * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside * within jemalloc-owned chunks before dereferencing them. */ /* #undef JEMALLOC_IVSALLOC */ /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ /* #undef JEMALLOC_ZONE */ /* #undef JEMALLOC_ZONE_VERSION */ /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched. * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being * unused, such that they will be discarded rather * than swapped out. 
*/ /* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */ /* #undef JEMALLOC_PURGE_MADVISE_FREE */ /* * Define if operating system has alloca.h header. */ /* #undef JEMALLOC_HAS_ALLOCA_H */ /* C99 restrict keyword supported. */ /* #undef JEMALLOC_HAS_RESTRICT */ /* For use by hash code. */ /* #undef JEMALLOC_BIG_ENDIAN */ /* sizeof(int) == 2^LG_SIZEOF_INT. */ #define LG_SIZEOF_INT 2 /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #define LG_SIZEOF_LONG 2 /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #define LG_SIZEOF_INTMAX_T 3 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
6,731
30.457944
142
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_unnamespace.h
#undef je_pool_create #undef je_pool_delete #undef je_pool_malloc #undef je_pool_calloc #undef je_pool_ralloc #undef je_pool_aligned_alloc #undef je_pool_free #undef je_pool_malloc_usable_size #undef je_pool_malloc_stats_print #undef je_pool_extend #undef je_pool_set_alloc_funcs #undef je_pool_check #undef je_malloc_conf #undef je_malloc_message #undef je_malloc #undef je_calloc #undef je_posix_memalign #undef je_aligned_alloc #undef je_realloc #undef je_free #undef je_mallocx #undef je_rallocx #undef je_xallocx #undef je_sallocx #undef je_dallocx #undef je_nallocx #undef je_mallctl #undef je_mallctlnametomib #undef je_mallctlbymib #undef je_navsnprintf #undef je_malloc_stats_print #undef je_malloc_usable_size
720
20.848485
33
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_namespace.h
#define je_pool_create JEMALLOC_N(pool_create) #define je_pool_delete JEMALLOC_N(pool_delete) #define je_pool_malloc JEMALLOC_N(pool_malloc) #define je_pool_calloc JEMALLOC_N(pool_calloc) #define je_pool_ralloc JEMALLOC_N(pool_ralloc) #define je_pool_aligned_alloc JEMALLOC_N(pool_aligned_alloc) #define je_pool_free JEMALLOC_N(pool_free) #define je_pool_malloc_usable_size JEMALLOC_N(pool_malloc_usable_size) #define je_pool_malloc_stats_print JEMALLOC_N(pool_malloc_stats_print) #define je_pool_extend JEMALLOC_N(pool_extend) #define je_pool_set_alloc_funcs JEMALLOC_N(pool_set_alloc_funcs) #define je_pool_check JEMALLOC_N(pool_check) #define je_malloc_conf JEMALLOC_N(malloc_conf) #define je_malloc_message JEMALLOC_N(malloc_message) #define je_malloc JEMALLOC_N(malloc) #define je_calloc JEMALLOC_N(calloc) #define je_posix_memalign JEMALLOC_N(posix_memalign) #define je_aligned_alloc JEMALLOC_N(aligned_alloc) #define je_realloc JEMALLOC_N(realloc) #define je_free JEMALLOC_N(free) #define je_mallocx JEMALLOC_N(mallocx) #define je_rallocx JEMALLOC_N(rallocx) #define je_xallocx JEMALLOC_N(xallocx) #define je_sallocx JEMALLOC_N(sallocx) #define je_dallocx JEMALLOC_N(dallocx) #define je_nallocx JEMALLOC_N(nallocx) #define je_mallctl JEMALLOC_N(mallctl) #define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) #define je_mallctlbymib JEMALLOC_N(mallctlbymib) #define je_navsnprintf JEMALLOC_N(navsnprintf) #define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) #define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
1,536
45.575758
70
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal.h
#ifndef JEMALLOC_INTERNAL_H #define JEMALLOC_INTERNAL_H #include "jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" #ifdef JEMALLOC_UTRACE #include <sys/ktrace.h> #endif #define JEMALLOC_NO_DEMANGLE #ifdef JEMALLOC_JET # define JEMALLOC_N(n) jet_##n # include "jemalloc/internal/public_namespace.h" # define JEMALLOC_NO_RENAME # include "jemalloc/jemalloc.h" # undef JEMALLOC_NO_RENAME #else # define JEMALLOC_N(n) je_vmem_je_##n # include "jemalloc/jemalloc.h" #endif #include "jemalloc/internal/private_namespace.h" static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; static const bool have_dss = #ifdef JEMALLOC_DSS true #else false #endif ; static const bool config_fill = #ifdef JEMALLOC_FILL true #else false #endif ; static const bool config_lazy_lock = #ifdef JEMALLOC_LAZY_LOCK true #else false #endif ; static const bool config_prof = #ifdef JEMALLOC_PROF true #else false #endif ; static const bool config_prof_libgcc = #ifdef JEMALLOC_PROF_LIBGCC true #else false #endif ; static const bool config_prof_libunwind = #ifdef JEMALLOC_PROF_LIBUNWIND true #else false #endif ; static const bool config_munmap = #ifdef JEMALLOC_MUNMAP true #else false #endif ; static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; static const bool config_tcache = #ifdef JEMALLOC_TCACHE true #else false #endif ; static const bool config_tls = #ifdef JEMALLOC_TLS true #else false #endif ; static const bool config_utrace = #ifdef JEMALLOC_UTRACE true #else false #endif ; static const bool config_valgrind = #ifdef JEMALLOC_VALGRIND true #else false #endif ; static const bool config_xmalloc = #ifdef JEMALLOC_XMALLOC true #else false #endif ; static const bool config_ivsalloc = #ifdef JEMALLOC_IVSALLOC true #else false #endif ; #ifdef JEMALLOC_ATOMIC9 #include <machine/atomic.h> #endif #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) #include <libkern/OSAtomic.h> #endif #ifdef JEMALLOC_ZONE #include 
<mach/mach_error.h> #include <mach/mach_init.h> #include <mach/vm_map.h> #include <malloc/malloc.h> #endif #define RB_COMPACT #include "jemalloc/internal/rb.h" #include "jemalloc/internal/qr.h" #include "jemalloc/internal/ql.h" /* * jemalloc can conceptually be broken into components (arena, tcache, etc.), * but there are circular dependencies that cannot be broken without * substantial performance degradation. In order to reduce the effect on * visual code flow, read the header files in multiple passes, with one of the * following cpp variables defined during each pass: * * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data * types. * JEMALLOC_H_STRUCTS : Data structures. * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. * JEMALLOC_H_INLINES : Inline functions. */ /******************************************************************************/ #define JEMALLOC_H_TYPES #include "jemalloc/internal/jemalloc_internal_macros.h" #define MALLOCX_LG_ALIGN_MASK ((int)0x3f) /* Smallest size class to support. */ #define LG_TINY_MIN 3 #define TINY_MIN (1U << LG_TINY_MIN) /* * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size * classes). 
*/ #ifndef LG_QUANTUM # if (defined(__i386__) || defined(_M_IX86)) # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif # ifdef __sparc64__ # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif # ifdef __aarch64__ # define LG_QUANTUM 4 # endif # ifdef __hppa__ # define LG_QUANTUM 4 # endif # ifdef __mips__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif # ifdef __s390__ # define LG_QUANTUM 4 # endif # ifdef __SH4__ # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 # endif # ifdef __le32__ # define LG_QUANTUM 4 # endif # ifndef LG_QUANTUM # error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" # endif #endif #define QUANTUM ((size_t)(1U << LG_QUANTUM)) #define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. */ #define QUANTUM_CEILING(a) \ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) #define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. */ #define LONG_CEILING(a) \ (((a) + LONG_MASK) & ~LONG_MASK) #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) #define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ #define PTR_CEILING(a) \ (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. * In addition, this controls the spacing of cacheline-spaced size classes. * * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can * only handle raw constants. */ #define LG_CACHELINE 6 #define CACHELINE 64 #define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ #define CACHELINE_CEILING(s) \ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Page size. 
STATIC_PAGE_SHIFT is determined by the configure script. */ #ifdef PAGE_MASK # undef PAGE_MASK #endif #define LG_PAGE STATIC_PAGE_SHIFT #define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) #define PAGE_MASK ((size_t)(PAGE - 1)) /* Return the smallest pagesize multiple that is >= s. */ #define PAGE_CEILING(s) \ (((s) + PAGE_MASK) & ~PAGE_MASK) /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ ((void *)((uintptr_t)(a) & (-(alignment)))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & ((alignment) - 1))) /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ (((s) + ((alignment) - 1)) & (-(alignment))) /* Declare a variable length array */ #if __STDC_VERSION__ < 199901L # ifdef _MSC_VER # include <malloc.h> #ifndef alloca # define alloca _alloca #endif # else # ifdef JEMALLOC_HAS_ALLOCA_H # include <alloca.h> # else # include <stdlib.h> # endif # endif # define VARIABLE_ARRAY(type, name, count) \ type *name = alloca(sizeof(type) * (count)) #else # define VARIABLE_ARRAY(type, name, count) type name[(count)] #endif #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include 
"jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #include "jemalloc/internal/pool.h" #include "jemalloc/internal/vector.h" #undef JEMALLOC_H_TYPES /******************************************************************************/ #define JEMALLOC_H_STRUCTS #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #include "jemalloc/internal/pool.h" #include "jemalloc/internal/vector.h" typedef struct { uint64_t allocated; uint64_t deallocated; } thread_allocated_t; /* * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro * argument. */ #define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0}) #undef JEMALLOC_H_STRUCTS /******************************************************************************/ #define JEMALLOC_H_EXTERNS extern bool opt_abort; extern bool opt_junk; extern size_t opt_quarantine; extern bool opt_redzone; extern bool opt_utrace; extern bool opt_xmalloc; extern bool opt_zero; extern size_t opt_narenas; extern bool in_valgrind; /* Number of CPUs. 
*/ extern unsigned ncpus; extern unsigned npools; extern unsigned npools_cnt; extern pool_t base_pool; extern pool_t **pools; extern malloc_mutex_t pools_lock; extern void *(*base_malloc_fn)(size_t); extern void (*base_free_fn)(void *); extern bool pools_shared_data_create(void); arena_t *arenas_extend(pool_t *pool, unsigned ind); bool arenas_tsd_extend(tsd_pool_t *tsd, unsigned len); void arenas_cleanup(void *arg); arena_t *choose_arena_hard(pool_t *pool); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #include "jemalloc/internal/pool.h" #include "jemalloc/internal/vector.h" #undef JEMALLOC_H_EXTERNS /******************************************************************************/ #define JEMALLOC_H_INLINES #include "jemalloc/internal/pool.h" #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" 
#include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" /* * Include arena.h the first time in order to provide inline functions for this * header's inlines. */ #define JEMALLOC_ARENA_INLINE_A #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_A #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, tsd_pool_t) size_t s2u(size_t size); size_t sa2u(size_t size, size_t alignment); unsigned narenas_total_get(pool_t *pool); arena_t *choose_arena(arena_t *arena); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) /* * Map of pthread_self() --> arenas[???], used for selecting an arena to use * for allocations. */ malloc_tsd_externs(arenas, tsd_pool_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, tsd_pool_t, {0}, arenas_cleanup) /* * Check if the arena is dummy. */ JEMALLOC_ALWAYS_INLINE bool is_arena_dummy(arena_t *arena) { return (arena->ind == ARENA_DUMMY_IND); } /* * Compute usable size that would result from allocating an object with the * specified size. */ JEMALLOC_ALWAYS_INLINE size_t s2u(size_t size) { if (size <= SMALL_MAXCLASS) return (small_s2u(size)); if (size <= arena_maxclass) return (PAGE_CEILING(size)); return (CHUNK_CEILING(size)); } /* * Compute usable size that would result from allocating an object with the * specified size and alignment. */ JEMALLOC_ALWAYS_INLINE size_t sa2u(size_t size, size_t alignment) { size_t usize; assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* * Round size up to the nearest multiple of alignment. * * This done, we can take advantage of the fact that for each small * size class, every object is aligned at the smallest power of two * that is non-zero in the base two representation of the size. 
For * example: * * Size | Base 2 | Minimum alignment * -----+----------+------------------ * 96 | 1100000 | 32 * 144 | 10100000 | 32 * 192 | 11000000 | 64 */ usize = ALIGNMENT_CEILING(size, alignment); /* * (usize < size) protects against the combination of maximal * alignment and size greater than maximal alignment. */ if (usize < size) { /* size_t overflow. */ return (0); } if (usize <= arena_maxclass && alignment <= PAGE) { if (usize <= SMALL_MAXCLASS) return (small_s2u(usize)); return (PAGE_CEILING(usize)); } else { size_t run_size; /* * We can't achieve subpage alignment, so round up alignment * permanently; it makes later calculations simpler. */ alignment = PAGE_CEILING(alignment); usize = PAGE_CEILING(size); /* * (usize < size) protects against very large sizes within * PAGE of SIZE_T_MAX. * * (usize + alignment < usize) protects against the * combination of maximal alignment and usize large enough * to cause overflow. This is similar to the first overflow * check above, but it needs to be repeated due to the new * usize value, which may now be *equal* to maximal * alignment, whereas before we only detected overflow if the * original size was *greater* than maximal alignment. */ if (usize < size || usize + alignment < usize) { /* size_t overflow. */ return (0); } /* * Calculate the size of the over-size run that arena_palloc() * would need to allocate in order to guarantee the alignment. * If the run wouldn't fit within a chunk, round up to a huge * allocation size. */ run_size = usize + alignment - PAGE; if (run_size <= arena_maxclass) return (PAGE_CEILING(usize)); return (CHUNK_CEILING(usize)); } } JEMALLOC_INLINE unsigned narenas_total_get(pool_t *pool) { unsigned narenas; malloc_rwlock_rdlock(&pool->arenas_lock); narenas = pool->narenas_total; malloc_rwlock_unlock(&pool->arenas_lock); return (narenas); } /* * Choose an arena based on a per-thread value. * Arena pointer must be either a valid arena pointer or a dummy arena with * pool field filled. 
*/ JEMALLOC_INLINE arena_t * choose_arena(arena_t *arena) { arena_t *ret; tsd_pool_t *tsd; pool_t *pool; if (!is_arena_dummy(arena)) return (arena); pool = arena->pool; tsd = arenas_tsd_get(); /* expand arenas array if necessary */ if ((tsd->npools <= pool->pool_id) && arenas_tsd_extend(tsd, pool->pool_id)) { return (NULL); } if ( (tsd->seqno[pool->pool_id] != pool->seqno) || (ret = tsd->arenas[pool->pool_id]) == NULL) { ret = choose_arena_hard(pool); assert(ret != NULL); } return (ret); } #endif #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/rtree.h" /* * Include arena.h the second and third times in order to resolve circular * dependencies with tcache.h. */ #define JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/tcache.h" #define JEMALLOC_ARENA_INLINE_C #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_C #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE void *imalloct(size_t size, bool try_tcache, arena_t *arena); void *imalloc(size_t size); void *pool_imalloc(pool_t *pool, size_t size); void *icalloct(size_t size, bool try_tcache, arena_t *arena); void *icalloc(size_t size); void *pool_icalloc(pool_t *pool, size_t size); void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena); void *ipalloc(size_t usize, size_t alignment, bool zero); void *pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero); size_t isalloc(const void *ptr, bool demote); size_t pool_isalloc(pool_t *pool, const void *ptr, bool demote); size_t ivsalloc(const void *ptr, bool demote); size_t u2rz(size_t usize); size_t p2rz(const void *ptr); void idalloct(void *ptr, bool try_tcache); void pool_idalloct(pool_t *pool, void *ptr, bool try_tcache); void idalloc(void *ptr); void iqalloct(void *ptr, bool try_tcache); void pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache); void 
iqalloc(void *ptr); void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero); void *pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra, size_t alignment, bool zero); bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero); int msc_clz(unsigned int val); malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) # ifdef _MSC_VER JEMALLOC_ALWAYS_INLINE int msc_clz(unsigned int val) { unsigned int res = 0; # if LG_SIZEOF_INT == 2 if (_BitScanReverse(&res, val)) { return 31 - res; } else { return 32; } # elif LG_SIZEOF_INT == 3 if (_BitScanReverse64(&res, val)) { return 63 - res; } else { return 64; } # else # error "Unsupported clz function for that size of int" # endif } #endif JEMALLOC_ALWAYS_INLINE void * imalloct(size_t size, bool try_tcache, arena_t *arena) { assert(size != 0); if (size <= arena_maxclass) return (arena_malloc(arena, size, false, try_tcache)); else return (huge_malloc(arena, size, false)); } JEMALLOC_ALWAYS_INLINE void * imalloc(size_t size) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, &base_pool); return (imalloct(size, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * pool_imalloc(pool_t *pool, size_t size) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, pool); return (imalloct(size, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * icalloct(size_t size, bool try_tcache, arena_t *arena) { if (size <= arena_maxclass) return (arena_malloc(arena, size, true, try_tcache)); else return (huge_malloc(arena, size, true)); } JEMALLOC_ALWAYS_INLINE void * icalloc(size_t size) { arena_t dummy; 
DUMMY_ARENA_INITIALIZE(dummy, &base_pool); return (icalloct(size, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * pool_icalloc(pool_t *pool, size_t size) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, pool); return (icalloct(size, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena) { void *ret; assert(usize != 0); assert(usize == sa2u(usize, alignment)); if (usize <= arena_maxclass && alignment <= PAGE) ret = arena_malloc(arena, usize, zero, try_tcache); else { if (usize <= arena_maxclass) { ret = arena_palloc(choose_arena(arena), usize, alignment, zero); } else if (alignment <= chunksize) ret = huge_malloc(arena, usize, zero); else ret = huge_palloc(arena, usize, alignment, zero); } assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); return (ret); } JEMALLOC_ALWAYS_INLINE void * ipalloc(size_t usize, size_t alignment, bool zero) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, &base_pool); return (ipalloct(usize, alignment, zero, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, pool); return (ipalloct(usize, alignment, zero, true, &dummy)); } /* * Typical usage: * void *ptr = [...] * size_t sz = isalloc(ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t isalloc(const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. */ assert(config_prof || demote == false); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) ret = arena_salloc(ptr, demote); else ret = huge_salloc(ptr); return (ret); } /* * Typical usage: * void *ptr = [...] * size_t sz = isalloc(ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t pool_isalloc(pool_t *pool, const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. 
*/ assert(config_prof || demote == false); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) ret = arena_salloc(ptr, demote); else ret = huge_pool_salloc(pool, ptr); return (ret); } JEMALLOC_ALWAYS_INLINE size_t ivsalloc(const void *ptr, bool demote) { size_t i; malloc_mutex_lock(&pools_lock); unsigned n = npools; for (i = 0; i < n; ++i) { pool_t *pool = pools[i]; if (pool == NULL) continue; /* Return 0 if ptr is not within a chunk managed by jemalloc. */ if (rtree_get(pool->chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) != 0) break; } malloc_mutex_unlock(&pools_lock); if (i == n) return 0; return (isalloc(ptr, demote)); } JEMALLOC_INLINE size_t u2rz(size_t usize) { size_t ret; if (usize <= SMALL_MAXCLASS) { size_t binind = small_size2bin(usize); assert(binind < NBINS); ret = arena_bin_info[binind].redzone_size; } else ret = 0; return (ret); } JEMALLOC_INLINE size_t p2rz(const void *ptr) { size_t usize = isalloc(ptr, false); return (u2rz(usize)); } JEMALLOC_ALWAYS_INLINE void idalloct(void *ptr, bool try_tcache) { arena_chunk_t *chunk; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) arena_dalloc(chunk, ptr, try_tcache); else huge_dalloc(&base_pool, ptr); } JEMALLOC_ALWAYS_INLINE void pool_idalloct(pool_t *pool, void *ptr, bool try_tcache) { arena_chunk_t *chunk; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) arena_dalloc(chunk, ptr, try_tcache); else huge_dalloc(pool, ptr); } JEMALLOC_ALWAYS_INLINE void idalloc(void *ptr) { idalloct(ptr, true); } JEMALLOC_ALWAYS_INLINE void iqalloct(void *ptr, bool try_tcache) { if (config_fill && opt_quarantine) quarantine(ptr); else idalloct(ptr, try_tcache); } JEMALLOC_ALWAYS_INLINE void pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache) { if (config_fill && opt_quarantine) quarantine(ptr); else pool_idalloct(pool, ptr, try_tcache); } JEMALLOC_ALWAYS_INLINE void iqalloc(void *ptr) { iqalloct(ptr, true); } JEMALLOC_ALWAYS_INLINE void 
* iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) { void *p; size_t usize, copysize; usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); if (p == NULL) { if (extra == 0) return (NULL); /* Try again, without extra this time. */ usize = sa2u(size, alignment); if (usize == 0) return (NULL); p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); if (p == NULL) return (NULL); } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); pool_iqalloct(arena->pool, ptr, try_tcache_dalloc); return (p); } JEMALLOC_ALWAYS_INLINE void * iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) { size_t oldsize; assert(ptr != NULL); assert(size != 0); oldsize = isalloc(ptr, config_prof); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* * Existing object alignment is inadequate; allocate new space * and copy. */ return (iralloct_realign(ptr, oldsize, size, extra, alignment, zero, try_tcache_alloc, try_tcache_dalloc, arena)); } if (size + extra <= arena_maxclass) { void *ret; ret = arena_ralloc(arena, ptr, oldsize, size, extra, alignment, zero, try_tcache_alloc, try_tcache_dalloc); if ((ret != NULL) || (size + extra > oldsize)) return (ret); if (oldsize > chunksize) { size_t old_usize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); if (config_valgrind && in_valgrind) { old_usize = isalloc(ptr, config_prof); old_rzsize = config_prof ? 
p2rz(ptr) : u2rz(old_usize); } ret = huge_ralloc(arena, ptr, oldsize, chunksize, 0, alignment, zero, try_tcache_dalloc); JEMALLOC_VALGRIND_REALLOC(true, ret, s2u(chunksize), true, ptr, old_usize, old_rzsize, true, false); if (ret != NULL) { /* Now, it should succeed... */ return arena_ralloc(arena, ret, chunksize, size, extra, alignment, zero, try_tcache_alloc, try_tcache_dalloc); } } return NULL; } else { return (huge_ralloc(arena, ptr, oldsize, size, extra, alignment, zero, try_tcache_dalloc)); } } JEMALLOC_ALWAYS_INLINE void * iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, &base_pool); return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy)); } JEMALLOC_ALWAYS_INLINE void * pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra, size_t alignment, bool zero) { arena_t dummy; DUMMY_ARENA_INITIALIZE(dummy, pool); return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy)); } JEMALLOC_ALWAYS_INLINE bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) { size_t oldsize; assert(ptr != NULL); assert(size != 0); oldsize = isalloc(ptr, config_prof); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* Existing object alignment is inadequate. */ return (true); } if (size <= arena_maxclass) return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); else return (huge_ralloc_no_move(&base_pool, ptr, oldsize, size, extra, zero)); } malloc_tsd_externs(thread_allocated, thread_allocated_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) #endif #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_INLINES #ifdef _WIN32 #define __builtin_clz(x) msc_clz(x) #endif /******************************************************************************/ #endif /* JEMALLOC_INTERNAL_H */
27,780
24.095754
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_unnamespace.h
#undef a0calloc #undef a0free #undef a0malloc #undef arena_alloc_junk_small #undef arena_bin_index #undef arena_bin_info #undef arena_boot #undef arena_chunk_alloc_huge #undef arena_chunk_dalloc_huge #undef arena_dalloc #undef arena_dalloc_bin #undef arena_dalloc_bin_locked #undef arena_dalloc_junk_large #undef arena_dalloc_junk_small #undef arena_dalloc_large #undef arena_dalloc_large_locked #undef arena_dalloc_small #undef arena_dss_prec_get #undef arena_dss_prec_set #undef arena_malloc #undef arena_malloc_large #undef arena_malloc_small #undef arena_mapbits_allocated_get #undef arena_mapbits_binind_get #undef arena_mapbits_dirty_get #undef arena_mapbits_get #undef arena_mapbits_large_binind_set #undef arena_mapbits_large_get #undef arena_mapbits_large_set #undef arena_mapbits_large_size_get #undef arena_mapbits_small_runind_get #undef arena_mapbits_small_set #undef arena_mapbits_unallocated_set #undef arena_mapbits_unallocated_size_get #undef arena_mapbits_unallocated_size_set #undef arena_mapbits_unzeroed_get #undef arena_mapbits_unzeroed_set #undef arena_mapbitsp_get #undef arena_mapbitsp_read #undef arena_mapbitsp_write #undef arena_mapelm_to_pageind #undef arena_mapp_get #undef arena_maxclass #undef arena_new #undef arena_palloc #undef arena_postfork_child #undef arena_postfork_parent #undef arena_prefork #undef arena_prof_accum #undef arena_prof_accum_impl #undef arena_prof_accum_locked #undef arena_prof_ctx_get #undef arena_prof_ctx_set #undef arena_prof_promoted #undef arena_ptr_small_binind_get #undef arena_purge_all #undef arena_quarantine_junk_small #undef arena_ralloc #undef arena_ralloc_junk_large #undef arena_ralloc_no_move #undef arena_redzone_corruption #undef arena_run_regind #undef arena_runs_avail_tree_iter #undef arena_salloc #undef arena_stats_merge #undef arena_tcache_fill_small #undef arenas #undef pools #undef arenas_booted #undef arenas_cleanup #undef arenas_extend #undef arenas_initialized #undef arenas_lock #undef arenas_tls #undef 
arenas_tsd #undef arenas_tsd_boot #undef arenas_tsd_cleanup_wrapper #undef arenas_tsd_get #undef arenas_tsd_get_wrapper #undef arenas_tsd_init_head #undef arenas_tsd_set #undef atomic_add_u #undef atomic_add_uint32 #undef atomic_add_uint64 #undef atomic_add_z #undef atomic_sub_u #undef atomic_sub_uint32 #undef atomic_sub_uint64 #undef atomic_sub_z #undef base_alloc #undef base_boot #undef base_calloc #undef base_free_fn #undef base_malloc_fn #undef base_node_alloc #undef base_node_dalloc #undef base_pool #undef base_postfork_child #undef base_postfork_parent #undef base_prefork #undef bitmap_full #undef bitmap_get #undef bitmap_info_init #undef bitmap_info_ngroups #undef bitmap_init #undef bitmap_set #undef bitmap_sfu #undef bitmap_size #undef bitmap_unset #undef bt_init #undef buferror #undef choose_arena #undef choose_arena_hard #undef chunk_alloc_arena #undef chunk_alloc_base #undef chunk_alloc_default #undef chunk_alloc_dss #undef chunk_alloc_mmap #undef chunk_global_boot #undef chunk_boot #undef chunk_dalloc_default #undef chunk_dalloc_mmap #undef chunk_dss_boot #undef chunk_dss_postfork_child #undef chunk_dss_postfork_parent #undef chunk_dss_prec_get #undef chunk_dss_prec_set #undef chunk_dss_prefork #undef chunk_in_dss #undef chunk_npages #undef chunk_postfork_child #undef chunk_postfork_parent #undef chunk_prefork #undef chunk_unmap #undef chunk_record #undef chunks_mtx #undef chunks_rtree #undef chunksize #undef chunksize_mask #undef ckh_bucket_search #undef ckh_count #undef ckh_delete #undef ckh_evict_reloc_insert #undef ckh_insert #undef ckh_isearch #undef ckh_iter #undef ckh_new #undef ckh_pointer_hash #undef ckh_pointer_keycomp #undef ckh_rebuild #undef ckh_remove #undef ckh_search #undef ckh_string_hash #undef ckh_string_keycomp #undef ckh_try_bucket_insert #undef ckh_try_insert #undef ctl_boot #undef ctl_bymib #undef ctl_byname #undef ctl_nametomib #undef ctl_postfork_child #undef ctl_postfork_parent #undef ctl_prefork #undef dss_prec_names #undef 
extent_tree_ad_first #undef extent_tree_ad_insert #undef extent_tree_ad_iter #undef extent_tree_ad_iter_recurse #undef extent_tree_ad_iter_start #undef extent_tree_ad_last #undef extent_tree_ad_new #undef extent_tree_ad_next #undef extent_tree_ad_nsearch #undef extent_tree_ad_prev #undef extent_tree_ad_psearch #undef extent_tree_ad_remove #undef extent_tree_ad_reverse_iter #undef extent_tree_ad_reverse_iter_recurse #undef extent_tree_ad_reverse_iter_start #undef extent_tree_ad_search #undef extent_tree_szad_first #undef extent_tree_szad_insert #undef extent_tree_szad_iter #undef extent_tree_szad_iter_recurse #undef extent_tree_szad_iter_start #undef extent_tree_szad_last #undef extent_tree_szad_new #undef extent_tree_szad_next #undef extent_tree_szad_nsearch #undef extent_tree_szad_prev #undef extent_tree_szad_psearch #undef extent_tree_szad_remove #undef extent_tree_szad_reverse_iter #undef extent_tree_szad_reverse_iter_recurse #undef extent_tree_szad_reverse_iter_start #undef extent_tree_szad_search #undef get_errno #undef hash #undef hash_fmix_32 #undef hash_fmix_64 #undef hash_get_block_32 #undef hash_get_block_64 #undef hash_rotl_32 #undef hash_rotl_64 #undef hash_x64_128 #undef hash_x86_128 #undef hash_x86_32 #undef huge_allocated #undef huge_boot #undef huge_dalloc #undef huge_dalloc_junk #undef huge_malloc #undef huge_ndalloc #undef huge_nmalloc #undef huge_palloc #undef huge_postfork_child #undef huge_postfork_parent #undef huge_prefork #undef huge_prof_ctx_get #undef huge_prof_ctx_set #undef huge_ralloc #undef huge_ralloc_no_move #undef huge_salloc #undef icalloc #undef icalloct #undef idalloc #undef idalloct #undef imalloc #undef imalloct #undef in_valgrind #undef ipalloc #undef ipalloct #undef iqalloc #undef iqalloct #undef iralloc #undef iralloct #undef iralloct_realign #undef isalloc #undef isthreaded #undef ivsalloc #undef ixalloc #undef jemalloc_postfork_child #undef jemalloc_postfork_parent #undef jemalloc_prefork #undef lg_floor #undef 
malloc_cprintf #undef malloc_mutex_init #undef malloc_mutex_lock #undef malloc_mutex_postfork_child #undef malloc_mutex_postfork_parent #undef malloc_mutex_prefork #undef malloc_mutex_unlock #undef malloc_rwlock_init #undef malloc_rwlock_postfork_child #undef malloc_rwlock_postfork_parent #undef malloc_rwlock_prefork #undef malloc_rwlock_rdlock #undef malloc_rwlock_wrlock #undef malloc_rwlock_unlock #undef malloc_rwlock_destroy #undef malloc_printf #undef malloc_snprintf #undef malloc_strtoumax #undef malloc_tsd_boot #undef malloc_tsd_cleanup_register #undef malloc_tsd_dalloc #undef malloc_tsd_malloc #undef malloc_tsd_no_cleanup #undef malloc_vcprintf #undef malloc_vsnprintf #undef malloc_write #undef map_bias #undef mb_write #undef mutex_boot #undef narenas_auto #undef narenas_total #undef narenas_total_get #undef ncpus #undef nhbins #undef npools #undef npools_cnt #undef opt_abort #undef opt_dss #undef opt_junk #undef opt_lg_chunk #undef opt_lg_dirty_mult #undef opt_lg_prof_interval #undef opt_lg_prof_sample #undef opt_lg_tcache_max #undef opt_narenas #undef opt_prof #undef opt_prof_accum #undef opt_prof_active #undef opt_prof_final #undef opt_prof_gdump #undef opt_prof_leak #undef opt_prof_prefix #undef opt_quarantine #undef opt_redzone #undef opt_stats_print #undef opt_tcache #undef opt_utrace #undef opt_xmalloc #undef opt_zero #undef p2rz #undef pages_purge #undef pools_shared_data_initialized #undef pow2_ceil #undef prof_backtrace #undef prof_boot0 #undef prof_boot1 #undef prof_boot2 #undef prof_bt_count #undef prof_ctx_get #undef prof_ctx_set #undef prof_dump_open #undef prof_free #undef prof_gdump #undef prof_idump #undef prof_interval #undef prof_lookup #undef prof_malloc #undef prof_malloc_record_object #undef prof_mdump #undef prof_postfork_child #undef prof_postfork_parent #undef prof_prefork #undef prof_realloc #undef prof_sample_accum_update #undef prof_sample_threshold_update #undef prof_tdata_booted #undef prof_tdata_cleanup #undef prof_tdata_get 
#undef prof_tdata_init #undef prof_tdata_initialized #undef prof_tdata_tls #undef prof_tdata_tsd #undef prof_tdata_tsd_boot #undef prof_tdata_tsd_cleanup_wrapper #undef prof_tdata_tsd_get #undef prof_tdata_tsd_get_wrapper #undef prof_tdata_tsd_init_head #undef prof_tdata_tsd_set #undef quarantine #undef quarantine_alloc_hook #undef quarantine_boot #undef quarantine_booted #undef quarantine_cleanup #undef quarantine_init #undef quarantine_tls #undef quarantine_tsd #undef quarantine_tsd_boot #undef quarantine_tsd_cleanup_wrapper #undef quarantine_tsd_get #undef quarantine_tsd_get_wrapper #undef quarantine_tsd_init_head #undef quarantine_tsd_set #undef register_zone #undef rtree_delete #undef rtree_get #undef rtree_get_locked #undef rtree_new #undef rtree_postfork_child #undef rtree_postfork_parent #undef rtree_prefork #undef rtree_set #undef s2u #undef sa2u #undef set_errno #undef small_bin2size #undef small_bin2size_compute #undef small_bin2size_lookup #undef small_bin2size_tab #undef small_s2u #undef small_s2u_compute #undef small_s2u_lookup #undef small_size2bin #undef small_size2bin_compute #undef small_size2bin_lookup #undef small_size2bin_tab #undef stats_cactive #undef stats_cactive_add #undef stats_cactive_get #undef stats_cactive_sub #undef stats_chunks #undef stats_print #undef tcache_alloc_easy #undef tcache_alloc_large #undef tcache_alloc_small #undef tcache_alloc_small_hard #undef tcache_arena_associate #undef tcache_arena_dissociate #undef tcache_bin_flush_large #undef tcache_bin_flush_small #undef tcache_bin_info #undef tcache_boot0 #undef tcache_boot1 #undef tcache_booted #undef tcache_create #undef tcache_dalloc_large #undef tcache_dalloc_small #undef tcache_destroy #undef tcache_enabled_booted #undef tcache_enabled_get #undef tcache_enabled_initialized #undef tcache_enabled_set #undef tcache_enabled_tls #undef tcache_enabled_tsd #undef tcache_enabled_tsd_boot #undef tcache_enabled_tsd_cleanup_wrapper #undef tcache_enabled_tsd_get #undef 
tcache_enabled_tsd_get_wrapper #undef tcache_enabled_tsd_init_head #undef tcache_enabled_tsd_set #undef tcache_event #undef tcache_event_hard #undef tcache_flush #undef tcache_get #undef tcache_get_hard #undef tcache_initialized #undef tcache_maxclass #undef tcache_salloc #undef tcache_stats_merge #undef tcache_thread_cleanup #undef tcache_tls #undef tcache_tsd #undef tcache_tsd_boot #undef tcache_tsd_cleanup_wrapper #undef tcache_tsd_get #undef tcache_tsd_get_wrapper #undef tcache_tsd_init_head #undef tcache_tsd_set #undef thread_allocated_booted #undef thread_allocated_initialized #undef thread_allocated_tls #undef thread_allocated_tsd #undef thread_allocated_tsd_boot #undef thread_allocated_tsd_cleanup_wrapper #undef thread_allocated_tsd_get #undef thread_allocated_tsd_get_wrapper #undef thread_allocated_tsd_init_head #undef thread_allocated_tsd_set #undef tsd_init_check_recursion #undef tsd_init_finish #undef u2rz #undef valgrind_freelike_block #undef valgrind_make_mem_defined #undef valgrind_make_mem_noaccess #undef valgrind_make_mem_undefined #undef pool_new #undef pool_destroy #undef pools_lock #undef pool_base_lock #undef pool_prefork #undef pool_postfork_parent #undef pool_postfork_child #undef pool_alloc #undef vec_get #undef vec_set #undef vec_delete
11,246
23.396963
44
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/err.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * err.h - error and warning messages */ #ifndef ERR_H #define ERR_H 1 #include <stdlib.h> #include <stdio.h> #include <stdarg.h> /* * err - windows implementation of unix err function */ __declspec(noreturn) static void err(int eval, const char *fmt, ...) { va_list vl; va_start(vl, fmt); vfprintf(stderr, fmt, vl); va_end(vl); exit(eval); } /* * warn - windows implementation of unix warn function */ static void warn(const char *fmt, ...) 
{ va_list vl; va_start(vl, fmt); fprintf(stderr, "Warning: "); vfprintf(stderr, fmt, vl); va_end(vl); } #endif /* ERR_H */
2,190
29.859155
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sched.h
/* * Copyright 2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake sched.h */
1,620
44.027778
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/win_mmap.h
/* * Copyright 2015-2018, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * win_mmap.h -- (internal) tracks the regions mapped by mmap */ #ifndef WIN_MMAP_H #define WIN_MMAP_H 1 #include "queue.h" #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define rounddown(x, y) (((x) / (y)) * (y)) void win_mmap_init(void); void win_mmap_fini(void); /* allocation/mmap granularity */ extern unsigned long long Mmap_align; typedef enum FILE_MAPPING_TRACKER_FLAGS { FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001, /* * This should hold the value of all flags ORed for debug purpose. */ FILE_MAPPING_TRACKER_FLAGS_MASK = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED } FILE_MAPPING_TRACKER_FLAGS; /* * this structure tracks the file mappings outstanding per file handle */ typedef struct FILE_MAPPING_TRACKER { SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry; HANDLE FileHandle; HANDLE FileMappingHandle; void *BaseAddress; void *EndAddress; DWORD Access; os_off_t Offset; size_t FileLen; FILE_MAPPING_TRACKER_FLAGS Flags; } FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER; extern SRWLOCK FileMappingQLock; extern SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead; #endif /* WIN_MMAP_H */
2,817
33.790123
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/platform.h
/* * Copyright 2015-2018, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * platform.h -- dirty hacks to compile Linux code on Windows using VC++ * * This is included to each source file using "/FI" (forced include) option. 
* * XXX - it is a subject for refactoring */ #ifndef PLATFORM_H #define PLATFORM_H 1 #pragma warning(disable : 4996) #pragma warning(disable : 4200) /* allow flexible array member */ #pragma warning(disable : 4819) /* non unicode characteres */ #ifdef __cplusplus extern "C" { #endif /* Prevent PMDK compilation for 32-bit platforms */ #if defined(_WIN32) && !defined(_WIN64) #error "32-bit builds of PMDK are not supported!" #endif #define _CRT_RAND_S /* rand_s() */ #include <windows.h> #include <stdint.h> #include <time.h> #include <io.h> #include <process.h> #include <fcntl.h> #include <sys/types.h> #include <malloc.h> #include <signal.h> #include <intrin.h> #include <direct.h> /* use uuid_t definition from util.h */ #ifdef uuid_t #undef uuid_t #endif /* a few trivial substitutions */ #define PATH_MAX MAX_PATH #define __thread __declspec(thread) #define __func__ __FUNCTION__ #ifdef _DEBUG #define DEBUG #endif /* * The inline keyword is available only in VC++. * https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx */ #ifndef __cplusplus #define inline __inline #endif /* XXX - no equivalents in VC++ */ #define __attribute__(a) #define __builtin_constant_p(cnd) 0 /* * missing definitions */ /* errno.h */ #define ELIBACC 79 /* cannot access a needed shared library */ /* sys/stat.h */ #define S_IRUSR S_IREAD #define S_IWUSR S_IWRITE #define S_IRGRP S_IRUSR #define S_IWGRP S_IWUSR #define O_SYNC 0 typedef int mode_t; #define fchmod(fd, mode) 0 /* XXX - dummy */ #define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ); /* unistd.h */ typedef long long os_off_t; typedef long long ssize_t; int setenv(const char *name, const char *value, int overwrite); int unsetenv(const char *name); /* fcntl.h */ int posix_fallocate(int fd, os_off_t offset, os_off_t len); /* string.h */ #define strtok_r strtok_s /* time.h */ #define CLOCK_MONOTONIC 1 #define CLOCK_REALTIME 2 int clock_gettime(int id, struct timespec *ts); /* signal.h */ typedef unsigned long long sigset_t; /* one bit for 
each signal */ C_ASSERT(NSIG <= sizeof(sigset_t) * 8); struct sigaction { void (*sa_handler) (int signum); /* void (*sa_sigaction)(int, siginfo_t *, void *); */ sigset_t sa_mask; int sa_flags; void (*sa_restorer) (void); }; __inline int sigemptyset(sigset_t *set) { *set = 0; return 0; } __inline int sigfillset(sigset_t *set) { *set = ~0; return 0; } __inline int sigaddset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set |= (1ULL << (signum - 1)); return 0; } __inline int sigdelset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set &= ~(1ULL << (signum - 1)); return 0; } __inline int sigismember(const sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } return ((*set & (1ULL << (signum - 1))) ? 1 : 0); } /* sched.h */ /* * sched_yield -- yield the processor */ __inline int sched_yield(void) { SwitchToThread(); return 0; /* always succeeds */ } /* * helper macros for library ctor/dtor function declarations */ #define MSVC_CONSTR(func) \ void func(void); \ __pragma(comment(linker, "/include:_" #func)) \ __pragma(section(".CRT$XCU", read)) \ __declspec(allocate(".CRT$XCU")) \ const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func; #define MSVC_DESTR(func) \ void func(void); \ static void _##func##_reg(void) { atexit(func); }; \ MSVC_CONSTR(_##func##_reg) #ifdef __cplusplus } #endif #endif /* PLATFORM_H */
5,389
22.744493
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/libgen.h
/* * Copyright 2016, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake libgen.h */
1,621
44.055556
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/endian.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * endian.h -- convert values between host and big-/little-endian byte order */ #ifndef ENDIAN_H #define ENDIAN_H 1 /* * XXX: On Windows we can assume little-endian architecture */ #include <intrin.h> #define htole16(a) (a) #define htole32(a) (a) #define htole64(a) (a) #define le16toh(a) (a) #define le32toh(a) (a) #define le64toh(a) (a) #define htobe16(x) _byteswap_ushort(x) #define htobe32(x) _byteswap_ulong(x) #define htobe64(x) _byteswap_uint64(x) #define be16toh(x) _byteswap_ushort(x) #define be32toh(x) _byteswap_ulong(x) #define be64toh(x) _byteswap_uint64(x) #endif /* ENDIAN_H */
2,211
34.677419
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/features.h
/* * Copyright 2016, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake features.h */
1,623
44.111111
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/unistd.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * unistd.h -- compatibility layer for POSIX operating system API */ #ifndef UNISTD_H #define UNISTD_H 1 #include <stdio.h> #define _SC_PAGESIZE 0 #define _SC_NPROCESSORS_ONLN 1 #define R_OK 04 #define W_OK 02 #define X_OK 00 /* execute permission doesn't exist on Windows */ #define F_OK 00 /* * sysconf -- get configuration information at run time */ static __inline long sysconf(int p) { SYSTEM_INFO si; int ret = 0; switch (p) { case _SC_PAGESIZE: GetSystemInfo(&si); return si.dwPageSize; case _SC_NPROCESSORS_ONLN: for (int i = 0; i < GetActiveProcessorGroupCount(); i++) { ret += GetActiveProcessorCount(i); } return ret; default: return 0; } } #define getpid _getpid /* * pread -- read from a file descriptor at given offset */ static ssize_t pread(int fd, void *buf, size_t count, os_off_t offset) { __int64 position = _lseeki64(fd, 0, SEEK_CUR); _lseeki64(fd, offset, SEEK_SET); int ret = _read(fd, buf, (unsigned)count); _lseeki64(fd, position, SEEK_SET); return ret; } /* * pwrite -- write to a file descriptor at given offset */ static ssize_t pwrite(int fd, const void *buf, size_t count, os_off_t offset) { __int64 position = _lseeki64(fd, 0, SEEK_CUR); _lseeki64(fd, offset, SEEK_SET); int ret = _write(fd, buf, (unsigned)count); _lseeki64(fd, position, SEEK_SET); return ret; } #define S_ISBLK(x) 0 /* BLK devices not exist on Windows */ /* * basename -- parse pathname and return filename component */ static char * basename(char *path) { char fname[_MAX_FNAME]; char ext[_MAX_EXT]; _splitpath(path, NULL, NULL, fname, ext); sprintf(path, "%s%s", fname, ext); return path; } /* * dirname -- parse pathname and return directory component */ static char * dirname(char *path) { if (path == NULL) return "."; size_t len = strlen(path); if (len == 0) return "."; char *end = path + len; /* strip trailing forslashes and backslashes */ while ((--end) > path) { if (*end != '\\' && *end != '/') { *(end + 1) = '\0'; break; } } /* strip basename */ while ((--end) > path) { if (*end 
== '\\' || *end == '/') { *end = '\0'; break; } } if (end != path) { return path; /* handle edge cases */ } else if (*end == '\\' || *end == '/') { *(end + 1) = '\0'; } else { *end++ = '.'; *end = '\0'; } return path; } #endif /* UNISTD_H */
3,962
22.873494
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/strings.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake strings.h */
1,627
44.222222
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/dirent.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake dirent.h */
1,626
44.194444
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/uio.h
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/uio.h -- definition of iovec structure */ #ifndef SYS_UIO_H #define SYS_UIO_H 1 #include <pmemcompat.h> #ifdef __cplusplus extern "C" { #endif ssize_t writev(int fd, const struct iovec *iov, int iovcnt); #ifdef __cplusplus } #endif #endif /* SYS_UIO_H */
1,874
34.377358
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/file.h
/* * Copyright 2015-2018, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/file.h -- file locking */
1,706
45.135135
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/statvfs.h
/* * Copyright 2016, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake statvfs.h */
1,622
44.083333
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/param.h
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/param.h -- a few useful macros */ #ifndef SYS_PARAM_H #define SYS_PARAM_H 1 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define howmany(x, y) (((x) + ((y) - 1)) / (y)) #define BPB 8 /* bits per byte */ #define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB)) #define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB))) #define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0) #define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* SYS_PARAM_H */
2,127
39.150943
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mount.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake sys/mount.h */
1,629
44.277778
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mman.h
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * sys/mman.h -- memory-mapped files for Windows */ #ifndef SYS_MMAN_H #define SYS_MMAN_H 1 #ifdef __cplusplus extern "C" { #endif #define PROT_NONE 0x0 #define PROT_READ 0x1 #define PROT_WRITE 0x2 #define PROT_EXEC 0x4 #define MAP_SHARED 0x1 #define MAP_PRIVATE 0x2 #define MAP_FIXED 0x10 #define MAP_ANONYMOUS 0x20 #define MAP_ANON MAP_ANONYMOUS #define MAP_NORESERVE 0x04000 #define MS_ASYNC 1 #define MS_SYNC 4 #define MS_INVALIDATE 2 #define MAP_FAILED ((void *)(-1)) void *mmap(void *addr, size_t len, int prot, int flags, int fd, os_off_t offset); int munmap(void *addr, size_t len); int msync(void *addr, size_t len, int flags); int mprotect(void *addr, size_t len, int prot); #ifdef __cplusplus } #endif #endif /* SYS_MMAN_H */
2,357
30.026316
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/resource.h
/* * Copyright 2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake sys/resource.h */
1,627
44.222222
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/wait.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * fake sys/wait.h */
1,628
44.25
74
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/linux/limits.h
/* * Copyright 2015-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * linux/limits.h -- fake header file */ /* * XXX - The only purpose of this empty file is to avoid preprocessor * errors when including a Linux-specific header file that has no equivalent * on Windows. With this cheap trick, we don't need a lot of preprocessor * conditionals in all the source code files. * * In the future, this will be addressed in some other way. */
1,986
43.155556
76
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/coverage.sh
#!/bin/sh set -e objdir=$1 suffix=$2 shift 2 objs=$@ gcov -b -p -f -o "${objdir}" ${objs} # Move gcov outputs so that subsequent gcov invocations won't clobber results # for the same sources with different compilation flags. for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do mv "${f}" "${f}.${suffix}" done
321
17.941176
77
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/autogen.sh
#!/bin/sh for i in autoconf; do echo "$i" $i if [ $? -ne 0 ]; then echo "Error $? in $i" exit 1 fi done echo "./configure --enable-autogen $@" ./configure --enable-autogen $@ if [ $? -ne 0 ]; then echo "Error $? in ./configure" exit 1 fi
266
13.833333
38
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/win_autogen.sh
#!/bin/sh # Copyright 2016, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # JEMALLOC_GEN=./../windows/jemalloc_gen AC_PATH=./../../jemalloc autoconf if [ $? -ne 0 ]; then echo "Error $? in $i" exit 1 fi if [ ! -d "$JEMALLOC_GEN" ]; then echo Creating... $JEMALLOC_GEN mkdir "$JEMALLOC_GEN" fi cd $JEMALLOC_GEN echo "Run configure..." 
$AC_PATH/configure \ --enable-autogen \ CC=cl \ --enable-lazy-lock=no \ --without-export \ --with-jemalloc-prefix=je_vmem_ \ --with-private-namespace=je_vmem_ \ --disable-xmalloc \ --disable-munmap \ EXTRA_CFLAGS="-DJEMALLOC_LIBVMEM" if [ $? -ne 0 ]; then echo "Error $? in $AC_PATH/configure" exit 1 fi
2,161
32.261538
73
sh
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum.h
#include "test/jemalloc_test.h" #define NTHREADS 4 #define NALLOCS_PER_THREAD 50 #define DUMP_INTERVAL 1 #define BT_COUNT_CHECK_INTERVAL 5 #define alloc_n_proto(n) \ void *alloc_##n(unsigned bits); alloc_n_proto(0) alloc_n_proto(1) #define alloc_n_gen(n) \ void * \ alloc_##n(unsigned bits) \ { \ void *p; \ \ if (bits == 0) \ p = mallocx(1, 0); \ else { \ switch (bits & 0x1U) { \ case 0: \ p = (alloc_0(bits >> 1)); \ break; \ case 1: \ p = (alloc_1(bits >> 1)); \ break; \ default: not_reached(); \ } \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ return (p); \ }
794
21.083333
59
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool.h
#include "test/jemalloc_test.h" #define TEST_POOL_SIZE (16L * 1024L * 1024L) #define TEST_TOO_SMALL_POOL_SIZE (2L * 1024L * 1024L) #define TEST_VALUE 123456 #define TEST_MALLOC_FREE_LOOPS 2 #define TEST_MALLOC_SIZE 1024 #define TEST_ALLOCS_SIZE (TEST_POOL_SIZE / 8) #define TEST_BUFFOR_CMP_SIZE (4L * 1024L * 1024L) static char mem_pool[TEST_POOL_SIZE]; static char mem_extend_ok[TEST_POOL_SIZE]; static void* allocs[TEST_ALLOCS_SIZE]; static int custom_allocs; TEST_BEGIN(test_pool_create_errors) { pool_t *pool; memset(mem_pool, 1, TEST_POOL_SIZE); pool = pool_create(mem_pool, 0, 0, 1); assert_ptr_null(pool, "pool_create() should return NULL for size 0"); pool = pool_create(NULL, TEST_POOL_SIZE, 0, 1); assert_ptr_null(pool, "pool_create() should return NULL for input addr NULL"); } TEST_END TEST_BEGIN(test_pool_create) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); assert_ptr_eq(pool, mem_pool, "pool_create() should return addr with valid input"); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_malloc) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); int *test = pool_malloc(pool, sizeof(int)); assert_ptr_not_null(test, "pool_malloc should return valid ptr"); *test = TEST_VALUE; assert_x_eq(*test, TEST_VALUE, "ptr should be usable"); assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool, "pool_malloc() should return pointer to memory from pool"); assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE, "pool_malloc() should return pointer to memory from pool"); pool_free(pool, test); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_free) { pool_t *pool; int i, j, s = 0, prev_s = 0; int allocs = TEST_POOL_SIZE/TEST_MALLOC_SIZE; void *arr[allocs]; custom_allocs = 0; 
memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); for (i = 0; i < TEST_MALLOC_FREE_LOOPS; ++i) { for (j = 0; j < allocs; ++j) { arr[j] = pool_malloc(pool, TEST_MALLOC_SIZE); if (arr[j] != NULL) { s++; } } for (j = 0; j < allocs; ++j) { if (arr[j] != NULL) { pool_free(pool, arr[j]); } } if (prev_s != 0) { assert_x_eq(s, prev_s, "pool_free() should record back used chunks"); } prev_s = s; s = 0; } pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_calloc) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 1, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1); int *test = pool_calloc(pool, 1, sizeof(int)); assert_ptr_not_null(test, "pool_calloc should return valid ptr"); assert_x_eq(*test, 0, "pool_calloc should return zeroed memory"); pool_free(pool, test); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_realloc) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); int *test = pool_ralloc(pool, NULL, sizeof(int)); assert_ptr_not_null(test, "pool_ralloc with NULL addr should return valid ptr"); int *test2 = pool_ralloc(pool, test, sizeof(int)*2); assert_ptr_not_null(test, "pool_ralloc should return valid ptr"); test2[0] = TEST_VALUE; test2[1] = TEST_VALUE; assert_x_eq(test[1], TEST_VALUE, "ptr should be usable"); pool_free(pool, test2); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_aligned_alloc) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); int *test = pool_aligned_alloc(pool, 1024, 1024); assert_ptr_not_null(test, "pool_aligned_alloc should return valid ptr"); assert_x_eq(((uintptr_t)(test) & 1023), 0, "ptr should be aligned"); 
assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool, "pool_aligned_alloc() should return pointer to memory from pool"); assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE, "pool_aligned_alloc() should return pointer to memory from pool"); *test = TEST_VALUE; assert_x_eq(*test, TEST_VALUE, "ptr should be usable"); pool_free(pool, test); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_reuse_pool) { pool_t *pool; size_t pool_num = 0; custom_allocs = 0; /* create and destroy pool multiple times */ for (; pool_num<100; ++pool_num) { pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1); assert_ptr_not_null(pool, "Can not create pool!!!"); if (pool == NULL) { break; } void *prev = NULL; size_t i = 0; /* allocate memory from pool */ for (; i<100; ++i) { void **next = pool_malloc(pool, sizeof (void *)); assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool, "pool_malloc() should return pointer to memory from pool"); assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+TEST_POOL_SIZE, "pool_malloc() should return pointer to memory from pool"); *next = prev; prev = next; } /* free all allocated memory from pool */ while (prev != NULL) { void **act = prev; prev = *act; pool_free(pool, act); } pool_delete(pool); } assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_check_memory) { pool_t *pool; size_t pool_size = POOL_MINIMAL_SIZE; assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size"); size_t object_size; size_t size_allocated; size_t i; size_t j; for (object_size = 8; object_size <= TEST_BUFFOR_CMP_SIZE ; object_size *= 2) { custom_allocs = 0; pool = pool_create(mem_pool, pool_size, 0, 1); assert_ptr_not_null(pool, "Can not create pool!!!"); size_allocated = 0; memset(allocs, 0, TEST_ALLOCS_SIZE * sizeof(void *)); for (i = 0; i < TEST_ALLOCS_SIZE;++i) { allocs[i] = pool_malloc(pool, object_size); if (allocs[i] == NULL) { /* out 
of memory in pool */ break; } assert_lu_gt((uintptr_t)allocs[i], (uintptr_t)mem_pool, "pool_malloc() should return pointer to memory from pool"); assert_lu_lt((uintptr_t)allocs[i], (uintptr_t)mem_pool+pool_size, "pool_malloc() should return pointer to memory from pool"); size_allocated += object_size; /* fill each allocation with a unique value */ memset(allocs[i], (char)i, object_size); } assert_ptr_not_null(allocs[0], "pool_malloc should return valid ptr"); assert_lu_lt(i + 1, TEST_ALLOCS_SIZE, "All memory should be used"); /* check for unexpected modifications of prepare data */ for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) { char *buffer = allocs[i]; for (j = 0; j < object_size; ++j) if (buffer[j] != (char)i) { assert_true(0, "Content of data object was modified unexpectedly" " for object size: %zu, id: %zu", object_size, j); break; } } pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } } TEST_END TEST_BEGIN(test_pool_use_all_memory) { pool_t *pool; size_t size = 0; size_t pool_size = POOL_MINIMAL_SIZE; assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size"); custom_allocs = 0; pool = pool_create(mem_pool, pool_size, 0, 1); assert_ptr_not_null(pool, "Can not create pool!!!"); void *prev = NULL; for (;;) { void **next = pool_malloc(pool, sizeof (void *)); if (next == NULL) { /* Out of memory in pool, test end */ break; } size += sizeof (void *); assert_ptr_not_null(next, "pool_malloc should return valid ptr"); assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool, "pool_malloc() should return pointer to memory from pool"); assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+pool_size, "pool_malloc() should return pointer to memory from pool"); *next = prev; assert_x_eq((uintptr_t)(*next), (uintptr_t)(prev), "ptr should be usable"); prev = next; } assert_lu_gt(size, 0, "Can not alloc any memory from pool"); /* Free all allocated memory from pool */ while (prev != NULL) { void **act = prev; prev = 
*act; pool_free(pool, act); } pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_extend_errors) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); memset(mem_extend_ok, 0, TEST_TOO_SMALL_POOL_SIZE); size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_TOO_SMALL_POOL_SIZE, 0); assert_zu_eq(usable_size, 0, "pool_extend() should return 0" " when provided with memory size smaller then chunksize"); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_extend) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); memset(mem_extend_ok, 0, TEST_POOL_SIZE); size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0); assert_zu_ne(usable_size, 0, "pool_extend() should return value" " after alignment when provided with enough memory"); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END TEST_BEGIN(test_pool_extend_after_out_of_memory) { pool_t *pool; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); /* use the all memory from pool and from base allocator */ while (pool_malloc(pool, sizeof (void *))); pool->base_next_addr = pool->base_past_addr; memset(mem_extend_ok, 0, TEST_POOL_SIZE); size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0); assert_zu_ne(usable_size, 0, "pool_extend() should return value" " after alignment when provided with enough memory"); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); } TEST_END /* * print_jemalloc_messages -- custom print function, for jemalloc */ static void print_jemalloc_messages(void* ignore, const char *s) { } TEST_BEGIN(test_pool_check_extend) { 
je_malloc_message = print_jemalloc_messages; pool_t *pool; custom_allocs = 0; pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1); pool_malloc(pool, 100); assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error"); pool_delete(pool); assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error"); pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1); assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error"); size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1); assert_zu_ne(size_extend, 0, "pool_extend() should add some free space"); assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error"); pool_malloc(pool, 100); pool_delete(pool); assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error"); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); je_malloc_message = NULL; } TEST_END TEST_BEGIN(test_pool_check_memory_out_of_range) { je_malloc_message = print_jemalloc_messages; pool_t *pool; custom_allocs = 0; pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1); assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error"); void *usable_addr = (void *)CHUNK_CEILING((uintptr_t)mem_extend_ok); size_t usable_size = (TEST_POOL_SIZE - (uintptr_t)(usable_addr - (void *)mem_extend_ok)) & ~chunksize_mask; chunk_record(pool, &pool->chunks_szad_mmap, &pool->chunks_ad_mmap, usable_addr, usable_size, 0); assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error"); pool_delete(pool); assert_d_ne(je_pool_check(pool), 1, "je_pool_check() return error"); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); je_malloc_message = NULL; } TEST_END TEST_BEGIN(test_pool_check_memory_overlap) { je_malloc_message = print_jemalloc_messages; pool_t *pool; pool_t *pool2; custom_allocs = 0; memset(mem_pool, 0, TEST_POOL_SIZE); pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1); size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1); 
assert_zu_ne(size_extend, 0, "pool_extend() should add some free space"); assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error"); /* create another pool in the same memory region */ pool2 = pool_create(mem_extend_ok, TEST_POOL_SIZE, 0, 1); assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error"); assert_d_ne(je_pool_check(pool2), 1, "je_pool_check() not return error"); pool_delete(pool2); pool_delete(pool); assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator"); je_malloc_message = NULL; } TEST_END #define POOL_TEST_CASES\ test_pool_create_errors, \ test_pool_create, \ test_pool_malloc, \ test_pool_free, \ test_pool_calloc, \ test_pool_realloc, \ test_pool_aligned_alloc, \ test_pool_reuse_pool, \ test_pool_check_memory, \ test_pool_use_all_memory, \ test_pool_extend_errors, \ test_pool_extend, \ test_pool_extend_after_out_of_memory, \ test_pool_check_extend, \ test_pool_check_memory_out_of_range, \ test_pool_check_memory_overlap
13,511
27.267782
84
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS_H #define SFMT_PARAMS_H #if !defined(MEXP) #ifdef __GNUC__ #warning "MEXP is not defined. I assume MEXP is 19937." 
#endif #define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS -----------------*/ /** Mersenne Exponent. The period of the sequence * is a multiple of 2^MEXP-1. * #define MEXP 19937 */ /** SFMT generator has an internal state array of 128-bit integers, * and N is its size. */ #define N (MEXP / 128 + 1) /** N32 is the size of internal state array when regarded as an array * of 32-bit integers.*/ #define N32 (N * 4) /** N64 is the size of internal state array when regarded as an array * of 64-bit integers.*/ #define N64 (N * 2) /*---------------------- the parameters of SFMT following definitions are in paramsXXXX.h file. ----------------------*/ /** the pick up position of the array. #define POS1 122 */ /** the parameter of shift left as four 32-bit registers. #define SL1 18 */ /** the parameter of shift left as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SL2 1 */ /** the parameter of shift right as four 32-bit registers. #define SR1 11 */ /** the parameter of shift right as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SR2 1 */ /** A bitmask, used in the recursion. These parameters are introduced * to break symmetry of SIMD. #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U */ /** These definitions are part of a 128-bit period certification vector. 
#define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xc98e126aU */ #if MEXP == 607 #include "test/SFMT-params607.h" #elif MEXP == 1279 #include "test/SFMT-params1279.h" #elif MEXP == 2281 #include "test/SFMT-params2281.h" #elif MEXP == 4253 #include "test/SFMT-params4253.h" #elif MEXP == 11213 #include "test/SFMT-params11213.h" #elif MEXP == 19937 #include "test/SFMT-params19937.h" #elif MEXP == 44497 #include "test/SFMT-params44497.h" #elif MEXP == 86243 #include "test/SFMT-params86243.h" #elif MEXP == 132049 #include "test/SFMT-params132049.h" #elif MEXP == 216091 #include "test/SFMT-params216091.h" #else #ifdef __GNUC__ #error "MEXP is not valid." #undef MEXP #else #undef MEXP #endif #endif #endif /* SFMT_PARAMS_H */
4,286
31.233083
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params4253.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H #define POS1 17 #define SL1 20 #define SL2 1 #define SR1 7 #define SR2 1 #define MSK1 0x9f7bffffU #define MSK2 0x9fffff5fU #define MSK3 0x3efffffbU #define MSK4 0xfffff7bbU #define PARITY1 0xa8000001U #define PARITY2 0xaf5390a3U #define PARITY3 0xb740b3f8U #define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params607.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H #define POS1 2 #define SL1 15 #define SL2 3 #define SR1 13 #define SR2 3 #define MSK1 0xfdff37ffU #define MSK2 0xef7f3f7dU #define MSK3 0xff777b7dU #define MSK4 0x7ff7fb2fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */
3,558
42.402439
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params216091.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H #define POS1 627 #define SL1 11 #define SL2 3 #define SR1 10 #define SR2 1 #define MSK1 0xbff7bff7U #define MSK2 0xbfffffffU #define MSK3 0xbffffa7fU #define MSK4 0xffddfbfbU #define PARITY1 0xf8000001U #define PARITY2 0x89e80709U #define PARITY3 0x3bd2b64bU #define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mq.h
/* * Simple templated message queue implementation that relies on only mutexes for * synchronization (which reduces portability issues). Given the following * setup: * * typedef struct mq_msg_s mq_msg_t; * struct mq_msg_s { * mq_msg(mq_msg_t) link; * [message data] * }; * mq_gen(, mq_, mq_t, mq_msg_t, link) * * The API is as follows: * * bool mq_init(mq_t *mq); * void mq_fini(mq_t *mq); * unsigned mq_count(mq_t *mq); * mq_msg_t *mq_tryget(mq_t *mq); * mq_msg_t *mq_get(mq_t *mq); * void mq_put(mq_t *mq, mq_msg_t *msg); * * The message queue linkage embedded in each message is to be treated as * externally opaque (no need to initialize or clean up externally). mq_fini() * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ unsigned count; \ } a_mq_type; \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ if (mtx_init(&mq->lock)) \ return (true); \ ql_new(&mq->msgs); \ mq->count = 0; \ return (false); \ } \ a_attr void \ a_prefix##fini(a_mq_type *mq) \ { \ \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ a_prefix##count(a_mq_type *mq) \ { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ return (count); \ } \ a_attr a_mq_msg_type * \ a_prefix##tryget(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ msg = ql_first(&mq->msgs); \ if (msg != NULL) { \ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ return (msg); \ } \ a_attr a_mq_msg_type * \ a_prefix##get(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ struct timespec timeout; \ \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ \ timeout.tv_sec = 0; \ timeout.tv_nsec = 1; \ while (true) { \ nanosleep(&timeout, NULL); \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ if 
(timeout.tv_sec == 0) { \ /* Double sleep time, up to max 1 second. */ \ timeout.tv_nsec <<= 1; \ if (timeout.tv_nsec >= 1000*1000*1000) { \ timeout.tv_sec = 1; \ timeout.tv_nsec = 0; \ } \ } \ } \ } \ a_attr void \ a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ ql_tail_insert(&mq->msgs, msg, a_field); \ mq->count++; \ mtx_unlock(&mq->lock); \ }
2,992
25.963964
80
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params1279.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H #define POS1 7 #define SL1 14 #define SL2 3 #define SR1 5 #define SR2 1 #define MSK1 0xf7fefffdU #define MSK2 0x7fefcfffU #define MSK3 0xaff3ef3fU #define MSK4 0xb5ffff7fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params11213.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H #define POS1 68 #define SL1 14 #define SL2 3 #define SR1 7 #define SR2 3 #define MSK1 0xeffff7fbU #define MSK2 0xffffffefU #define MSK3 0xdfdfbfffU #define MSK4 0x7fffdbfdU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xe8148000U #define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-sse2.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * @note We assume LITTLE ENDIAN in this file * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #ifndef SFMT_SSE2_H #define SFMT_SSE2_H /** * This function represents the recursion formula. * @param a a 128-bit part of the interal state array * @param b a 128-bit part of the interal state array * @param c a 128-bit part of the interal state array * @param d a 128-bit part of the interal state array * @param mask 128-bit mask * @return output */ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); v = _mm_slli_epi32(d, SL1); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, v); x = _mm_slli_si128(x, SL2); y = _mm_and_si128(y, mask); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, y); return z; } /** * This function fills the internal state array with pseudorandom * integers. */ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. 
* @param size number of 128-bit pesudorandom numbers to be generated. */ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { r = _mm_load_si128(&array[j + size - N].si); _mm_store_si128(&ctx->sfmt[j].si, r); } for (; i < size; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); _mm_store_si128(&ctx->sfmt[j++].si, r); r1 = r2; r2 = r; } } #endif
5,215
32.012658
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/math.h
#ifndef JEMALLOC_ENABLE_INLINE double ln_gamma(double x); double i_gamma(double x, double p, double ln_gamma_p); double pt_norm(double p); double pt_chi2(double p, double df, double ln_gamma_df_2); double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) /* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * * This implementation is based on: * * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ JEMALLOC_INLINE double ln_gamma(double x) { double f, z; assert(x > 0.0); if (x < 7.0) { f = 1.0; z = x; while (z < 7.0) { f *= z; z += 1.0; } x = z; f = -log(f); } else f = 0.0; z = 1.0 / (x * x); return (f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - 0.002777777777778) * z + 0.083333333333333) / x); } /* * Compute the incomplete Gamma ratio for [0..x], where p is the shape * parameter, and ln_gamma_p is ln_gamma(p). * * This implementation is based on: * * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. */ JEMALLOC_INLINE double i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; assert(p > 0.0); assert(x >= 0.0); if (x == 0.0) return (0.0); acu = 1.0e-10; oflo = 1.0e30; gin = 0.0; factor = exp(p * log(x) - x - ln_gamma_p); if (x <= 1.0 || x < p) { /* Calculation by series expansion. */ gin = 1.0; term = 1.0; rn = p; while (true) { rn += 1.0; term *= x / rn; gin += term; if (term <= acu) { gin *= factor / p; return (gin); } } } else { /* Calculation by continued fraction. 
*/ a = 1.0 - p; b = a + x + 1.0; term = 0.0; pn[0] = 1.0; pn[1] = x; pn[2] = x + 1.0; pn[3] = x * b; gin = pn[2] / pn[3]; while (true) { a += 1.0; b += 2.0; term += 1.0; an = a * term; for (i = 0; i < 2; i++) pn[i+4] = b * pn[i+2] - an * pn[i]; if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; return (gin); } gin = rn; } for (i = 0; i < 4; i++) pn[i] = pn[i+2]; if (fabs(pn[4]) >= oflo) { for (i = 0; i < 4; i++) pn[i] /= oflo; } } } } /* * Given a value p in [0..1] of the lower tail area of the normal distribution, * compute the limit on the definite integral from [-inf..z] that satisfies p, * accurate to 16 decimal places. * * This implementation is based on: * * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ JEMALLOC_INLINE double pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); q = p - 0.5; if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; return (q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) * r + 3.3871328727963666080e0) / (((((((5.2264952788528545610e3 * r + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) * r + 1.0)); } else { if (q < 0.0) r = p; else r = 1.0 - p; assert(r > 0.0); r = sqrt(-log(r)); if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. 
*/ r -= 1.6; ret = ((((((((7.74545014278341407640e-4 * r + 2.27238449892691845833e-2) * r + 2.41780725177450611770e-1) * r + 1.27045825245236838258e0) * r + 3.64784832476320460504e0) * r + 5.76949722146069140550e0) * r + 4.63033784615654529590e0) * r + 1.42343711074968357734e0) / (((((((1.05075007164441684324e-9 * r + 5.47593808499534494600e-4) * r + 1.51986665636164571966e-2) * r + 1.48103976427480074590e-1) * r + 6.89767334985100004550e-1) * r + 1.67638483018380384940e0) * r + 2.05319162663775882187e0) * r + 1.0)); } else { /* p near 0 or 1. */ r -= 5.0; ret = ((((((((2.01033439929228813265e-7 * r + 2.71155556874348757815e-5) * r + 1.24266094738807843860e-3) * r + 2.65321895265761230930e-2) * r + 2.96560571828504891230e-1) * r + 1.78482653991729133580e0) * r + 5.46378491116411436990e0) * r + 6.65790464350110377720e0) / (((((((2.04426310338993978564e-15 * r + 1.42151175831644588870e-7) * r + 1.84631831751005468180e-5) * r + 7.86869131145613259100e-4) * r + 1.48753612908506148525e-2) * r + 1.36929880922735805310e-1) * r + 5.99832206555887937690e-1) * r + 1.0)); } if (q < 0.0) ret = -ret; return (ret); } } /* * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute * the upper limit on the definite integral from [0..z] that satisfies p, * accurate to 12 decimal places. * * This implementation is based on: * * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of * the Chi^2 distribution. Applied Statistics 24(3):385-388. * * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. 
*/ JEMALLOC_INLINE double pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; assert(p >= 0.0 && p < 1.0); assert(df > 0.0); e = 5.0e-7; aa = 0.6931471805; xx = 0.5 * df; c = xx - 1.0; if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); if (ch - e < 0.0) return (ch); } else { if (df > 0.32) { x = pt_norm(p); /* * Starting approximation using Wilson and Hilferty * estimate. */ p1 = 0.222222 / df; ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. */ if (ch > 2.2 * df + 6.0) { ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + ln_gamma_df_2); } } else { ch = 0.4; a = log(1.0 - p); while (true) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) break; } } } for (i = 0; i < 20; i++) { /* Calculation of seven-term Taylor series. 
*/ q = ch; p1 = 0.5 * ch; if (p1 < 0.0) return (-1.0); p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + 60.0 * a))))) / 420.0; s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * (889.0 + 1740.0 * a))) / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); if (fabs(q / ch - 1.0) <= e) break; } return (ch); } /* * Given a value p in [0..1] and Gamma distribution shape and scale parameters, * compute the upper limit on the definite integeral from [0..z] that satisfies * p. */ JEMALLOC_INLINE double pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); } #endif
8,173
25.198718
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mtx.h
/* * mtx is a slightly simplified version of malloc_mutex. This code duplication * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx); void mtx_fini(mtx_t *mtx); void mtx_lock(mtx_t *mtx); void mtx_unlock(mtx_t *mtx);
520
22.681818
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params2281.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H #define POS1 12 #define SL1 19 #define SL2 1 #define SR1 5 #define SR2 1 #define MSK1 0xbff7ffbfU #define MSK2 0xfdfffffeU #define MSK3 0xf7ffef7fU #define MSK4 0xf2f7cbbfU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params19937.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H #define POS1 122 #define SL1 18 #define SL2 1 #define SR1 11 #define SR2 1 #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */
3,560
42.426829
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/test.h
#define ASSERT_BUFSIZE 256 #define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) "#cmp" (%s) --> " \ "%"pri" "#neg_cmp" %"pri": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) #define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) #define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) #define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) #define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) #define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) #define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) #define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) #define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) #define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) #define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) #define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) #define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) #define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) #define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) #define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) #define assert_d_lt(a, b, ...) 
assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) #define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) #define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) #define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) #define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) #define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) #define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) #define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) #define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) #define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) #define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) #define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) #define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) #define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) #define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) #define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) #define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) #define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) #define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) #define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) #define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) #define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) #define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) #define assert_qd_ne(a, b, ...) 
assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) #define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) #define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) #define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) #define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) #define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) #define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) #define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) #define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) #define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) #define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) #define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) #define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) #define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) #define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) #define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) #define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) #define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) #define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) #define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) #define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) #define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) #define assert_ju_gt(a, b, ...) 
assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) #define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) #define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) #define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) #define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) #define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) #define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) #define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) #define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) #define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) #define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) #define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) #define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) #define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ !=, PRId32, __VA_ARGS__) #define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, PRId32, __VA_ARGS__) #define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, PRId32, __VA_ARGS__) #define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, PRId32, __VA_ARGS__) #define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, PRId32, __VA_ARGS__) #define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ <=, PRId32, __VA_ARGS__) #define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ !=, PRIu32, __VA_ARGS__) #define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, PRIu32, __VA_ARGS__) #define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, PRIu32, __VA_ARGS__) #define assert_u32_le(a, b, ...) 
assert_cmp(uint32_t, a, b, <=, \ >, PRIu32, __VA_ARGS__) #define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, PRIu32, __VA_ARGS__) #define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, PRIu32, __VA_ARGS__) #define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, PRId64, __VA_ARGS__) #define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, PRId64, __VA_ARGS__) #define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ >=, PRId64, __VA_ARGS__) #define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, PRId64, __VA_ARGS__) #define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, PRId64, __VA_ARGS__) #define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, PRId64, __VA_ARGS__) #define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, PRIu64, __VA_ARGS__) #define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, PRIu64, __VA_ARGS__) #define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, PRIu64, __VA_ARGS__) #define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, PRIu64, __VA_ARGS__) #define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, PRIu64, __VA_ARGS__) #define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, PRIu64, __VA_ARGS__) #define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) == (%s) --> %s != %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_b_ne(a, b, ...) 
do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) != (%s) --> %s == %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) #define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) same as (%s) --> " \ "\"%s\" differs from \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_str_ne(a, b, ...) do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) differs from (%s) --> " \ "\"%s\" same as \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Unreachable code reached: ", \ __func__, __FILE__, __LINE__); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } while (0) /* * If this enum changes, corresponding changes in test/test.sh.in are also * necessary. 
*/ typedef enum { test_status_pass = 0, test_status_skip = 1, test_status_fail = 2, test_status_count = 3 } test_status_t; typedef void (test_t)(void); #define TEST_BEGIN(f) \ static void \ f(void) \ { \ p_test_init(#f); #define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) \ p_test(__VA_ARGS__, NULL) #define test_not_init(...) \ p_test_not_init(__VA_ARGS__, NULL) #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); test_status_t p_test_not_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message);
13,309
38.731343
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file SFMT.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom * number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt * * @note We assume that your system has inttypes.h. If your system * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, * and you have to define PRIu64 and PRIx64 in this file as follows: * @verbatim typedef unsigned int uint32_t typedef unsigned long long uint64_t #define PRIu64 "llu" #define PRIx64 "llx" @endverbatim * uint32_t must be exactly 32-bit unsigned integer type (no more, no * less), and uint64_t must be exactly 64-bit unsigned integer type. * PRIu64 and PRIx64 are used for printf function to print 64-bit * unsigned int and 64-bit unsigned int in hexadecimal format. */ #ifndef SFMT_H #define SFMT_H typedef struct sfmt_s sfmt_t; uint32_t gen_rand32(sfmt_t *ctx); uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); uint64_t gen_rand64(sfmt_t *ctx); uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); void fill_array32(sfmt_t *ctx, uint32_t *array, int size); void fill_array64(sfmt_t *ctx, uint64_t *array, int size); sfmt_t *init_gen_rand(uint32_t seed); sfmt_t *init_by_array(uint32_t *init_key, int key_length); void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); #ifndef JEMALLOC_ENABLE_INLINE double to_real1(uint32_t v); double genrand_real1(sfmt_t *ctx); double to_real2(uint32_t v); double genrand_real2(sfmt_t *ctx); double to_real3(uint32_t v); double genrand_real3(sfmt_t *ctx); double to_res53(uint64_t v); double to_res53_mix(uint32_t x, uint32_t y); double genrand_res53(sfmt_t *ctx); double genrand_res53_mix(sfmt_t *ctx); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || 
defined(SFMT_C_)) /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ JEMALLOC_INLINE double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit resolution using 32bit integer. */ JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); } #endif #endif
5,805
32.755814
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params44497.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H #define POS1 330 #define SL1 5 #define SL2 3 #define SR1 9 #define SR2 3 #define MSK1 0xeffffffbU #define MSK2 0xdfbebfffU #define MSK3 0xbfbf7befU #define MSK4 0x9ffd7bffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xa3ac4000U #define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpmMDsync/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-alti.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt */ #ifndef SFMT_ALTI_H #define SFMT_ALTI_H /** * This function represents the recursion formula in AltiVec and BIG ENDIAN. * @param a a 128-bit part of the interal state array * @param b a 128-bit part of the interal state array * @param c a 128-bit part of the interal state array * @param d a 128-bit part of the interal state array * @return output */ JEMALLOC_ALWAYS_INLINE vector unsigned int vec_recursion(vector unsigned int a, vector unsigned int b, vector unsigned int c, vector unsigned int d) { const vector unsigned int sl1 = ALTI_SL1; const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 const vector unsigned int mask = ALTI_MSK64; const vector unsigned char perm_sl = ALTI_SL2_PERM64; const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else const vector unsigned int mask = ALTI_MSK; const vector unsigned char perm_sl = ALTI_SL2_PERM; const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif vector unsigned int v, w, x, y, z; x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); v = a; y = vec_sr(b, sr1); z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); w = vec_sl(d, sl1); z = vec_xor(z, w); y = vec_and(y, mask); v = vec_xor(v, x); z = vec_xor(z, y); z = vec_xor(z, v); return z; } /** * This function fills the internal state array with pseudorandom * integers. 
*/ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. */ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j].s = array[j + size - N].s; } for (; i < size; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; ctx->sfmt[j++].s = r; r1 = r2; r2 = r; } } #ifndef ONLY64 #if defined(__APPLE__) #define ALTI_SWAP (vector unsigned char) \ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) #else #define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} #endif /** * This function swaps high and low 32-bit of 64-bit integers in user * specified array. * * @param array an 128-bit array to be swaped. * @param size size of 128-bit array. 
*/ JEMALLOC_INLINE void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; for (i = 0; i < size; i++) { array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); } } #endif #endif
5,921
30.668449
79
h