repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_write.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* check_write.c -- write fixed data back
*/
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum questions {
Q_REPAIR_MAP,
Q_REPAIR_FLOG,
};
/*
* log_write -- (internal) write all structures for log pool
*/
static int
log_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (CHECK_WITHOUT_FIXING(ppc))
return 0;
/* endianness conversion */
struct pmemlog *log = &ppc->pool->hdr.log;
log_convert2le(log);
if (pool_write(ppc->pool, log, sizeof(*log), 0)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "writing pmemlog structure failed");
}
return 0;
}
/*
* blk_write_flog -- (internal) convert and write flog to file
*/
static int
blk_write_flog(PMEMpoolcheck *ppc, struct arena *arenap)
{
if (!arenap->flog) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "flog is missing");
}
uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff;
uint8_t *ptr = arenap->flog;
uint32_t i;
for (i = 0; i < arenap->btt_info.nfree; i++) {
struct btt_flog *flog = (struct btt_flog *)ptr;
btt_flog_convert2le(&flog[0]);
btt_flog_convert2le(&flog[1]);
ptr += BTT_FLOG_PAIR_ALIGN;
}
if (pool_write(ppc->pool, arenap->flog, arenap->flogsize, flogoff)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "arena %u: writing BTT FLOG failed\n",
arenap->id);
}
return 0;
}
/*
* blk_write_map -- (internal) convert and write map to file
*/
static int
blk_write_map(PMEMpoolcheck *ppc, struct arena *arenap)
{
if (!arenap->map) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "map is missing");
}
uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff;
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++)
arenap->map[i] = htole32(arenap->map[i]);
if (pool_write(ppc->pool, arenap->map, arenap->mapsize, mapoff)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "arena %u: writing BTT map failed\n",
arenap->id);
}
return 0;
}
/*
* blk_write -- (internal) write all structures for blk pool
*/
static int
blk_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (CHECK_WITHOUT_FIXING(ppc))
return 0;
/* endianness conversion */
ppc->pool->hdr.blk.bsize = htole32(ppc->pool->hdr.blk.bsize);
if (pool_write(ppc->pool, &ppc->pool->hdr.blk,
sizeof(ppc->pool->hdr.blk), 0)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "writing pmemblk structure failed");
}
return 0;
}
/*
* btt_data_write -- (internal) write BTT data
*/
static int
btt_data_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap;
TAILQ_FOREACH(arenap, &ppc->pool->arenas, next) {
if (ppc->pool->uuid_op == UUID_NOT_FROM_BTT) {
memcpy(arenap->btt_info.parent_uuid,
ppc->pool->hdr.pool.poolset_uuid,
sizeof(arenap->btt_info.parent_uuid));
util_checksum(&arenap->btt_info,
sizeof(arenap->btt_info),
&arenap->btt_info.checksum, 1, 0);
}
if (pool_write(ppc->pool, &arenap->btt_info,
sizeof(arenap->btt_info), arenap->offset)) {
CHECK_INFO(ppc, "%s", ppc->path);
CHECK_ERR(ppc, "arena %u: writing BTT Info failed",
arenap->id);
goto error;
}
if (pool_write(ppc->pool, &arenap->btt_info,
sizeof(arenap->btt_info), arenap->offset +
le64toh(arenap->btt_info.infooff))) {
CHECK_INFO(ppc, "%s", ppc->path);
CHECK_ERR(ppc,
"arena %u: writing BTT Info backup failed",
arenap->id);
goto error;
}
if (blk_write_flog(ppc, arenap))
goto error;
if (blk_write_map(ppc, arenap))
goto error;
}
return 0;
error:
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return -1;
}
/*
* cto_write -- (internal) write all structures for pmemcto pool
*/
static int
cto_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (CHECK_WITHOUT_FIXING(ppc))
return 0;
if (pool_write(ppc->pool, &ppc->pool->hdr.cto,
sizeof(ppc->pool->hdr.cto), 0)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "writing pmemcto structure failed");
}
return 0;
}
struct step {
int (*func)(PMEMpoolcheck *, location *loc);
enum pool_type type;
};
static const struct step steps[] = {
{
.func = log_write,
.type = POOL_TYPE_LOG,
},
{
.func = blk_write,
.type = POOL_TYPE_BLK,
},
{
.func = cto_write,
.type = POOL_TYPE_CTO,
},
{
.func = btt_data_write,
.type = POOL_TYPE_BLK | POOL_TYPE_BTT,
},
{
.func = NULL,
},
};
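/*
* Note: steps run in array order, but each entry executes only when its type
* mask matches the pool being repaired (see step_exe() below). A blk pool
* therefore runs blk_write() and then btt_data_write(), while a log pool
* runs log_write() alone.
*/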
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
/* check step conditions */
if (!(step->type & ppc->pool->params.type))
return 0;
return step->func(ppc, loc);
}
/*
* check_write -- write fixed data back
*/
void
check_write(PMEMpoolcheck *ppc)
{
/*
* XXX: Disabling individual checks based on type should be done in the
* step structure. This however requires refactor of the step
* processing code.
*/
if (CHECK_IS_NOT(ppc, REPAIR))
return;
location *loc = (location *)check_get_step_data(ppc->data);
/* do all steps */
while (loc->step != CHECK_STEP_COMPLETE &&
steps[loc->step].func != NULL) {
if (step_exe(ppc, loc))
return;
}
}
| 7,165 | 22.728477 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_cto.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* check_cto.c -- check pmemcto
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_CTO_CONSISTENT,
Q_CTO_ADDR,
Q_CTO_SIZE,
Q_CTO_ROOT
};
/*
* cto_read -- (internal) read pmemcto header
*/
static int
cto_read(PMEMpoolcheck *ppc)
{
/*
* Read the pmemcto header without the pool_hdr, which has already been
* read and checked.
*
* Take the pointer to fields right after pool_hdr, compute the size and
* offset of remaining fields.
*/
uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.cto;
ptr += sizeof(ppc->pool->hdr.cto.hdr);
size_t size = sizeof(ppc->pool->hdr.cto) -
sizeof(ppc->pool->hdr.cto.hdr);
uint64_t offset = sizeof(ppc->pool->hdr.cto.hdr);
if (pool_read(ppc->pool, ptr, size, offset))
return CHECK_ERR(ppc, "cannot read pmemcto structure");
return 0;
}
/*
* cto_hdr_check -- (internal) check pmemcto header
*/
static int
cto_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "checking pmemcto header");
if (cto_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
if (ppc->pool->hdr.cto.consistent == 0) {
if (CHECK_ASK(ppc, Q_CTO_CONSISTENT,
"pmemcto.consistent flag is not set.|Do you want to set pmemcto.consistent flag?"))
goto error;
}
if ((void *)ppc->pool->hdr.cto.addr == NULL) {
if (CHECK_ASK(ppc, Q_CTO_ADDR,
"invalid pmemcto.addr: %p.|Do you want to recover pmemcto.addr?",
(void *)ppc->pool->hdr.cto.addr))
goto error;
}
if (ppc->pool->hdr.cto.size < PMEMCTO_MIN_POOL) {
CHECK_INFO(ppc,
"pmemcto.size is less than minimum: %zu < %zu.",
ppc->pool->hdr.cto.size,
PMEMCTO_MIN_POOL);
}
if (ppc->pool->hdr.cto.size != ppc->pool->params.size) {
if (CHECK_ASK(ppc, Q_CTO_SIZE,
"pmemcto.size is different than pool size: %zu != %zu.|Do you want to set pmemlog.size to the actual pool size?",
ppc->pool->hdr.cto.size,
ppc->pool->params.size))
goto error;
}
char *valid_addr_begin =
(char *)ppc->pool->hdr.cto.addr + CTO_DSC_SIZE_ALIGNED;
char *valid_addr_end =
(char *)ppc->pool->hdr.cto.addr + ppc->pool->hdr.cto.size;
if ((void *)ppc->pool->hdr.cto.root != NULL &&
((char *)ppc->pool->hdr.cto.root < valid_addr_begin ||
(char *)ppc->pool->hdr.cto.root >= valid_addr_end)) {
if (CHECK_ASK(ppc, Q_CTO_ROOT,
"invalid pmemcto.root: %p.|Do you want to recover pmemcto.root?",
(void *)ppc->pool->hdr.cto.root))
goto error;
}
if (ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_REPAIRED)
CHECK_INFO(ppc, "pmemcto header correct");
return check_questions_sequence_validate(ppc);
error:
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return -1;
}
/*
* cto_hdr_fix -- (internal) fix pmemcto header
*/
static int
cto_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
LOG(3, NULL);
switch (question) {
case Q_CTO_CONSISTENT:
CHECK_INFO(ppc, "setting pmemcto.consistent flag");
ppc->pool->hdr.cto.consistent = 1;
break;
case Q_CTO_ADDR:
CHECK_INFO(ppc, "recovering pmemcto.addr");
ppc->pool->hdr.cto.addr = 0;
break;
case Q_CTO_SIZE:
CHECK_INFO(ppc,
"setting pmemcto.size to the actual pool size %zu",
ppc->pool->params.size);
ppc->pool->hdr.cto.size = ppc->pool->params.size;
break;
case Q_CTO_ROOT:
CHECK_INFO(ppc, "recovering pmemcto.root pointer");
ppc->pool->hdr.cto.root = 0;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
enum pool_type type;
};
static const struct step steps[] = {
{
.check = cto_hdr_check,
.type = POOL_TYPE_CTO
},
{
.fix = cto_hdr_fix,
.type = POOL_TYPE_CTO
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
ASSERTeq(ppc->pool->params.type, POOL_TYPE_CTO);
const struct step *step = &steps[loc->step++];
if (!(step->type & ppc->pool->params.type))
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (cto_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
* check_cto -- entry point for pmemcto checks
*/
void
check_cto(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 6,338 | 24.873469 | 117 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem_posix.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_posix.c -- pmem utilities with Posix implementation
*/
#include <stddef.h>
#include <sys/mman.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
/*
* is_pmem_detect -- implement pmem_is_pmem()
*
* This function returns true only if the entire range can be confirmed
* as being direct access persistent memory. Finding any part of the
* range is not direct access, or failing to look up the information
* because it is unmapped or because any sort of error happens, just
* results in returning false.
*/
int
is_pmem_detect(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
int retval = util_range_is_pmem(addr, len);
LOG(4, "returning %d", retval);
return retval;
}
/*
* pmem_map_register -- memory map file and register mapping
*/
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
LOG(3, "fd %d len %zu path %s id_dev_dax %d",
fd, len, path, is_dev_dax);
void *addr;
int map_sync;
addr = util_map(fd, len, MAP_SHARED, 0, 0, &map_sync);
if (!addr)
return NULL;
enum pmem_map_type type = MAX_PMEM_TYPE;
if (is_dev_dax)
type = PMEM_DEV_DAX;
else if (map_sync)
type = PMEM_MAP_SYNC;
if (type != MAX_PMEM_TYPE) {
if (util_range_register(addr, len, path, type)) {
LOG(1, "can't track mapped region");
goto err_unmap;
}
}
return addr;
err_unmap:
util_unmap(addr, len);
return NULL;
}
/*
* pmem_os_init -- os-dependent part of pmem initialization
*/
void
pmem_os_init(void)
{
LOG(3, NULL);
}
| 3,143 | 27.844037 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/libpmem.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmem.c -- pmem entry points for libpmem
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmemcommon.h"
/*
* libpmem_init -- load-time initialization for libpmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmem_init(void)
{
common_init(PMEM_LOG_PREFIX, PMEM_LOG_LEVEL_VAR, PMEM_LOG_FILE_VAR,
PMEM_MAJOR_VERSION, PMEM_MINOR_VERSION);
LOG(3, NULL);
pmem_init();
}
/*
* libpmem_fini -- libpmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmem_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmem_check_versionU -- see if library meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmem_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEM_MAJOR_VERSION) {
ERR("libpmem major version mismatch (need %u, found %u)",
major_required, PMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEM_MINOR_VERSION) {
ERR("libpmem minor version mismatch (need %u, found %u)",
minor_required, PMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmem_check_version -- see if library meets application version requirements
*/
const char *
pmem_check_version(unsigned major_required, unsigned minor_required)
{
return pmem_check_versionU(major_required, minor_required);
}
#else
/*
* pmem_check_versionW -- see if library meets application version requirements
*/
const wchar_t *
pmem_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmem_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
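/*
* Usage sketch (illustrative only, not part of libpmem): an application
* typically verifies the library version once at startup, before calling any
* other libpmem entry point. The function name below is hypothetical; the
* version macros and pmem_check_version() come from <libpmem.h>.
*/
#if 0
static int
example_check_libpmem_version(void)
{
	/* require the major version we were built against, same-or-newer minor */
	const char *msg = pmem_check_version(PMEM_MAJOR_VERSION,
			PMEM_MINOR_VERSION);
	if (msg != NULL) {
		/* the returned string describes the mismatch */
		fprintf(stderr, "libpmem version check failed: %s\n", msg);
		return -1;
	}
	return 0;
}
#endif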
/*
* pmem_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmem_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmem_errormsg -- return last error message
*/
const char *
pmem_errormsg(void)
{
return pmem_errormsgU();
}
#else
/*
* pmem_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmem_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 3,902 | 24.180645 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/memops_generic.c | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memops_generic.c -- architecture-independent memmove & memset fallback
*
* This fallback is needed to fulfill the guarantee that pmem_mem[cpy|set|move]
* will use at least 8-byte stores (for 8-byte aligned buffers and sizes),
* even when an accelerated implementation is missing or disabled.
* This guarantee is needed to maintain correctness, e.g. in pmemobj.
* Libc may do the same, but this behavior is not documented, so we can't rely
* on that.
*/
#include <stddef.h>
#include "out.h"
#include "pmem.h"
#include "libpmem.h"
#include "util.h"
/*
* cpy64 -- (internal) copy 64 bytes from src to dst
*/
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
/*
* We use atomics here just to be sure compiler will not split stores.
* Order of stores doesn't matter.
*/
uint64_t tmp[8];
util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
}
/*
* cpy8 -- (internal) copy 8 bytes from src to dst
*/
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
uint64_t tmp;
util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}
/*
* store8 -- (internal) store 8 bytes
*/
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}
/*
* memmove_nodrain_generic -- generic memmove to pmem without hw drain
*/
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
flags);
char *cdst = dst;
const char *csrc = src;
size_t remaining;
(void) flags;
if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = csrc[i];
pmem_flush_flags(cdst, cnt, flags);
cdst += cnt;
csrc += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 64) {
cpy64(dst8, src8);
pmem_flush_flags(dst8, 64, flags);
len -= 64;
dst8 += 8;
src8 += 8;
}
remaining = len;
while (len >= 8) {
cpy8(dst8, src8);
len -= 8;
dst8++;
src8++;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = 0; i < len; ++i)
*cdst++ = *csrc++;
if (remaining)
pmem_flush_flags(cdst - remaining, remaining, flags);
} else {
cdst += len;
csrc += len;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
if (cnt > len)
cnt = len;
cdst -= cnt;
csrc -= cnt;
len -= cnt;
for (size_t i = cnt; i > 0; --i)
cdst[i - 1] = csrc[i - 1];
pmem_flush_flags(cdst, cnt, flags);
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 64) {
dst8 -= 8;
src8 -= 8;
cpy64(dst8, src8);
pmem_flush_flags(dst8, 64, flags);
len -= 64;
}
remaining = len;
while (len >= 8) {
--dst8;
--src8;
cpy8(dst8, src8);
len -= 8;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = len; i > 0; --i)
*--cdst = *--csrc;
if (remaining)
pmem_flush_flags(cdst, remaining, flags);
}
return dst;
}
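/*
* Usage sketch (illustrative only, hypothetical names): the header comment
* above guarantees that an 8-byte aligned, 8-byte sized copy is performed
* with a single whole 8-byte store (cpy8()), so a crash cannot leave a torn
* 64-bit word behind. Callers such as pmemobj rely on exactly that when
* overwriting a 64-bit field in place.
*/
#if 0
static void
example_overwrite_u64(uint64_t *pmem_dst, const uint64_t *src)
{
	/* aligned dst/src, len == 8: copied atomically, flushed, not drained */
	memmove_nodrain_generic(pmem_dst, src, sizeof(*src), 0);

	/* the caller decides when to pay for the fence */
	pmem_drain();
}
#endif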
/*
* memset_nodrain_generic -- generic memset to pmem without hw drain
*/
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
flags);
(void) flags;
char *cdst = dst;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = (char)c;
pmem_flush_flags(cdst, cnt, flags);
cdst += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
uint64_t u = (unsigned char)c;
uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
(u << 24) | (u << 16) | (u << 8) | u;
while (len >= 64) {
store8(&dst8[0], tmp);
store8(&dst8[1], tmp);
store8(&dst8[2], tmp);
store8(&dst8[3], tmp);
store8(&dst8[4], tmp);
store8(&dst8[5], tmp);
store8(&dst8[6], tmp);
store8(&dst8[7], tmp);
pmem_flush_flags(dst8, 64, flags);
len -= 64;
dst8 += 8;
}
size_t remaining = len;
while (len >= 8) {
store8(dst8, tmp);
len -= 8;
dst8++;
}
cdst = (char *)dst8;
for (size_t i = 0; i < len; ++i)
*cdst++ = (char)c;
if (remaining)
pmem_flush_flags(cdst - remaining, remaining, flags);
return dst;
}
| 7,120 | 25.180147 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/libpmem_main.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmem_main.c -- entry point for libpmem.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include "win_mmap.h"
void libpmem_init(void);
void libpmem_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmem_init();
win_mmap_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
win_mmap_fini();
libpmem_fini();
break;
}
return TRUE;
}
| 2,227 | 32.757576 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem.c -- pmem entry points for libpmem
*
*
* PERSISTENT MEMORY INSTRUCTIONS ON X86
*
* The primary feature of this library is to provide a way to flush
* changes to persistent memory as outlined below (note that many
* of the decisions below are made at initialization time, and not
* repeated every time a flush is requested).
*
* To flush a range to pmem when CLWB is available:
*
* CLWB for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when CLFLUSHOPT is available and CLWB is not
* (same as above but issue CLFLUSHOPT instead of CLWB):
*
* CLFLUSHOPT for each cache line in the given range.
*
* SFENCE to ensure the CLFLUSHOPTs above have completed.
*
* To flush a range to pmem when neither CLFLUSHOPT nor CLWB is available
* (same as above but fences surrounding CLFLUSH are not required):
*
* CLFLUSH for each cache line in the given range.
*
* To memcpy a range of memory to pmem when MOVNT is available:
*
* Copy any non-64-byte portion of the destination using MOV.
*
* Use the flush flow above without the fence for the copied portion.
*
* Copy using MOVNTDQ, up to any non-64-byte aligned end portion.
* (The MOVNT instructions bypass the cache, so no flush is required.)
*
* Copy any unaligned end portion using MOV.
*
* Use the flush flow above for the copied portion (including fence).
*
* To memcpy a range of memory to pmem when MOVNT is not available:
*
* Just pass the call to the normal memcpy() followed by pmem_persist().
*
* To memset a non-trivial sized range of memory to pmem:
*
* Same as the memcpy cases above but store the given value instead
* of reading values from the source.
*
* These features are supported for ARM AARCH64 using equivalent ARM
* assembly instructions. Please refer to arm_cacheops.h for more details.
*
* INTERFACES FOR FLUSHING TO PERSISTENT MEMORY
*
* Given the flows above, three interfaces are provided for flushing a range
* so that the caller has the ability to separate the steps when necessary,
* but otherwise leaves the detection of available instructions to libpmem:
*
* pmem_persist(addr, len)
*
* This is the common case, which just calls the two other functions:
*
* pmem_flush(addr, len);
* pmem_drain();
*
* pmem_flush(addr, len)
*
* CLWB or CLFLUSHOPT or CLFLUSH for each cache line
*
* pmem_drain()
*
* SFENCE unless using CLFLUSH
*
*
* INTERFACES FOR COPYING/SETTING RANGES OF MEMORY
*
* Given the flows above, the following interfaces are provided for the
* memmove/memcpy/memset operations to persistent memory:
*
* pmem_memmove_nodrain()
*
* Checks for overlapped ranges to determine whether to copy from
* the beginning of the range or from the end. If MOVNT instructions
* are available, uses the memory copy flow described above, otherwise
* calls the libc memmove() followed by pmem_flush(). Since no conditional
* compilation and/or architecture specific CFLAGS are in use at the
* moment, SSE2 (and thus MOVNT) is assumed to be available.
*
* pmem_memcpy_nodrain()
*
* Just calls pmem_memmove_nodrain().
*
* pmem_memset_nodrain()
*
* If MOVNT instructions are available, uses the memset flow described
* above, otherwise calls the libc memset() followed by pmem_flush().
*
* pmem_memmove_persist()
* pmem_memcpy_persist()
* pmem_memset_persist()
*
* Calls the appropriate _nodrain() function followed by pmem_drain().
*
*
* DECISIONS MADE AT INITIALIZATION TIME
*
* As much as possible, all decisions described above are made at library
* initialization time. This is achieved using function pointers that are
* setup by pmem_init() when the library loads.
*
* Func_predrain_fence is used by pmem_drain() to call one of:
* predrain_fence_empty()
* predrain_memory_barrier()
*
* Func_flush is used by pmem_flush() to call one of:
* flush_dcache()
* flush_dcache_invalidate_opt()
* flush_dcache_invalidate()
*
* Func_memmove_nodrain is used by memmove_nodrain() to call one of:
* memmove_nodrain_libc()
* memmove_nodrain_movnt()
*
* Func_memset_nodrain is used by memset_nodrain() to call one of:
* memset_nodrain_libc()
* memset_nodrain_movnt()
*
* DEBUG LOGGING
*
* Many of the functions here get called hundreds of times from loops
* iterating over ranges, making the usual LOG() calls at level 3
* impractical. The call tracing log for those functions is set at 15.
*/
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include "libpmem.h"
#include "pmem.h"
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "valgrind_internal.h"
#include "os_deep.h"
#include "os_auto_flush.h"
static struct pmem_funcs Funcs;
/*
* pmem_has_hw_drain -- return whether or not HW drain was found
*
* Always false for x86: HW drain is done by HW with no SW involvement.
*/
int
pmem_has_hw_drain(void)
{
LOG(3, NULL);
return 0;
}
/*
* pmem_drain -- wait for any PM stores to drain from HW buffers
*/
void
pmem_drain(void)
{
LOG(15, NULL);
Funcs.predrain_fence();
}
/*
* pmem_has_auto_flush -- check if platform supports eADR
*/
int
pmem_has_auto_flush()
{
LOG(3, NULL);
return os_auto_flush();
}
/*
* pmem_deep_flush -- flush processor cache for the given range
* regardless of eADR support on platform
*/
void
pmem_deep_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.deep_flush(addr, len);
}
/*
* pmem_flush -- flush processor cache for the given range
*/
void
pmem_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.flush(addr, len);
}
/*
* pmem_persist -- make any cached changes to a range of pmem persistent
*/
void
pmem_persist(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
pmem_flush(addr, len);
pmem_drain();
}
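/*
* Usage sketch (illustrative only, hypothetical names): as described in the
* header comment, pmem_persist() is simply pmem_flush() followed by
* pmem_drain(). Splitting the two lets a caller flush several discontiguous
* ranges and pay for a single drain (fence) at the end.
*/
#if 0
static void
example_persist_two_ranges(const void *a, size_t alen,
	const void *b, size_t blen)
{
	pmem_flush(a, alen);	/* CLWB/CLFLUSHOPT/CLFLUSH per cache line */
	pmem_flush(b, blen);
	pmem_drain();		/* one SFENCE covers both flushed ranges */
}
#endif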
/*
* pmem_msync -- flush to persistence via msync
*
* Using msync() means this routine is less optimal for pmem (but it
* still works) but it also works for any memory mapped file, unlike
* pmem_persist() which is only safe where pmem_is_pmem() returns true.
*/
int
pmem_msync(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
/*
* msync requires len to be a multiple of pagesize, so
* adjust addr and len to represent the full 4k chunks
* covering the given range.
*/
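/*
* For example (assuming a 4 KiB page size): addr == 0x1003 and len == 10
* become uptr == 0x1000 and len == 13, so the msync() below covers every
* page touched by the original range.
*/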
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1);
/*
* msync accepts addresses aligned to page boundary, so we may sync
* more and part of it may have been marked as undefined/inaccessible
* Msyncing such memory is not a bug, so as a workaround temporarily
* disable error reporting.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
int ret;
if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0)
ERR("!msync");
VALGRIND_DO_ENABLE_ERROR_REPORTING;
/* full flush */
VALGRIND_DO_PERSIST(uptr, len);
return ret;
}
/*
* is_pmem_always -- (internal) always true (for meaningful parameters) version
* of pmem_is_pmem()
*/
static int
is_pmem_always(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
return 1;
}
/*
* is_pmem_never -- (internal) never true version of pmem_is_pmem()
*/
static int
is_pmem_never(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return 0;
}
/*
* pmem_is_pmem_init -- (internal) initialize Func_is_pmem pointer
*
* This should be done only once - on the first call to pmem_is_pmem().
* If PMEM_IS_PMEM_FORCE is set, it would override the default behavior
* of pmem_is_pmem().
*/
static void
pmem_is_pmem_init(void)
{
LOG(3, NULL);
static volatile unsigned init;
while (init != 2) {
if (!util_bool_compare_and_swap32(&init, 0, 1))
continue;
/*
* For debugging/testing, allow pmem_is_pmem() to be forced
* to always true or never true using environment variable
* PMEM_IS_PMEM_FORCE values of zero or one.
*
* This isn't #ifdef DEBUG because it has a trivial performance
* impact and it may turn out to be useful as a "chicken bit"
* for systems where pmem_is_pmem() isn't correctly detecting
* true persistent memory.
*/
char *ptr = os_getenv("PMEM_IS_PMEM_FORCE");
if (ptr) {
int val = atoi(ptr);
if (val == 0)
Funcs.is_pmem = is_pmem_never;
else if (val == 1)
Funcs.is_pmem = is_pmem_always;
VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Funcs.is_pmem);
LOG(4, "PMEM_IS_PMEM_FORCE=%d", val);
}
if (Funcs.is_pmem == NULL)
Funcs.is_pmem = is_pmem_never;
if (!util_bool_compare_and_swap32(&init, 1, 2))
FATAL("util_bool_compare_and_swap32");
}
}
/*
* pmem_is_pmem -- return true if entire range is persistent memory
*/
int
pmem_is_pmem(const void *addr, size_t len)
{
LOG(10, "addr %p len %zu", addr, len);
static int once;
/* This is not thread-safe, but pmem_is_pmem_init() is. */
if (once == 0) {
pmem_is_pmem_init();
util_fetch_and_add32(&once, 1);
}
VALGRIND_ANNOTATE_HAPPENS_AFTER(&Funcs.is_pmem);
return Funcs.is_pmem(addr, len);
}
#define PMEM_FILE_ALL_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
#define PMEM_DAX_VALID_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_SPARSE)
/*
* pmem_map_fileU -- create or open the file and map it to memory
*/
#ifndef _WIN32
static inline
#endif
void *
pmem_map_fileU(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p "
"is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp);
int oerrno;
int fd;
int open_flags = O_RDWR;
int delete_on_err = 0;
int file_type = util_file_get_type(path);
if (file_type == OTHER_ERROR)
return NULL;
if (flags & ~(PMEM_FILE_ALL_FLAGS)) {
ERR("invalid flag specified %x", flags);
errno = EINVAL;
return NULL;
}
if (file_type == TYPE_DEVDAX) {
if (flags & ~(PMEM_DAX_VALID_FLAGS)) {
ERR("flag unsupported for Device DAX %x", flags);
errno = EINVAL;
return NULL;
} else {
/* we are ignoring all of the flags */
flags = 0;
ssize_t actual_len = util_file_get_size(path);
if (actual_len < 0) {
ERR("unable to read Device DAX size");
errno = EINVAL;
return NULL;
}
if (len != 0 && len != (size_t)actual_len) {
ERR("Device DAX length must be either 0 or "
"the exact size of the device %zu",
len);
errno = EINVAL;
return NULL;
}
len = 0;
}
}
if (flags & PMEM_FILE_CREATE) {
if ((os_off_t)len < 0) {
ERR("invalid file length %zu", len);
errno = EINVAL;
return NULL;
}
open_flags |= O_CREAT;
}
if (flags & PMEM_FILE_EXCL)
open_flags |= O_EXCL;
if ((len != 0) && !(flags & PMEM_FILE_CREATE)) {
ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((len == 0) && (flags & PMEM_FILE_CREATE)) {
ERR("zero 'len' not allowed with PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) {
ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if (flags & PMEM_FILE_TMPFILE) {
if ((fd = util_tmpfile(path,
OS_DIR_SEP_STR"pmem.XXXXXX",
open_flags & O_EXCL)) < 0) {
LOG(2, "failed to create temporary file at \"%s\"",
path);
return NULL;
}
} else {
if ((fd = os_open(path, open_flags, mode)) < 0) {
ERR("!open %s", path);
return NULL;
}
if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL))
delete_on_err = 1;
}
if (flags & PMEM_FILE_CREATE) {
/*
* Always set length of file to 'len'.
* (May either extend or truncate existing file.)
*/
if (os_ftruncate(fd, (os_off_t)len) != 0) {
ERR("!ftruncate");
goto err;
}
if ((flags & PMEM_FILE_SPARSE) == 0) {
if ((errno = os_posix_fallocate(fd, 0,
(os_off_t)len)) != 0) {
ERR("!posix_fallocate");
goto err;
}
}
} else {
ssize_t actual_size = util_file_get_size(path);
if (actual_size < 0) {
ERR("stat %s: negative size", path);
errno = EINVAL;
goto err;
}
len = (size_t)actual_size;
}
void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX);
if (addr == NULL)
goto err;
if (mapped_lenp != NULL)
*mapped_lenp = len;
if (is_pmemp != NULL)
*is_pmemp = pmem_is_pmem(addr, len);
LOG(3, "returning %p", addr);
VALGRIND_REGISTER_PMEM_MAPPING(addr, len);
VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0);
(void) os_close(fd);
return addr;
err:
oerrno = errno;
(void) os_close(fd);
if (delete_on_err)
(void) os_unlink(path);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmem_map_file -- create or open the file and map it to memory
*/
void *
pmem_map_file(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp);
}
#else
/*
* pmem_map_fileW -- create or open the file and map it to memory
*/
void *
pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp) {
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp,
is_pmemp);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmem_unmap -- unmap the specified region
*/
int
pmem_unmap(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
#ifndef _WIN32
util_range_unregister(addr, len);
#endif
VALGRIND_REMOVE_PMEM_MAPPING(addr, len);
return util_unmap(addr, len);
}
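/*
* Usage sketch (illustrative only; the path, size and message are
* hypothetical): the canonical flow for the mapping API above is map,
* store, persist (or msync when the mapping is not pmem), then unmap.
*/
#if 0
#include <string.h>

static int
example_write_hello(const char *path)
{
	size_t mapped_len;
	int is_pmem;

	char *dst = pmem_map_file(path, 4096, PMEM_FILE_CREATE, 0666,
			&mapped_len, &is_pmem);
	if (dst == NULL)
		return -1;

	strcpy(dst, "hello, persistent memory");

	if (is_pmem)
		pmem_persist(dst, mapped_len);
	else
		pmem_msync(dst, mapped_len);

	return pmem_unmap(dst, mapped_len);
}
#endif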
/*
* pmem_memmove -- memmove to pmem
*/
void *
pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
pmemdest, src, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
return pmemdest;
}
/*
* pmem_memcpy -- memcpy to pmem
*/
void *
pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags)
{
return pmem_memmove(pmemdest, src, len, flags);
}
/*
* pmem_memset -- memset to pmem
*/
void *
pmem_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x",
pmemdest, c, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
return pmemdest;
}
/*
* pmem_memmove_nodrain -- memmove to pmem without hw drain
*/
void *
pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len)
{
return pmem_memmove(pmemdest, src, len, PMEM_F_MEM_NODRAIN);
}
/*
* pmem_memcpy_nodrain -- memcpy to pmem without hw drain
*/
void *
pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len)
{
return pmem_memcpy(pmemdest, src, len, PMEM_F_MEM_NODRAIN);
}
/*
* pmem_memmove_persist -- memmove to pmem
*/
void *
pmem_memmove_persist(void *pmemdest, const void *src, size_t len)
{
return pmem_memmove(pmemdest, src, len, 0);
}
/*
* pmem_memcpy_persist -- memcpy to pmem
*/
void *
pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
{
return pmem_memcpy(pmemdest, src, len, 0);
}
/*
* pmem_memset_nodrain -- memset to pmem without hw drain
*/
void *
pmem_memset_nodrain(void *pmemdest, int c, size_t len)
{
return pmem_memset(pmemdest, c, len, PMEM_F_MEM_NODRAIN);
}
/*
* pmem_memset_persist -- memset to pmem
*/
void *
pmem_memset_persist(void *pmemdest, int c, size_t len)
{
return pmem_memset(pmemdest, c, len, 0);
}
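/*
* Usage sketch (illustrative only, hypothetical names): the flag-taking
* entry points above mirror the pmem_flush()/pmem_drain() split. Passing
* PMEM_F_MEM_NODRAIN defers the fence, so several copies can share a single
* pmem_drain() at the end.
*/
#if 0
static void
example_copy_two_then_drain(void *dst1, void *dst2, const void *src,
	size_t len)
{
	pmem_memcpy(dst1, src, len, PMEM_F_MEM_NODRAIN);
	pmem_memcpy(dst2, src, len, PMEM_F_MEM_NODRAIN);
	pmem_drain();	/* one fence after both copies */
}
#endif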
/*
* pmem_init -- load-time initialization for pmem.c
*/
void
pmem_init(void)
{
LOG(3, NULL);
pmem_init_funcs(&Funcs);
pmem_os_init();
}
/*
* pmem_deep_persist -- perform deep persist on a memory range
*
* It merely acts as a wrapper around an msync call in most cases; the only
* exception is the case of an mmap'ed DAX device on Linux.
*/
int
pmem_deep_persist(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
pmem_deep_flush(addr, len);
return pmem_deep_drain(addr, len);
}
/*
* pmem_deep_drain -- perform deep drain on a memory range
*/
int
pmem_deep_drain(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return os_range_deep_common((uintptr_t)addr, len);
}
| 18,443 | 23.592 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem_windows.c | /*
* Copyright 2016-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_windows.c -- pmem utilities with OS-specific implementation
*/
#include <memoryapi.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
#include "win_mmap.h"
#include "sys/mman.h"
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
typedef BOOL (WINAPI *PQVM)(
HANDLE, const void *,
enum WIN32_MEMORY_INFORMATION_CLASS, PVOID,
SIZE_T, PSIZE_T);
static PQVM Func_qvmi = NULL;
#endif
/*
* is_direct_mapped -- (internal) for each page in the given region
* checks with MM, if it's direct mapped.
*/
static int
is_direct_mapped(const void *begin, const void *end)
{
LOG(3, "begin %p end %p", begin, end);
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
int retval = 1;
WIN32_MEMORY_REGION_INFORMATION region_info;
SIZE_T bytes_returned;
if (Func_qvmi == NULL) {
LOG(4, "QueryVirtualMemoryInformation not supported, "
"assuming non-DAX.");
return 0;
}
const void *begin_aligned = (const void *)rounddown((intptr_t)begin,
Pagesize);
const void *end_aligned = (const void *)roundup((intptr_t)end,
Pagesize);
for (const void *page = begin_aligned;
page < end_aligned;
page = (const void *)((char *)page + Pagesize)) {
if (Func_qvmi(GetCurrentProcess(), page,
MemoryRegionInfo, ®ion_info,
sizeof(region_info), &bytes_returned)) {
retval = region_info.DirectMapped;
} else {
LOG(4, "QueryVirtualMemoryInformation failed, assuming "
"non-DAX. Last error: %08x", GetLastError());
retval = 0;
}
if (retval == 0) {
LOG(4, "page %p is not direct mapped", page);
break;
}
}
return retval;
#else
/* if the MM API is not available the safest answer is NO */
return 0;
#endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */
}
/*
* is_pmem_detect -- implement pmem_is_pmem()
*
* This function returns true only if the entire range can be confirmed
* as being direct access persistent memory. Finding any part of the
* range is not direct access, or failing to look up the information
* because it is unmapped or because any sort of error happens, just
* results in returning false.
*/
int
is_pmem_detect(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
if (len > UINTPTR_MAX - (uintptr_t)addr) {
len = UINTPTR_MAX - (uintptr_t)addr;
LOG(4, "limit len to %zu to not get beyond address space", len);
}
int retval = 1;
const void *begin = addr;
const void *end = (const void *)((char *)addr + len);
LOG(4, "begin %p end %p", begin, end);
AcquireSRWLockShared(&FileMappingQLock);
PFILE_MAPPING_TRACKER mt;
SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) {
if (mt->BaseAddress >= end) {
LOG(4, "ignoring all mapped ranges beyond given range");
break;
}
if (mt->EndAddress <= begin) {
LOG(4, "skipping all mapped ranges before given range");
continue;
}
if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) {
LOG(4, "tracked range [%p, %p) is not direct mapped",
mt->BaseAddress, mt->EndAddress);
retval = 0;
break;
}
/*
* If there is a gap between the given region that we process
* currently and the mapped region in our tracking list, we
* need to process the gap by taking the long route of asking
* MM for each page in that range.
*/
if (begin < mt->BaseAddress &&
!is_direct_mapped(begin, mt->BaseAddress)) {
LOG(4, "untracked range [%p, %p) is not direct mapped",
begin, mt->BaseAddress);
retval = 0;
break;
}
/* push our begin to reflect what we have already processed */
begin = mt->EndAddress;
}
/*
* If we still have a range to verify, check with MM if the entire
* region is direct mapped.
*/
if (begin < end && !is_direct_mapped(begin, end)) {
LOG(4, "untracked end range [%p, %p) is not direct mapped",
begin, end);
retval = 0;
}
ReleaseSRWLockShared(&FileMappingQLock);
LOG(4, "returning %d", retval);
return retval;
}
/*
* pmem_map_register -- memory map file and register mapping
*/
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
/* there is no device dax on windows */
ASSERTeq(is_dev_dax, 0);
return util_map(fd, len, MAP_SHARED, 0, 0, NULL);
}
/*
* pmem_os_init -- os-dependent part of pmem initialization
*/
void
pmem_os_init(void)
{
LOG(3, NULL);
#if NTDDI_VERSION >= NTDDI_WIN10_RS1
Func_qvmi = (PQVM)GetProcAddress(
GetModuleHandle(TEXT("KernelBase.dll")),
"QueryVirtualMemoryInformation");
#endif
}
| 6,094 | 27.615023 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem.h -- internal definitions for libpmem
*/
#ifndef PMEM_H
#define PMEM_H
#include <stddef.h>
#include "libpmem.h"
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEM_LOG_PREFIX "libpmem"
#define PMEM_LOG_LEVEL_VAR "PMEM_LOG_LEVEL"
#define PMEM_LOG_FILE_VAR "PMEM_LOG_FILE"
typedef void (*predrain_fence_func)(void);
typedef void (*flush_func)(const void *, size_t);
typedef int (*is_pmem_func)(const void *addr, size_t len);
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
size_t len, unsigned flags);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
unsigned flags);
struct pmem_funcs {
predrain_fence_func predrain_fence;
flush_func flush;
is_pmem_func is_pmem;
memmove_nodrain_func memmove_nodrain;
memset_nodrain_func memset_nodrain;
flush_func deep_flush;
};
void pmem_init(void);
void pmem_os_init(void);
void pmem_init_funcs(struct pmem_funcs *funcs);
int is_pmem_detect(const void *addr, size_t len);
void *pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax);
/*
* flush_empty_nolog -- (internal) do not flush the CPU cache
*/
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
/* NOP */
}
/*
* flush64b_empty -- (internal) do not flush the CPU cache
*/
static force_inline void
flush64b_empty(const char *addr)
{
}
/*
* pmem_flush_flags -- internal wrapper around pmem_flush
*/
static inline void
pmem_flush_flags(const void *addr, size_t len, unsigned flags)
{
if (!(flags & PMEM_F_MEM_NOFLUSH))
pmem_flush(addr, len);
}
void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
unsigned flags);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags);
#ifdef __cplusplus
}
#endif
#endif
| 3,394 | 29.585586 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/flush.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef X86_64_FLUSH_H
#define X86_64_FLUSH_H
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#define FLUSH_ALIGN ((uintptr_t)64)
#ifdef _MSC_VER
#define pmem_clflushopt _mm_clflushopt
#define pmem_clwb _mm_clwb
#else
/*
* The x86 memory instructions are new enough that the compiler
* intrinsic functions are not always available. The intrinsic
* functions are defined here in terms of asm statements for now.
*/
#define pmem_clflushopt(addr)\
asm volatile(".byte 0x66; clflush %0" : "+m" \
(*(volatile char *)(addr)));
#define pmem_clwb(addr)\
asm volatile(".byte 0x66; xsaveopt %0" : "+m" \
(*(volatile char *)(addr)));
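/*
 * Note (added for clarity): the 0x66 prefix is what turns the legacy
 * encodings into the new instructions. CLFLUSHOPT is 66 0F AE /7, i.e.
 * CLFLUSH with the prefix, and CLWB is 66 0F AE /6, i.e. the memory form
 * of XSAVEOPT with the prefix. That is why the macros above can emit the
 * new instructions on assemblers that do not know the mnemonics yet.
 */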
#endif /* _MSC_VER */
/*
* flush_clflush_nolog -- flush the CPU cache, using clflush
*/
static force_inline void
flush_clflush_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN)
_mm_clflush((char *)uptr);
}
/*
* flush_clflushopt_nolog -- flush the CPU cache, using clflushopt
*/
static force_inline void
flush_clflushopt_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clflushopt((char *)uptr);
}
}
/*
* flush_clwb_nolog -- flush the CPU cache, using clwb
*/
static force_inline void
flush_clwb_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clwb((char *)uptr);
}
}
#endif
| 3,520 | 29.885965 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/cpu.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cpu.c -- CPU features detection
*
 * These routines do not work on AARCH64 platforms and need new detection
 * routines to be added. Currently, to ensure that msync is not used and the
 * ARM flush instructions are used instead, PMEM_IS_PMEM_FORCE=1 needs to be
 * set.
*/
/*
* Reference:
* http://www.intel.com/content/www/us/en/processors/
* architectures-software-developer-manuals.html
*
* https://support.amd.com/TechDocs/24594.pdf
*/
#include <string.h>
#include "out.h"
#include "cpu.h"
#define EAX_IDX 0
#define EBX_IDX 1
#define ECX_IDX 2
#define EDX_IDX 3
#if defined(__x86_64__) || defined(__amd64__)
#include <cpuid.h>
static inline void
cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4])
{
__cpuid_count(func, subfunc, cpuinfo[EAX_IDX], cpuinfo[EBX_IDX],
cpuinfo[ECX_IDX], cpuinfo[EDX_IDX]);
}
#elif defined(_M_X64) || defined(_M_AMD64)
#include <intrin.h>
static inline void
cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4])
{
__cpuidex(cpuinfo, func, subfunc);
}
#else /* not x86_64 */
#define cpuid(func, subfunc, cpuinfo)\
do { (void)(func); (void)(subfunc); (void)(cpuinfo); } while (0)
#endif
#ifndef bit_CLFLUSH
#define bit_CLFLUSH (1 << 19)
#endif
#ifndef bit_CLFLUSHOPT
#define bit_CLFLUSHOPT (1 << 23)
#endif
#ifndef bit_CLWB
#define bit_CLWB (1 << 24)
#endif
#ifndef bit_AVX
#define bit_AVX (1 << 28)
#endif
#ifndef bit_AVX512F
#define bit_AVX512F (1 << 16)
#endif
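/*
 * The bits above correspond to the CPUID leaves queried below: CLFLUSH and
 * AVX come from leaf 0x1 (EDX and ECX respectively), while CLFLUSHOPT,
 * CLWB and AVX512F come from leaf 0x7 sub-leaf 0 (EBX).
 */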
/*
* is_cpu_feature_present -- (internal) checks if CPU feature is supported
*/
static int
is_cpu_feature_present(unsigned func, unsigned reg, unsigned bit)
{
unsigned cpuinfo[4] = { 0 };
/* check CPUID level first */
cpuid(0x0, 0x0, cpuinfo);
if (cpuinfo[EAX_IDX] < func)
return 0;
cpuid(func, 0x0, cpuinfo);
return (cpuinfo[reg] & bit) != 0;
}
/*
* is_cpu_genuine_intel -- checks for genuine Intel CPU
*/
int
is_cpu_genuine_intel(void)
{
unsigned cpuinfo[4] = { 0 };
union {
char name[0x20];
unsigned cpuinfo[3];
} vendor;
memset(&vendor, 0, sizeof(vendor));
cpuid(0x0, 0x0, cpuinfo);
vendor.cpuinfo[0] = cpuinfo[EBX_IDX];
vendor.cpuinfo[1] = cpuinfo[EDX_IDX];
vendor.cpuinfo[2] = cpuinfo[ECX_IDX];
LOG(4, "CPU vendor: %s", vendor.name);
return (strncmp(vendor.name, "GenuineIntel",
sizeof(vendor.name))) == 0;
}
/*
* is_cpu_clflush_present -- checks if CLFLUSH instruction is supported
*/
int
is_cpu_clflush_present(void)
{
int ret = is_cpu_feature_present(0x1, EDX_IDX, bit_CLFLUSH);
LOG(4, "CLFLUSH %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_clflushopt_present -- checks if CLFLUSHOPT instruction is supported
*/
int
is_cpu_clflushopt_present(void)
{
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLFLUSHOPT);
LOG(4, "CLFLUSHOPT %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_clwb_present -- checks if CLWB instruction is supported
*/
int
is_cpu_clwb_present(void)
{
if (!is_cpu_genuine_intel())
return 0;
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLWB);
LOG(4, "CLWB %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_avx_present -- checks if AVX instructions are supported
*/
int
is_cpu_avx_present(void)
{
int ret = is_cpu_feature_present(0x1, ECX_IDX, bit_AVX);
LOG(4, "AVX %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_avx512f_present -- checks if AVX-512f instructions are supported
*/
int
is_cpu_avx512f_present(void)
{
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_AVX512F);
LOG(4, "AVX512f %ssupported", ret == 0 ? "not " : "");
return ret;
}
| 5,154 | 23.316038 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/cpu.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMDK_CPU_H
#define PMDK_CPU_H 1
/*
* cpu.h -- definitions for "cpu" module
*/
int is_cpu_genuine_intel(void);
int is_cpu_clflush_present(void);
int is_cpu_clflushopt_present(void);
int is_cpu_clwb_present(void);
int is_cpu_avx_present(void);
int is_cpu_avx512f_present(void);
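/*
 * Illustrative sketch (not part of the module): picking the strongest
 * available cache-flush primitive from the detectors above. The enum and
 * helper below are assumptions made for this example only; the real
 * selection lives in the libpmem init code.
 */
enum example_flush_kind {
	EXAMPLE_FLUSH_CLFLUSH,
	EXAMPLE_FLUSH_CLFLUSHOPT,
	EXAMPLE_FLUSH_CLWB
};
static inline enum example_flush_kind
example_pick_flush(void)
{
	if (is_cpu_clwb_present())
		return EXAMPLE_FLUSH_CLWB;
	if (is_cpu_clflushopt_present())
		return EXAMPLE_FLUSH_CLFLUSHOPT;
	return EXAMPLE_FLUSH_CLFLUSH;
}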
#endif
| 1,898 | 38.5625 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/init.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <xmmintrin.h>
#include "libpmem.h"
#include "cpu.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "os.h"
#include "out.h"
#include "pmem.h"
#include "valgrind_internal.h"
#define MOVNT_THRESHOLD 256
size_t Movnt_threshold = MOVNT_THRESHOLD;
/*
* predrain_fence_empty -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_fence_empty(void)
{
LOG(15, NULL);
VALGRIND_DO_FENCE;
/* nothing to do (because CLFLUSH did it for us) */
}
/*
* predrain_memory_barrier -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_memory_barrier(void)
{
LOG(15, NULL);
_mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */
}
/*
* flush_clflush -- (internal) flush the CPU cache, using clflush
*/
static void
flush_clflush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflush_nolog(addr, len);
}
/*
* flush_clflushopt -- (internal) flush the CPU cache, using clflushopt
*/
static void
flush_clflushopt(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflushopt_nolog(addr, len);
}
/*
* flush_clwb -- (internal) flush the CPU cache, using clwb
*/
static void
flush_clwb(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clwb_nolog(addr, len);
}
/*
* flush_empty -- (internal) do not flush the CPU cache
*/
static void
flush_empty(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_empty_nolog(addr, len);
}
#if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE
#define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL)
#define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL)
#define MEMCPY_TEMPLATE(isa, flush) \
static void *\
memmove_nodrain_##isa##_##flush(void *dest, const void *src, size_t len, \
unsigned flags)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM_F_MEM_NOFLUSH) \
memmove_mov_##isa##_empty(dest, src, len); \
else if (flags & PMEM_F_MEM_MOVNT)\
memmove_movnt_##isa ##_##flush(dest, src, len);\
else if (flags & PMEM_F_MEM_MOV)\
memmove_mov_##isa##_##flush(dest, src, len);\
else if (len < Movnt_threshold)\
memmove_mov_##isa##_##flush(dest, src, len);\
else\
memmove_movnt_##isa##_##flush(dest, src, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE(isa, flush)\
static void *\
memset_nodrain_##isa##_##flush(void *dest, int c, size_t len, unsigned flags)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM_F_MEM_NOFLUSH) \
memset_mov_##isa##_empty(dest, c, len); \
else if (flags & PMEM_F_MEM_MOVNT)\
memset_movnt_##isa##_##flush(dest, c, len);\
else if (flags & PMEM_F_MEM_MOV)\
memset_mov_##isa##_##flush(dest, c, len);\
else if (len < Movnt_threshold)\
memset_mov_##isa##_##flush(dest, c, len);\
else\
memset_movnt_##isa##_##flush(dest, c, len);\
\
return dest;\
}
#endif
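/*
 * Note on the templates above: the dispatch order is deliberate. An explicit
 * PMEM_F_MEM_NOFLUSH request wins, then an explicit non-temporal or temporal
 * hint, and only when the caller expressed no preference does the
 * Movnt_threshold heuristic decide between regular and non-temporal stores.
 */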
#if SSE2_AVAILABLE
MEMCPY_TEMPLATE(sse2, clflush)
MEMCPY_TEMPLATE(sse2, clflushopt)
MEMCPY_TEMPLATE(sse2, clwb)
MEMCPY_TEMPLATE(sse2, empty)
MEMSET_TEMPLATE(sse2, clflush)
MEMSET_TEMPLATE(sse2, clflushopt)
MEMSET_TEMPLATE(sse2, clwb)
MEMSET_TEMPLATE(sse2, empty)
#endif
#if AVX_AVAILABLE
MEMCPY_TEMPLATE(avx, clflush)
MEMCPY_TEMPLATE(avx, clflushopt)
MEMCPY_TEMPLATE(avx, clwb)
MEMCPY_TEMPLATE(avx, empty)
MEMSET_TEMPLATE(avx, clflush)
MEMSET_TEMPLATE(avx, clflushopt)
MEMSET_TEMPLATE(avx, clwb)
MEMSET_TEMPLATE(avx, empty)
#endif
#if AVX512F_AVAILABLE
MEMCPY_TEMPLATE(avx512f, clflush)
MEMCPY_TEMPLATE(avx512f, clflushopt)
MEMCPY_TEMPLATE(avx512f, clwb)
MEMCPY_TEMPLATE(avx512f, empty)
MEMSET_TEMPLATE(avx512f, clflush)
MEMSET_TEMPLATE(avx512f, clflushopt)
MEMSET_TEMPLATE(avx512f, clwb)
MEMSET_TEMPLATE(avx512f, empty)
#endif
/*
* memmove_nodrain_libc -- (internal) memmove to pmem using libc
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
(void) flags;
memmove(pmemdest, src, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem using libc
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
(void) flags;
memset(pmemdest, c, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
enum memcpy_impl {
MEMCPY_INVALID,
MEMCPY_LIBC,
MEMCPY_GENERIC,
MEMCPY_SSE2,
MEMCPY_AVX,
MEMCPY_AVX512F
};
/*
* use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible
*/
static void
use_sse2_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl)
{
#if SSE2_AVAILABLE
*impl = MEMCPY_SSE2;
if (funcs->deep_flush == flush_clflush)
funcs->memmove_nodrain = memmove_nodrain_sse2_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memmove_nodrain = memmove_nodrain_sse2_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memmove_nodrain = memmove_nodrain_sse2_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memmove_nodrain = memmove_nodrain_sse2_empty;
else
ASSERT(0);
if (funcs->deep_flush == flush_clflush)
funcs->memset_nodrain = memset_nodrain_sse2_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memset_nodrain = memset_nodrain_sse2_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memset_nodrain = memset_nodrain_sse2_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memset_nodrain = memset_nodrain_sse2_empty;
else
ASSERT(0);
#else
LOG(3, "sse2 disabled at build time");
#endif
}
/*
* use_avx_memcpy_memset -- (internal) AVX detected, use it if possible
*/
static void
use_avx_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl)
{
#if AVX_AVAILABLE
LOG(3, "avx supported");
char *e = os_getenv("PMEM_AVX");
if (e == NULL || strcmp(e, "1") != 0) {
LOG(3, "PMEM_AVX not set or not == 1");
return;
}
LOG(3, "PMEM_AVX enabled");
*impl = MEMCPY_AVX;
if (funcs->deep_flush == flush_clflush)
funcs->memmove_nodrain = memmove_nodrain_avx_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memmove_nodrain = memmove_nodrain_avx_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memmove_nodrain = memmove_nodrain_avx_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memmove_nodrain = memmove_nodrain_avx_empty;
else
ASSERT(0);
if (funcs->deep_flush == flush_clflush)
funcs->memset_nodrain = memset_nodrain_avx_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memset_nodrain = memset_nodrain_avx_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memset_nodrain = memset_nodrain_avx_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memset_nodrain = memset_nodrain_avx_empty;
else
ASSERT(0);
#else
LOG(3, "avx supported, but disabled at build time");
#endif
}
/*
* use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible
*/
static void
use_avx512f_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl)
{
#if AVX512F_AVAILABLE
LOG(3, "avx512f supported");
char *e = os_getenv("PMEM_AVX512F");
if (e == NULL || strcmp(e, "1") != 0) {
LOG(3, "PMEM_AVX512F not set or not == 1");
return;
}
LOG(3, "PMEM_AVX512F enabled");
*impl = MEMCPY_AVX512F;
if (funcs->deep_flush == flush_clflush)
funcs->memmove_nodrain = memmove_nodrain_avx512f_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memmove_nodrain = memmove_nodrain_avx512f_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memmove_nodrain = memmove_nodrain_avx512f_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memmove_nodrain = memmove_nodrain_avx512f_empty;
else
ASSERT(0);
if (funcs->deep_flush == flush_clflush)
funcs->memset_nodrain = memset_nodrain_avx512f_clflush;
else if (funcs->deep_flush == flush_clflushopt)
funcs->memset_nodrain = memset_nodrain_avx512f_clflushopt;
else if (funcs->deep_flush == flush_clwb)
funcs->memset_nodrain = memset_nodrain_avx512f_clwb;
else if (funcs->deep_flush == flush_empty)
funcs->memset_nodrain = memset_nodrain_avx512f_empty;
else
ASSERT(0);
#else
LOG(3, "avx512f supported, but disabled at build time");
#endif
}
/*
 * pmem_cpuinfo_to_funcs -- (internal) configure pmem functions based on CPUID
*/
static void
pmem_cpuinfo_to_funcs(struct pmem_funcs *funcs, enum memcpy_impl *impl)
{
LOG(3, NULL);
if (is_cpu_clflush_present()) {
funcs->is_pmem = is_pmem_detect;
LOG(3, "clflush supported");
}
if (is_cpu_clflushopt_present()) {
LOG(3, "clflushopt supported");
char *e = os_getenv("PMEM_NO_CLFLUSHOPT");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt");
} else {
funcs->deep_flush = flush_clflushopt;
funcs->predrain_fence = predrain_memory_barrier;
}
}
if (is_cpu_clwb_present()) {
LOG(3, "clwb supported");
char *e = os_getenv("PMEM_NO_CLWB");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLWB forced no clwb");
} else {
funcs->deep_flush = flush_clwb;
funcs->predrain_fence = predrain_memory_barrier;
}
}
char *ptr = os_getenv("PMEM_NO_MOVNT");
if (ptr && strcmp(ptr, "1") == 0) {
LOG(3, "PMEM_NO_MOVNT forced no movnt");
} else {
use_sse2_memcpy_memset(funcs, impl);
if (is_cpu_avx_present())
use_avx_memcpy_memset(funcs, impl);
if (is_cpu_avx512f_present())
use_avx512f_memcpy_memset(funcs, impl);
}
}
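/*
 * Summary (added for clarity): the routine above honors the following
 * environment overrides, all checked at library init time:
 * PMEM_NO_CLFLUSHOPT, PMEM_NO_CLWB, PMEM_NO_MOVNT, PMEM_AVX and
 * PMEM_AVX512F (the last two must be set to 1 to opt in).
 */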
/*
* pmem_init_funcs -- initialize architecture-specific list of pmem operations
*/
void
pmem_init_funcs(struct pmem_funcs *funcs)
{
LOG(3, NULL);
funcs->predrain_fence = predrain_fence_empty;
funcs->deep_flush = flush_clflush;
funcs->is_pmem = NULL;
funcs->memmove_nodrain = memmove_nodrain_generic;
funcs->memset_nodrain = memset_nodrain_generic;
enum memcpy_impl impl = MEMCPY_GENERIC;
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
if (ptr) {
long long val = atoll(ptr);
if (val) {
funcs->memmove_nodrain = memmove_nodrain_libc;
funcs->memset_nodrain = memset_nodrain_libc;
impl = MEMCPY_LIBC;
}
}
pmem_cpuinfo_to_funcs(funcs, &impl);
/*
* For testing, allow overriding the default threshold
* for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*()
* and pmem_memset_*().
* It has no effect if movnt is not supported or disabled.
*/
ptr = os_getenv("PMEM_MOVNT_THRESHOLD");
if (ptr) {
long long val = atoll(ptr);
if (val < 0) {
LOG(3, "Invalid PMEM_MOVNT_THRESHOLD");
} else {
LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val);
Movnt_threshold = (size_t)val;
}
}
int flush;
char *e = os_getenv("PMEM_NO_FLUSH");
if (e && (strcmp(e, "1") == 0)) {
flush = 0;
LOG(3, "Forced not flushing CPU_cache");
} else if (e && (strcmp(e, "0") == 0)) {
flush = 1;
LOG(3, "Forced flushing CPU_cache");
} else if (pmem_has_auto_flush() == 1) {
flush = 0;
LOG(3, "Not flushing CPU_cache, eADR detected");
} else {
flush = 1;
LOG(3, "Flushing CPU cache");
}
if (flush) {
funcs->flush = funcs->deep_flush;
} else {
funcs->flush = flush_empty;
funcs->predrain_fence = predrain_memory_barrier;
}
if (funcs->deep_flush == flush_clwb)
LOG(3, "using clwb");
else if (funcs->deep_flush == flush_clflushopt)
LOG(3, "using clflushopt");
else if (funcs->deep_flush == flush_clflush)
LOG(3, "using clflush");
else
FATAL("invalid deep flush function address");
if (funcs->flush == flush_empty)
LOG(3, "not flushing CPU cache");
else if (funcs->flush != funcs->deep_flush)
FATAL("invalid flush function address");
if (impl == MEMCPY_AVX512F)
LOG(3, "using movnt AVX512F");
else if (impl == MEMCPY_AVX)
LOG(3, "using movnt AVX");
else if (impl == MEMCPY_SSE2)
LOG(3, "using movnt SSE2");
else if (impl == MEMCPY_LIBC)
LOG(3, "using libc memmove");
else if (impl == MEMCPY_GENERIC)
LOG(3, "using generic memmove");
else
FATAL("invalid memcpy impl");
}
| 13,566 | 25.654224 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_AVX_H
#define PMEM_AVX_H
#include <immintrin.h>
#include "util.h"
/*
* avx_zeroupper -- _mm256_zeroupper wrapper
*
* _mm256_zeroupper clears upper parts of avx registers.
*
* It's needed for 2 reasons:
* - it improves performance of non-avx code after avx
 * - it works around a problem discovered by Valgrind
*
* In optimized builds gcc inserts VZEROUPPER automatically before
 * calling non-avx code (or at the end of the function). But in debug
* builds it doesn't, so if we don't do this by ourselves, then when
* someone memcpy'ies uninitialized data, Valgrind complains whenever
* someone reads those registers.
*
 * One notable example is the loader, which tries to detect whether it
* needs to save whole ymm registers by looking at their current
* (possibly uninitialized) value.
*
* Valgrind complains like that:
* Conditional jump or move depends on uninitialised value(s)
* at 0x4015CC9: _dl_runtime_resolve_avx_slow
* (in /lib/x86_64-linux-gnu/ld-2.24.so)
* by 0x10B531: test_realloc_api (obj_basic_integration.c:185)
* by 0x10F1EE: main (obj_basic_integration.c:594)
*
* Note: We have to be careful to not read AVX registers after this
* intrinsic, because of this stupid gcc bug:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735
*/
static force_inline void
avx_zeroupper(void)
{
_mm256_zeroupper();
}
static force_inline __m128i
m256_get16b(__m256i ymm)
{
return _mm256_extractf128_si256(ymm, 0);
}
#ifdef _MSC_VER
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)m256_get8b(ymm);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)m256_get8b(ymm);
}
#else
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm256_extract_epi64(ymm, 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)_mm256_extract_epi32(ymm, 0);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)_mm256_extract_epi16(ymm, 0);
}
#endif
#endif
| 3,753 | 31.362069 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy_memset.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MEMCPY_MEMSET_H
#define MEMCPY_MEMSET_H
#include <stddef.h>
#include <xmmintrin.h>
#include "pmem.h"
static inline void
barrier_after_ntstores(void)
{
/*
* In this configuration pmem_drain does not contain sfence, so we have
* to serialize non-temporal store instructions.
*/
_mm_sfence();
}
static inline void
no_barrier_after_ntstores(void)
{
/*
* In this configuration pmem_drain contains sfence, so we don't have
* to serialize non-temporal store instructions
*/
}
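/*
 * Added for clarity: the clflush-based variants in this module pair with
 * barrier_after_ntstores() (clflush is ordered, so pmem_drain carries no
 * sfence and the non-temporal stores must be fenced here), while the
 * clflushopt/clwb variants pair with no_barrier_after_ntstores(), because
 * their drain already issues the sfence.
 */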
#ifndef AVX512F_AVAILABLE
/* XXX not supported in MSVC version we currently use */
#ifdef _MSC_VER
#define AVX512F_AVAILABLE 0
#else
#define AVX512F_AVAILABLE 1
#endif
#endif
#ifndef AVX_AVAILABLE
#define AVX_AVAILABLE 1
#endif
#ifndef SSE2_AVAILABLE
#define SSE2_AVAILABLE 1
#endif
#if SSE2_AVAILABLE
void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_mov_sse2_empty(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_empty(char *dest, const char *src, size_t len);
void memset_mov_sse2_clflush(char *dest, int c, size_t len);
void memset_mov_sse2_clflushopt(char *dest, int c, size_t len);
void memset_mov_sse2_clwb(char *dest, int c, size_t len);
void memset_mov_sse2_empty(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb(char *dest, int c, size_t len);
void memset_movnt_sse2_empty(char *dest, int c, size_t len);
#endif
#if AVX_AVAILABLE
void memmove_mov_avx_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx_empty(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx_empty(char *dest, const char *src, size_t len);
void memset_mov_avx_clflush(char *dest, int c, size_t len);
void memset_mov_avx_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx_clwb(char *dest, int c, size_t len);
void memset_mov_avx_empty(char *dest, int c, size_t len);
void memset_movnt_avx_clflush(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx_clwb(char *dest, int c, size_t len);
void memset_movnt_avx_empty(char *dest, int c, size_t len);
#endif
#if AVX512F_AVAILABLE
void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len);
void memset_mov_avx512f_clflush(char *dest, int c, size_t len);
void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx512f_clwb(char *dest, int c, size_t len);
void memset_mov_avx512f_empty(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx512f_clwb(char *dest, int c, size_t len);
void memset_movnt_avx512f_empty(char *dest, int c, size_t len);
#endif
extern size_t Movnt_threshold;
#endif
| 5,754 | 41.316176 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_movnt_avx_clflush
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_avx.h"
| 1,757 | 46.513514 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMSET_AVX512F_H
#define PMEM_MEMSET_AVX512F_H
#include <stddef.h>
#include "memset_avx.h"
static force_inline void
memset_small_avx512f(char *dest, __m256i ymm, size_t len)
{
/* We can't do better than AVX here. */
memset_small_avx(dest, ymm, len);
}
#endif
| 1,880 | 38.1875 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_mov_avx_clflush
#include "memset_t_avx.h"
| 1,738 | 46 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_mov_sse2_empty
#include "memset_t_sse2.h"
| 1,739 | 46.027027 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_mov_sse2_clwb
#include "memset_t_sse2.h"
| 1,732 | 45.837838 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_movnt_avx_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_avx.h"
| 1,754 | 46.432432 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_mov_avx_clwb
#include "memset_t_avx.h"
| 1,730 | 45.783784 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "libpmem.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
#include "out.h"
#include "valgrind_internal.h"
static force_inline void
memset_movnt4x64b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest + 0, xmm);
_mm_stream_si128((__m128i *)dest + 1, xmm);
_mm_stream_si128((__m128i *)dest + 2, xmm);
_mm_stream_si128((__m128i *)dest + 3, xmm);
_mm_stream_si128((__m128i *)dest + 4, xmm);
_mm_stream_si128((__m128i *)dest + 5, xmm);
_mm_stream_si128((__m128i *)dest + 6, xmm);
_mm_stream_si128((__m128i *)dest + 7, xmm);
_mm_stream_si128((__m128i *)dest + 8, xmm);
_mm_stream_si128((__m128i *)dest + 9, xmm);
_mm_stream_si128((__m128i *)dest + 10, xmm);
_mm_stream_si128((__m128i *)dest + 11, xmm);
_mm_stream_si128((__m128i *)dest + 12, xmm);
_mm_stream_si128((__m128i *)dest + 13, xmm);
_mm_stream_si128((__m128i *)dest + 14, xmm);
_mm_stream_si128((__m128i *)dest + 15, xmm);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memset_movnt2x64b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest + 0, xmm);
_mm_stream_si128((__m128i *)dest + 1, xmm);
_mm_stream_si128((__m128i *)dest + 2, xmm);
_mm_stream_si128((__m128i *)dest + 3, xmm);
_mm_stream_si128((__m128i *)dest + 4, xmm);
_mm_stream_si128((__m128i *)dest + 5, xmm);
_mm_stream_si128((__m128i *)dest + 6, xmm);
_mm_stream_si128((__m128i *)dest + 7, xmm);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memset_movnt1x64b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest + 0, xmm);
_mm_stream_si128((__m128i *)dest + 1, xmm);
_mm_stream_si128((__m128i *)dest + 2, xmm);
_mm_stream_si128((__m128i *)dest + 3, xmm);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memset_movnt1x32b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest + 0, xmm);
_mm_stream_si128((__m128i *)dest + 1, xmm);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memset_movnt1x16b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest, xmm);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memset_movnt1x8b(char *dest, __m128i xmm)
{
uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm);
_mm_stream_si64((long long *)dest, (long long)x);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memset_movnt1x4b(char *dest, __m128i xmm)
{
uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm);
_mm_stream_si32((int *)dest, (int)x);
VALGRIND_DO_FLUSH(dest, 4);
}
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
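	/*
	 * Added for clarity: align the head to a 64-byte cache line with
	 * regular stores, stream whole cache lines in 4x/2x/1x 64 B chunks,
	 * use a single non-temporal store for small power-of-two tails and
	 * fall back to regular stores (plus flush) for anything else.
	 */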
__m128i xmm = _mm_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_sse2(dest, xmm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 4 * 64) {
memset_movnt4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_movnt2x64b(dest, xmm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_movnt1x64b(dest, xmm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memset_movnt1x32b(dest, xmm);
else if (len == 16)
memset_movnt1x16b(dest, xmm);
else if (len == 8)
memset_movnt1x8b(dest, xmm);
else if (len == 4)
memset_movnt1x4b(dest, xmm);
else
goto nonnt;
goto end;
}
nonnt:
memset_small_sse2(dest, xmm, len);
end:
maybe_barrier();
}
| 5,136 | 25.755208 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
static force_inline void
memset_mov4x64b(char *dest, __m128i xmm)
{
_mm_store_si128((__m128i *)dest + 0, xmm);
_mm_store_si128((__m128i *)dest + 1, xmm);
_mm_store_si128((__m128i *)dest + 2, xmm);
_mm_store_si128((__m128i *)dest + 3, xmm);
_mm_store_si128((__m128i *)dest + 4, xmm);
_mm_store_si128((__m128i *)dest + 5, xmm);
_mm_store_si128((__m128i *)dest + 6, xmm);
_mm_store_si128((__m128i *)dest + 7, xmm);
_mm_store_si128((__m128i *)dest + 8, xmm);
_mm_store_si128((__m128i *)dest + 9, xmm);
_mm_store_si128((__m128i *)dest + 10, xmm);
_mm_store_si128((__m128i *)dest + 11, xmm);
_mm_store_si128((__m128i *)dest + 12, xmm);
_mm_store_si128((__m128i *)dest + 13, xmm);
_mm_store_si128((__m128i *)dest + 14, xmm);
_mm_store_si128((__m128i *)dest + 15, xmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memset_mov2x64b(char *dest, __m128i xmm)
{
_mm_store_si128((__m128i *)dest + 0, xmm);
_mm_store_si128((__m128i *)dest + 1, xmm);
_mm_store_si128((__m128i *)dest + 2, xmm);
_mm_store_si128((__m128i *)dest + 3, xmm);
_mm_store_si128((__m128i *)dest + 4, xmm);
_mm_store_si128((__m128i *)dest + 5, xmm);
_mm_store_si128((__m128i *)dest + 6, xmm);
_mm_store_si128((__m128i *)dest + 7, xmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memset_mov1x64b(char *dest, __m128i xmm)
{
_mm_store_si128((__m128i *)dest + 0, xmm);
_mm_store_si128((__m128i *)dest + 1, xmm);
_mm_store_si128((__m128i *)dest + 2, xmm);
_mm_store_si128((__m128i *)dest + 3, xmm);
flush64b(dest + 0 * 64);
}
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
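	/*
	 * Added for clarity: same structure as the non-temporal variant, but
	 * with regular (temporal) stores, so every written cache line is
	 * flushed explicitly with flush64b afterwards.
	 */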
__m128i xmm = _mm_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_sse2(dest, xmm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 4 * 64) {
memset_mov4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_mov2x64b(dest, xmm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_mov1x64b(dest, xmm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len)
memset_small_sse2(dest, xmm, len);
}
| 3,985 | 28.525926 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_movnt_sse2_clflush
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_sse2.h"
| 1,759 | 46.567568 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "libpmem.h"
#include "memset_avx.h"
#include "memcpy_memset.h"
#include "out.h"
#include "valgrind_internal.h"
static force_inline void
memset_movnt8x64b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest + 0, ymm);
_mm256_stream_si256((__m256i *)dest + 1, ymm);
_mm256_stream_si256((__m256i *)dest + 2, ymm);
_mm256_stream_si256((__m256i *)dest + 3, ymm);
_mm256_stream_si256((__m256i *)dest + 4, ymm);
_mm256_stream_si256((__m256i *)dest + 5, ymm);
_mm256_stream_si256((__m256i *)dest + 6, ymm);
_mm256_stream_si256((__m256i *)dest + 7, ymm);
_mm256_stream_si256((__m256i *)dest + 8, ymm);
_mm256_stream_si256((__m256i *)dest + 9, ymm);
_mm256_stream_si256((__m256i *)dest + 10, ymm);
_mm256_stream_si256((__m256i *)dest + 11, ymm);
_mm256_stream_si256((__m256i *)dest + 12, ymm);
_mm256_stream_si256((__m256i *)dest + 13, ymm);
_mm256_stream_si256((__m256i *)dest + 14, ymm);
_mm256_stream_si256((__m256i *)dest + 15, ymm);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memset_movnt4x64b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest + 0, ymm);
_mm256_stream_si256((__m256i *)dest + 1, ymm);
_mm256_stream_si256((__m256i *)dest + 2, ymm);
_mm256_stream_si256((__m256i *)dest + 3, ymm);
_mm256_stream_si256((__m256i *)dest + 4, ymm);
_mm256_stream_si256((__m256i *)dest + 5, ymm);
_mm256_stream_si256((__m256i *)dest + 6, ymm);
_mm256_stream_si256((__m256i *)dest + 7, ymm);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memset_movnt2x64b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest + 0, ymm);
_mm256_stream_si256((__m256i *)dest + 1, ymm);
_mm256_stream_si256((__m256i *)dest + 2, ymm);
_mm256_stream_si256((__m256i *)dest + 3, ymm);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memset_movnt1x64b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest + 0, ymm);
_mm256_stream_si256((__m256i *)dest + 1, ymm);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest, ymm);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
__m128i xmm0 = m256_get16b(ymm);
_mm_stream_si128((__m128i *)dest, xmm0);
	VALGRIND_DO_FLUSH(dest, 16);
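	/* the 16-byte streaming store above targets dest itself */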
}
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
uint64_t x = m256_get8b(ymm);
_mm_stream_si64((long long *)dest, (long long)x);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
uint32_t x = m256_get4b(ymm);
_mm_stream_si32((int *)dest, (int)x);
VALGRIND_DO_FLUSH(dest, 4);
}
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
__m256i ymm = _mm256_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
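	/* fill the unaligned head so the streaming loop starts on a 64-byte boundary */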
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_avx(dest, ymm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 8 * 64) {
memset_movnt8x64b(dest, ymm);
dest += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memset_movnt4x64b(dest, ymm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_movnt2x64b(dest, ymm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_movnt1x64b(dest, ymm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memset_movnt1x32b(dest, ymm);
else if (len == 16)
memset_movnt1x16b(dest, ymm);
else if (len == 8)
memset_movnt1x8b(dest, ymm);
else if (len == 4)
memset_movnt1x4b(dest, ymm);
else
goto nonnt;
goto end;
}
nonnt:
memset_small_avx(dest, ymm, len);
end:
avx_zeroupper();
maybe_barrier();
}
| 5,514 | 25.137441 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMSET_AVX_H
#define PMEM_MEMSET_AVX_H
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "avx.h"
#include "libpmem.h"
#include "out.h"
static force_inline void
memset_small_avx_noflush(char *dest, __m256i ymm, size_t len)
{
ASSERT(len <= 64);
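	/*
	 * Fills of 1..64 bytes are handled with at most two, possibly
	 * overlapping, stores of the largest chunk that fits, instead of
	 * a byte-by-byte loop.
	 */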
if (len <= 8)
goto le8;
if (len <= 32)
goto le32;
/* 33..64 */
_mm256_storeu_si256((__m256i *)dest, ymm);
_mm256_storeu_si256((__m256i *)(dest + len - 32), ymm);
return;
le32:
if (len > 16) {
/* 17..32 */
__m128i xmm = m256_get16b(ymm);
_mm_storeu_si128((__m128i *)dest, xmm);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
return;
}
/* 9..16 */
uint64_t d8 = m256_get8b(ymm);
*(uint64_t *)dest = d8;
*(uint64_t *)(dest + len - 8) = d8;
return;
le8:
if (len <= 2)
goto le2;
if (len > 4) {
/* 5..8 */
uint32_t d = m256_get4b(ymm);
*(uint32_t *)dest = d;
*(uint32_t *)(dest + len - 4) = d;
return;
}
/* 3..4 */
uint16_t d2 = m256_get2b(ymm);
*(uint16_t *)dest = d2;
*(uint16_t *)(dest + len - 2) = d2;
return;
le2:
if (len == 2) {
uint16_t d2 = m256_get2b(ymm);
*(uint16_t *)dest = d2;
return;
}
*(uint8_t *)dest = (uint8_t)m256_get2b(ymm);
}
static force_inline void
memset_small_avx(char *dest, __m256i ymm, size_t len)
{
memset_small_avx_noflush(dest, ymm, len);
flush(dest, len);
}
#endif
| 2,975 | 24.655172 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_movnt_avx512f_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_avx512f.h"
| 1,762 | 46.648649 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_movnt_avx_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_avx.h"
| 1,766 | 46.756757 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_movnt_sse2_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_sse2.h"
| 1,768 | 46.810811 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memset_avx.h"
#include "memcpy_memset.h"
static force_inline void
memset_mov8x64b(char *dest, __m256i ymm)
{
_mm256_store_si256((__m256i *)dest + 0, ymm);
_mm256_store_si256((__m256i *)dest + 1, ymm);
_mm256_store_si256((__m256i *)dest + 2, ymm);
_mm256_store_si256((__m256i *)dest + 3, ymm);
_mm256_store_si256((__m256i *)dest + 4, ymm);
_mm256_store_si256((__m256i *)dest + 5, ymm);
_mm256_store_si256((__m256i *)dest + 6, ymm);
_mm256_store_si256((__m256i *)dest + 7, ymm);
_mm256_store_si256((__m256i *)dest + 8, ymm);
_mm256_store_si256((__m256i *)dest + 9, ymm);
_mm256_store_si256((__m256i *)dest + 10, ymm);
_mm256_store_si256((__m256i *)dest + 11, ymm);
_mm256_store_si256((__m256i *)dest + 12, ymm);
_mm256_store_si256((__m256i *)dest + 13, ymm);
_mm256_store_si256((__m256i *)dest + 14, ymm);
_mm256_store_si256((__m256i *)dest + 15, ymm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
static force_inline void
memset_mov4x64b(char *dest, __m256i ymm)
{
_mm256_store_si256((__m256i *)dest + 0, ymm);
_mm256_store_si256((__m256i *)dest + 1, ymm);
_mm256_store_si256((__m256i *)dest + 2, ymm);
_mm256_store_si256((__m256i *)dest + 3, ymm);
_mm256_store_si256((__m256i *)dest + 4, ymm);
_mm256_store_si256((__m256i *)dest + 5, ymm);
_mm256_store_si256((__m256i *)dest + 6, ymm);
_mm256_store_si256((__m256i *)dest + 7, ymm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memset_mov2x64b(char *dest, __m256i ymm)
{
_mm256_store_si256((__m256i *)dest + 0, ymm);
_mm256_store_si256((__m256i *)dest + 1, ymm);
_mm256_store_si256((__m256i *)dest + 2, ymm);
_mm256_store_si256((__m256i *)dest + 3, ymm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memset_mov1x64b(char *dest, __m256i ymm)
{
_mm256_store_si256((__m256i *)dest + 0, ymm);
_mm256_store_si256((__m256i *)dest + 1, ymm);
flush64b(dest + 0 * 64);
}
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
__m256i ymm = _mm256_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_avx(dest, ymm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 8 * 64) {
memset_mov8x64b(dest, ymm);
dest += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memset_mov4x64b(dest, ymm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_mov2x64b(dest, ymm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_mov1x64b(dest, ymm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len)
memset_small_avx(dest, ymm, len);
avx_zeroupper();
}
| 4,570 | 27.56875 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_movnt_avx512f_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_avx512f.h"
| 1,774 | 46.972973 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
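/*
 * Instantiation of the temporal AVX-512F memset template from
 * memset_t_avx512f.h, with clflushopt as the per-cache-line flush
 * primitive (flush64b) applied after the regular stores.
 */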
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_mov_avx512f_clflushopt
#include "memset_t_avx512f.h"
| 1,756 | 46.486486 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memset_avx512f.h"
#include "memcpy_memset.h"
static force_inline void
memset_mov32x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
_mm512_store_si512((__m512i *)dest + 1, zmm);
_mm512_store_si512((__m512i *)dest + 2, zmm);
_mm512_store_si512((__m512i *)dest + 3, zmm);
_mm512_store_si512((__m512i *)dest + 4, zmm);
_mm512_store_si512((__m512i *)dest + 5, zmm);
_mm512_store_si512((__m512i *)dest + 6, zmm);
_mm512_store_si512((__m512i *)dest + 7, zmm);
_mm512_store_si512((__m512i *)dest + 8, zmm);
_mm512_store_si512((__m512i *)dest + 9, zmm);
_mm512_store_si512((__m512i *)dest + 10, zmm);
_mm512_store_si512((__m512i *)dest + 11, zmm);
_mm512_store_si512((__m512i *)dest + 12, zmm);
_mm512_store_si512((__m512i *)dest + 13, zmm);
_mm512_store_si512((__m512i *)dest + 14, zmm);
_mm512_store_si512((__m512i *)dest + 15, zmm);
_mm512_store_si512((__m512i *)dest + 16, zmm);
_mm512_store_si512((__m512i *)dest + 17, zmm);
_mm512_store_si512((__m512i *)dest + 18, zmm);
_mm512_store_si512((__m512i *)dest + 19, zmm);
_mm512_store_si512((__m512i *)dest + 20, zmm);
_mm512_store_si512((__m512i *)dest + 21, zmm);
_mm512_store_si512((__m512i *)dest + 22, zmm);
_mm512_store_si512((__m512i *)dest + 23, zmm);
_mm512_store_si512((__m512i *)dest + 24, zmm);
_mm512_store_si512((__m512i *)dest + 25, zmm);
_mm512_store_si512((__m512i *)dest + 26, zmm);
_mm512_store_si512((__m512i *)dest + 27, zmm);
_mm512_store_si512((__m512i *)dest + 28, zmm);
_mm512_store_si512((__m512i *)dest + 29, zmm);
_mm512_store_si512((__m512i *)dest + 30, zmm);
_mm512_store_si512((__m512i *)dest + 31, zmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
flush64b(dest + 8 * 64);
flush64b(dest + 9 * 64);
flush64b(dest + 10 * 64);
flush64b(dest + 11 * 64);
flush64b(dest + 12 * 64);
flush64b(dest + 13 * 64);
flush64b(dest + 14 * 64);
flush64b(dest + 15 * 64);
flush64b(dest + 16 * 64);
flush64b(dest + 17 * 64);
flush64b(dest + 18 * 64);
flush64b(dest + 19 * 64);
flush64b(dest + 20 * 64);
flush64b(dest + 21 * 64);
flush64b(dest + 22 * 64);
flush64b(dest + 23 * 64);
flush64b(dest + 24 * 64);
flush64b(dest + 25 * 64);
flush64b(dest + 26 * 64);
flush64b(dest + 27 * 64);
flush64b(dest + 28 * 64);
flush64b(dest + 29 * 64);
flush64b(dest + 30 * 64);
flush64b(dest + 31 * 64);
}
static force_inline void
memset_mov16x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
_mm512_store_si512((__m512i *)dest + 1, zmm);
_mm512_store_si512((__m512i *)dest + 2, zmm);
_mm512_store_si512((__m512i *)dest + 3, zmm);
_mm512_store_si512((__m512i *)dest + 4, zmm);
_mm512_store_si512((__m512i *)dest + 5, zmm);
_mm512_store_si512((__m512i *)dest + 6, zmm);
_mm512_store_si512((__m512i *)dest + 7, zmm);
_mm512_store_si512((__m512i *)dest + 8, zmm);
_mm512_store_si512((__m512i *)dest + 9, zmm);
_mm512_store_si512((__m512i *)dest + 10, zmm);
_mm512_store_si512((__m512i *)dest + 11, zmm);
_mm512_store_si512((__m512i *)dest + 12, zmm);
_mm512_store_si512((__m512i *)dest + 13, zmm);
_mm512_store_si512((__m512i *)dest + 14, zmm);
_mm512_store_si512((__m512i *)dest + 15, zmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
flush64b(dest + 8 * 64);
flush64b(dest + 9 * 64);
flush64b(dest + 10 * 64);
flush64b(dest + 11 * 64);
flush64b(dest + 12 * 64);
flush64b(dest + 13 * 64);
flush64b(dest + 14 * 64);
flush64b(dest + 15 * 64);
}
static force_inline void
memset_mov8x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
_mm512_store_si512((__m512i *)dest + 1, zmm);
_mm512_store_si512((__m512i *)dest + 2, zmm);
_mm512_store_si512((__m512i *)dest + 3, zmm);
_mm512_store_si512((__m512i *)dest + 4, zmm);
_mm512_store_si512((__m512i *)dest + 5, zmm);
_mm512_store_si512((__m512i *)dest + 6, zmm);
_mm512_store_si512((__m512i *)dest + 7, zmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
static force_inline void
memset_mov4x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
_mm512_store_si512((__m512i *)dest + 1, zmm);
_mm512_store_si512((__m512i *)dest + 2, zmm);
_mm512_store_si512((__m512i *)dest + 3, zmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memset_mov2x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
_mm512_store_si512((__m512i *)dest + 1, zmm);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memset_mov1x64b(char *dest, __m512i zmm)
{
_mm512_store_si512((__m512i *)dest + 0, zmm);
flush64b(dest + 0 * 64);
}
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
__m512i zmm = _mm512_set1_epi8((char)c);
/* See comment in memset_movnt_avx512f */
__m256i ymm = _mm256_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_avx512f(dest, ymm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 32 * 64) {
memset_mov32x64b(dest, zmm);
dest += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memset_mov16x64b(dest, zmm);
dest += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memset_mov8x64b(dest, zmm);
dest += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memset_mov4x64b(dest, zmm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_mov2x64b(dest, zmm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_mov1x64b(dest, zmm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len)
memset_small_avx512f(dest, ymm, len);
avx_zeroupper();
}
| 7,852 | 28.411985 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_mov_sse2_clflush
#include "memset_t_sse2.h"
| 1,740 | 46.054054 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_movnt_avx512f_clflush
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_avx512f.h"
| 1,765 | 46.72973 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_mov_avx_clflushopt
#include "memset_t_avx.h"
| 1,748 | 46.27027 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
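/*
 * "Empty" instantiation of the non-temporal SSE2 memset template: the
 * flush primitive is a no-op here, so only the store barrier issued
 * after the streaming stores remains.
 */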
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_movnt_sse2_empty
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_sse2.h"
| 1,755 | 46.459459 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memset_mov_avx512f_clflush
#include "memset_t_avx512f.h"
| 1,746 | 46.216216 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_mov_avx512f_clwb
#include "memset_t_avx512f.h"
| 1,738 | 46 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_mov_avx_empty
#include "memset_t_avx.h"
| 1,737 | 45.972973 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_movnt_avx_empty
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_avx.h"
| 1,753 | 46.405405 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMSET_SSE2_H
#define PMEM_MEMSET_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "libpmem.h"
#include "out.h"
static force_inline void
memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len)
{
ASSERT(len <= 64);
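	/* small (1..64 byte) fills: branch on size, use a few overlapping stores */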
if (len <= 8)
goto le8;
if (len <= 32)
goto le32;
if (len > 48) {
/* 49..64 */
_mm_storeu_si128((__m128i *)(dest + 0), xmm);
_mm_storeu_si128((__m128i *)(dest + 16), xmm);
_mm_storeu_si128((__m128i *)(dest + 32), xmm);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
return;
}
/* 33..48 */
_mm_storeu_si128((__m128i *)(dest + 0), xmm);
_mm_storeu_si128((__m128i *)(dest + 16), xmm);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
return;
le32:
if (len > 16) {
/* 17..32 */
_mm_storeu_si128((__m128i *)(dest + 0), xmm);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
return;
}
/* 9..16 */
uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm);
*(uint64_t *)dest = d8;
*(uint64_t *)(dest + len - 8) = d8;
return;
le8:
if (len <= 2)
goto le2;
if (len > 4) {
/* 5..8 */
uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm);
*(uint32_t *)dest = d4;
*(uint32_t *)(dest + len - 4) = d4;
return;
}
/* 3..4 */
uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
*(uint16_t *)dest = d2;
*(uint16_t *)(dest + len - 2) = d2;
return;
le2:
if (len == 2) {
uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
*(uint16_t *)dest = d2;
return;
}
*(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm);
}
static force_inline void
memset_small_sse2(char *dest, __m128i xmm, size_t len)
{
memset_small_sse2_noflush(dest, xmm, len);
flush(dest, len);
}
#endif
| 3,327 | 26.056911 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memset_movnt_sse2_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memset_nt_sse2.h"
| 1,756 | 46.486486 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memset_mov_sse2_clflushopt
#include "memset_t_sse2.h"
| 1,750 | 46.324324 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_mov_avx512f_empty
#include "memset_t_avx512f.h"
| 1,745 | 46.189189 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "libpmem.h"
#include "memcpy_memset.h"
#include "memset_avx512f.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
static force_inline void
memset_movnt32x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
_mm512_stream_si512((__m512i *)dest + 1, zmm);
_mm512_stream_si512((__m512i *)dest + 2, zmm);
_mm512_stream_si512((__m512i *)dest + 3, zmm);
_mm512_stream_si512((__m512i *)dest + 4, zmm);
_mm512_stream_si512((__m512i *)dest + 5, zmm);
_mm512_stream_si512((__m512i *)dest + 6, zmm);
_mm512_stream_si512((__m512i *)dest + 7, zmm);
_mm512_stream_si512((__m512i *)dest + 8, zmm);
_mm512_stream_si512((__m512i *)dest + 9, zmm);
_mm512_stream_si512((__m512i *)dest + 10, zmm);
_mm512_stream_si512((__m512i *)dest + 11, zmm);
_mm512_stream_si512((__m512i *)dest + 12, zmm);
_mm512_stream_si512((__m512i *)dest + 13, zmm);
_mm512_stream_si512((__m512i *)dest + 14, zmm);
_mm512_stream_si512((__m512i *)dest + 15, zmm);
_mm512_stream_si512((__m512i *)dest + 16, zmm);
_mm512_stream_si512((__m512i *)dest + 17, zmm);
_mm512_stream_si512((__m512i *)dest + 18, zmm);
_mm512_stream_si512((__m512i *)dest + 19, zmm);
_mm512_stream_si512((__m512i *)dest + 20, zmm);
_mm512_stream_si512((__m512i *)dest + 21, zmm);
_mm512_stream_si512((__m512i *)dest + 22, zmm);
_mm512_stream_si512((__m512i *)dest + 23, zmm);
_mm512_stream_si512((__m512i *)dest + 24, zmm);
_mm512_stream_si512((__m512i *)dest + 25, zmm);
_mm512_stream_si512((__m512i *)dest + 26, zmm);
_mm512_stream_si512((__m512i *)dest + 27, zmm);
_mm512_stream_si512((__m512i *)dest + 28, zmm);
_mm512_stream_si512((__m512i *)dest + 29, zmm);
_mm512_stream_si512((__m512i *)dest + 30, zmm);
_mm512_stream_si512((__m512i *)dest + 31, zmm);
VALGRIND_DO_FLUSH(dest, 32 * 64);
}
static force_inline void
memset_movnt16x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
_mm512_stream_si512((__m512i *)dest + 1, zmm);
_mm512_stream_si512((__m512i *)dest + 2, zmm);
_mm512_stream_si512((__m512i *)dest + 3, zmm);
_mm512_stream_si512((__m512i *)dest + 4, zmm);
_mm512_stream_si512((__m512i *)dest + 5, zmm);
_mm512_stream_si512((__m512i *)dest + 6, zmm);
_mm512_stream_si512((__m512i *)dest + 7, zmm);
_mm512_stream_si512((__m512i *)dest + 8, zmm);
_mm512_stream_si512((__m512i *)dest + 9, zmm);
_mm512_stream_si512((__m512i *)dest + 10, zmm);
_mm512_stream_si512((__m512i *)dest + 11, zmm);
_mm512_stream_si512((__m512i *)dest + 12, zmm);
_mm512_stream_si512((__m512i *)dest + 13, zmm);
_mm512_stream_si512((__m512i *)dest + 14, zmm);
_mm512_stream_si512((__m512i *)dest + 15, zmm);
VALGRIND_DO_FLUSH(dest, 16 * 64);
}
static force_inline void
memset_movnt8x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
_mm512_stream_si512((__m512i *)dest + 1, zmm);
_mm512_stream_si512((__m512i *)dest + 2, zmm);
_mm512_stream_si512((__m512i *)dest + 3, zmm);
_mm512_stream_si512((__m512i *)dest + 4, zmm);
_mm512_stream_si512((__m512i *)dest + 5, zmm);
_mm512_stream_si512((__m512i *)dest + 6, zmm);
_mm512_stream_si512((__m512i *)dest + 7, zmm);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memset_movnt4x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
_mm512_stream_si512((__m512i *)dest + 1, zmm);
_mm512_stream_si512((__m512i *)dest + 2, zmm);
_mm512_stream_si512((__m512i *)dest + 3, zmm);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memset_movnt2x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
_mm512_stream_si512((__m512i *)dest + 1, zmm);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memset_movnt1x64b(char *dest, __m512i zmm)
{
_mm512_stream_si512((__m512i *)dest + 0, zmm);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
_mm256_stream_si256((__m256i *)dest, ymm);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
__m128i xmm = _mm256_extracti128_si256(ymm, 0);
_mm_stream_si128((__m128i *)dest, xmm);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
uint64_t x = m256_get8b(ymm);
_mm_stream_si64((long long *)dest, (long long)x);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
uint32_t x = m256_get4b(ymm);
_mm_stream_si32((int *)dest, (int)x);
VALGRIND_DO_FLUSH(dest, 4);
}
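/*
 * EXPORTED_SYMBOL -- non-temporal AVX-512 memset: align dest to the next
 * cache line with a small temporal memset, stream full cache lines in
 * decreasing block sizes, then write the tail with a single nt store for
 * power-of-two tails of 32, 16, 8 or 4 bytes, or a small temporal memset
 * otherwise
 */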
void
EXPORTED_SYMBOL(char *dest, int c, size_t len)
{
__m512i zmm = _mm512_set1_epi8((char)c);
/*
* Can't use _mm512_extracti64x4_epi64, because some versions of gcc
* crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887
*/
__m256i ymm = _mm256_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_avx512f(dest, ymm, cnt);
dest += cnt;
len -= cnt;
}
while (len >= 32 * 64) {
memset_movnt32x64b(dest, zmm);
dest += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memset_movnt16x64b(dest, zmm);
dest += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memset_movnt8x64b(dest, zmm);
dest += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memset_movnt4x64b(dest, zmm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_movnt2x64b(dest, zmm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_movnt1x64b(dest, zmm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memset_movnt1x32b(dest, ymm);
else if (len == 16)
memset_movnt1x16b(dest, ymm);
else if (len == 8)
memset_movnt1x8b(dest, ymm);
else if (len == 4)
memset_movnt1x4b(dest, ymm);
else
goto nonnt;
goto end;
}
nonnt:
memset_small_avx512f(dest, ymm, len);
end:
avx_zeroupper();
maybe_barrier();
}
| 7,756 | 27.105072 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
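/*
 * Instantiates the non-temporal AVX-512 memset template with no flush
 * primitive (flush_empty_nolog) and a barrier after the nt stores; the
 * sibling memset_nt_avx512f_*.c files select other flush primitives the
 * same way.
 */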
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memset_movnt_avx512f_empty
#define maybe_barrier barrier_after_ntstores
#include "memset_nt_avx512f.h"
| 1,761 | 46.621622 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
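/*
 * Instantiates the temporal (regular store) AVX-512 memmove template with
 * pmem_clwb as the per-cache-line flush primitive.
 */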
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_clwb
#include "memcpy_t_avx512f.h"
| 1,739 | 46.027027 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_clflush
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,766 | 46.756757 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,775 | 47 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_clflush
#include "memcpy_t_avx.h"
| 1,739 | 46.027027 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,769 | 46.837838 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_clflush
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,758 | 46.540541 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMCPY_AVX_H
#define PMEM_MEMCPY_AVX_H
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "libpmem.h"
#include "out.h"
static force_inline void
memmove_small_avx_noflush(char *dest, const char *src, size_t len)
{
ASSERT(len <= 64);
if (len <= 8)
goto le8;
if (len <= 32)
goto le32;
/* 33..64 */
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32));
_mm256_storeu_si256((__m256i *)dest, ymm0);
_mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1);
return;
le32:
if (len > 16) {
/* 17..32 */
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));
_mm_storeu_si128((__m128i *)dest, xmm0);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
return;
}
/* 9..16 */
uint64_t d80 = *(uint64_t *)src;
uint64_t d81 = *(uint64_t *)(src + len - 8);
*(uint64_t *)dest = d80;
*(uint64_t *)(dest + len - 8) = d81;
return;
le8:
if (len <= 2)
goto le2;
if (len > 4) {
/* 5..8 */
uint32_t d40 = *(uint32_t *)src;
uint32_t d41 = *(uint32_t *)(src + len - 4);
*(uint32_t *)dest = d40;
*(uint32_t *)(dest + len - 4) = d41;
return;
}
/* 3..4 */
uint16_t d20 = *(uint16_t *)src;
uint16_t d21 = *(uint16_t *)(src + len - 2);
*(uint16_t *)dest = d20;
*(uint16_t *)(dest + len - 2) = d21;
return;
le2:
if (len == 2) {
*(uint16_t *)dest = *(uint16_t *)src;
return;
}
*(uint8_t *)dest = *(uint8_t *)src;
}
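/*
 * memmove_small_avx -- (internal) copy up to 64 bytes and flush the
 * destination
 */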
static force_inline void
memmove_small_avx(char *dest, const char *src, size_t len)
{
memmove_small_avx_noflush(dest, src, len);
flush(dest, len);
}
#endif
| 3,275 | 26.529412 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMCPY_SSE2_H
#define PMEM_MEMCPY_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "libpmem.h"
#include "out.h"
static force_inline void
memmove_small_sse2_noflush(char *dest, const char *src, size_t len)
{
ASSERT(len <= 64);
if (len <= 8)
goto le8;
if (len <= 32)
goto le32;
if (len > 48) {
/* 49..64 */
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32));
__m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16));
_mm_storeu_si128((__m128i *)dest, xmm0);
_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
_mm_storeu_si128((__m128i *)(dest + 32), xmm2);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm3);
return;
}
/* 33..48 */
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16));
_mm_storeu_si128((__m128i *)dest, xmm0);
_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm2);
return;
le32:
if (len > 16) {
/* 17..32 */
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));
_mm_storeu_si128((__m128i *)dest, xmm0);
_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
return;
}
/* 9..16 */
uint64_t d80 = *(uint64_t *)src;
uint64_t d81 = *(uint64_t *)(src + len - 8);
*(uint64_t *)dest = d80;
*(uint64_t *)(dest + len - 8) = d81;
return;
le8:
if (len <= 2)
goto le2;
if (len > 4) {
/* 5..8 */
uint32_t d40 = *(uint32_t *)src;
uint32_t d41 = *(uint32_t *)(src + len - 4);
*(uint32_t *)dest = d40;
*(uint32_t *)(dest + len - 4) = d41;
return;
}
/* 3..4 */
uint16_t d20 = *(uint16_t *)src;
uint16_t d21 = *(uint16_t *)(src + len - 2);
*(uint16_t *)dest = d20;
*(uint16_t *)(dest + len - 2) = d21;
return;
le2:
if (len == 2) {
*(uint16_t *)dest = *(uint16_t *)src;
return;
}
*(uint8_t *)dest = *(uint8_t *)src;
}
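/*
 * memmove_small_sse2 -- (internal) copy up to 64 bytes and flush the
 * destination
 */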
static force_inline void
memmove_small_sse2(char *dest, const char *src, size_t len)
{
memmove_small_sse2_noflush(dest, src, len);
flush(dest, len);
}
#endif
| 3,846 | 27.496296 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
__m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8);
__m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9);
__m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10);
__m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11);
__m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12);
__m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13);
__m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14);
__m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15);
_mm_store_si128((__m128i *)dest + 0, xmm0);
_mm_store_si128((__m128i *)dest + 1, xmm1);
_mm_store_si128((__m128i *)dest + 2, xmm2);
_mm_store_si128((__m128i *)dest + 3, xmm3);
_mm_store_si128((__m128i *)dest + 4, xmm4);
_mm_store_si128((__m128i *)dest + 5, xmm5);
_mm_store_si128((__m128i *)dest + 6, xmm6);
_mm_store_si128((__m128i *)dest + 7, xmm7);
_mm_store_si128((__m128i *)dest + 8, xmm8);
_mm_store_si128((__m128i *)dest + 9, xmm9);
_mm_store_si128((__m128i *)dest + 10, xmm10);
_mm_store_si128((__m128i *)dest + 11, xmm11);
_mm_store_si128((__m128i *)dest + 12, xmm12);
_mm_store_si128((__m128i *)dest + 13, xmm13);
_mm_store_si128((__m128i *)dest + 14, xmm14);
_mm_store_si128((__m128i *)dest + 15, xmm15);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
_mm_store_si128((__m128i *)dest + 0, xmm0);
_mm_store_si128((__m128i *)dest + 1, xmm1);
_mm_store_si128((__m128i *)dest + 2, xmm2);
_mm_store_si128((__m128i *)dest + 3, xmm3);
_mm_store_si128((__m128i *)dest + 4, xmm4);
_mm_store_si128((__m128i *)dest + 5, xmm5);
_mm_store_si128((__m128i *)dest + 6, xmm6);
_mm_store_si128((__m128i *)dest + 7, xmm7);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
_mm_store_si128((__m128i *)dest + 0, xmm0);
_mm_store_si128((__m128i *)dest + 1, xmm1);
_mm_store_si128((__m128i *)dest + 2, xmm2);
_mm_store_si128((__m128i *)dest + 3, xmm3);
flush64b(dest + 0 * 64);
}
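/*
 * memmove_mov_sse_fw -- (internal) forward copy: align the destination to a
 * cache line boundary, copy in 4/2/1 cache-line blocks, then copy the tail
 * with memmove_small_sse2
 */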
static force_inline void
memmove_mov_sse_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_sse2(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len)
memmove_small_sse2(dest, src, len);
}
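/*
 * memmove_mov_sse_bw -- (internal) backward copy for overlapping buffers,
 * working down from the end of the range
 */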
static force_inline void
memmove_mov_sse_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_sse2(dest, src, cnt);
}
while (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
if (len)
memmove_small_sse2(dest - len, src - len, len);
}
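/*
 * EXPORTED_SYMBOL -- choose the copy direction: copy forward when a forward
 * pass cannot clobber unread source bytes, otherwise copy backward
 */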
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_mov_sse_fw(dest, src, len);
else
memmove_mov_sse_bw(dest, src, len);
}
| 6,467 | 28.534247 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_clflush
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,760 | 46.594595 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_empty
#include "memcpy_t_avx512f.h"
| 1,746 | 46.216216 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_clflushopt
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,767 | 46.783784 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx512f.h"
#include "memcpy_memset.h"
static force_inline void
memmove_mov32x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
__m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16);
__m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17);
__m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18);
__m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19);
__m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20);
__m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21);
__m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22);
__m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23);
__m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24);
__m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25);
__m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26);
__m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27);
__m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28);
__m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29);
__m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30);
__m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
_mm512_store_si512((__m512i *)dest + 1, zmm1);
_mm512_store_si512((__m512i *)dest + 2, zmm2);
_mm512_store_si512((__m512i *)dest + 3, zmm3);
_mm512_store_si512((__m512i *)dest + 4, zmm4);
_mm512_store_si512((__m512i *)dest + 5, zmm5);
_mm512_store_si512((__m512i *)dest + 6, zmm6);
_mm512_store_si512((__m512i *)dest + 7, zmm7);
_mm512_store_si512((__m512i *)dest + 8, zmm8);
_mm512_store_si512((__m512i *)dest + 9, zmm9);
_mm512_store_si512((__m512i *)dest + 10, zmm10);
_mm512_store_si512((__m512i *)dest + 11, zmm11);
_mm512_store_si512((__m512i *)dest + 12, zmm12);
_mm512_store_si512((__m512i *)dest + 13, zmm13);
_mm512_store_si512((__m512i *)dest + 14, zmm14);
_mm512_store_si512((__m512i *)dest + 15, zmm15);
_mm512_store_si512((__m512i *)dest + 16, zmm16);
_mm512_store_si512((__m512i *)dest + 17, zmm17);
_mm512_store_si512((__m512i *)dest + 18, zmm18);
_mm512_store_si512((__m512i *)dest + 19, zmm19);
_mm512_store_si512((__m512i *)dest + 20, zmm20);
_mm512_store_si512((__m512i *)dest + 21, zmm21);
_mm512_store_si512((__m512i *)dest + 22, zmm22);
_mm512_store_si512((__m512i *)dest + 23, zmm23);
_mm512_store_si512((__m512i *)dest + 24, zmm24);
_mm512_store_si512((__m512i *)dest + 25, zmm25);
_mm512_store_si512((__m512i *)dest + 26, zmm26);
_mm512_store_si512((__m512i *)dest + 27, zmm27);
_mm512_store_si512((__m512i *)dest + 28, zmm28);
_mm512_store_si512((__m512i *)dest + 29, zmm29);
_mm512_store_si512((__m512i *)dest + 30, zmm30);
_mm512_store_si512((__m512i *)dest + 31, zmm31);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
flush64b(dest + 8 * 64);
flush64b(dest + 9 * 64);
flush64b(dest + 10 * 64);
flush64b(dest + 11 * 64);
flush64b(dest + 12 * 64);
flush64b(dest + 13 * 64);
flush64b(dest + 14 * 64);
flush64b(dest + 15 * 64);
flush64b(dest + 16 * 64);
flush64b(dest + 17 * 64);
flush64b(dest + 18 * 64);
flush64b(dest + 19 * 64);
flush64b(dest + 20 * 64);
flush64b(dest + 21 * 64);
flush64b(dest + 22 * 64);
flush64b(dest + 23 * 64);
flush64b(dest + 24 * 64);
flush64b(dest + 25 * 64);
flush64b(dest + 26 * 64);
flush64b(dest + 27 * 64);
flush64b(dest + 28 * 64);
flush64b(dest + 29 * 64);
flush64b(dest + 30 * 64);
flush64b(dest + 31 * 64);
}
static force_inline void
memmove_mov16x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
_mm512_store_si512((__m512i *)dest + 1, zmm1);
_mm512_store_si512((__m512i *)dest + 2, zmm2);
_mm512_store_si512((__m512i *)dest + 3, zmm3);
_mm512_store_si512((__m512i *)dest + 4, zmm4);
_mm512_store_si512((__m512i *)dest + 5, zmm5);
_mm512_store_si512((__m512i *)dest + 6, zmm6);
_mm512_store_si512((__m512i *)dest + 7, zmm7);
_mm512_store_si512((__m512i *)dest + 8, zmm8);
_mm512_store_si512((__m512i *)dest + 9, zmm9);
_mm512_store_si512((__m512i *)dest + 10, zmm10);
_mm512_store_si512((__m512i *)dest + 11, zmm11);
_mm512_store_si512((__m512i *)dest + 12, zmm12);
_mm512_store_si512((__m512i *)dest + 13, zmm13);
_mm512_store_si512((__m512i *)dest + 14, zmm14);
_mm512_store_si512((__m512i *)dest + 15, zmm15);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
flush64b(dest + 8 * 64);
flush64b(dest + 9 * 64);
flush64b(dest + 10 * 64);
flush64b(dest + 11 * 64);
flush64b(dest + 12 * 64);
flush64b(dest + 13 * 64);
flush64b(dest + 14 * 64);
flush64b(dest + 15 * 64);
}
static force_inline void
memmove_mov8x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
_mm512_store_si512((__m512i *)dest + 1, zmm1);
_mm512_store_si512((__m512i *)dest + 2, zmm2);
_mm512_store_si512((__m512i *)dest + 3, zmm3);
_mm512_store_si512((__m512i *)dest + 4, zmm4);
_mm512_store_si512((__m512i *)dest + 5, zmm5);
_mm512_store_si512((__m512i *)dest + 6, zmm6);
_mm512_store_si512((__m512i *)dest + 7, zmm7);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
_mm512_store_si512((__m512i *)dest + 1, zmm1);
_mm512_store_si512((__m512i *)dest + 2, zmm2);
_mm512_store_si512((__m512i *)dest + 3, zmm3);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
_mm512_store_si512((__m512i *)dest + 1, zmm1);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
_mm512_store_si512((__m512i *)dest + 0, zmm0);
flush64b(dest + 0 * 64);
}
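/*
 * memmove_mov_avx512f_fw -- (internal) forward copy: align the destination
 * to a cache line boundary, copy in 32/16/8/4/2/1 cache-line blocks, then
 * copy the tail with memmove_small_avx512f
 */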
static force_inline void
memmove_mov_avx512f_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx512f(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 32 * 64) {
memmove_mov32x64b(dest, src);
dest += 32 * 64;
src += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memmove_mov16x64b(dest, src);
dest += 16 * 64;
src += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memmove_mov8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len)
memmove_small_avx512f(dest, src, len);
}
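/*
 * memmove_mov_avx512f_bw -- (internal) backward copy for overlapping
 * buffers, working down from the end of the range
 */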
static force_inline void
memmove_mov_avx512f_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx512f(dest, src, cnt);
}
while (len >= 32 * 64) {
dest -= 32 * 64;
src -= 32 * 64;
len -= 32 * 64;
memmove_mov32x64b(dest, src);
}
if (len >= 16 * 64) {
dest -= 16 * 64;
src -= 16 * 64;
len -= 16 * 64;
memmove_mov16x64b(dest, src);
}
if (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_mov8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
if (len)
memmove_small_avx512f(dest - len, src - len, len);
}
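/*
 * EXPORTED_SYMBOL -- choose the copy direction based on how the buffers
 * overlap, then restore the SSE state with vzeroupper
 */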
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_mov_avx512f_fw(dest, src, len);
else
memmove_mov_avx512f_bw(dest, src, len);
avx_zeroupper();
}
| 12,825 | 30.131068 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,755 | 46.459459 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clflushopt
#include "memcpy_t_sse2.h"
| 1,751 | 46.351351 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_empty
#include "memcpy_t_avx.h"
| 1,738 | 46 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_clflushopt
#include "memcpy_t_avx512f.h"
| 1,757 | 46.513514 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx.h"
| 1,754 | 46.432432 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clflushopt.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clflushopt
#define flush flush_clflushopt_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_clflushopt
#include "memcpy_t_avx.h"
| 1,749 | 46.297297 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
static force_inline void
memmove_mov8x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
__m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8);
__m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9);
__m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10);
__m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11);
__m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12);
__m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13);
__m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14);
__m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
_mm256_store_si256((__m256i *)dest + 4, ymm4);
_mm256_store_si256((__m256i *)dest + 5, ymm5);
_mm256_store_si256((__m256i *)dest + 6, ymm6);
_mm256_store_si256((__m256i *)dest + 7, ymm7);
_mm256_store_si256((__m256i *)dest + 8, ymm8);
_mm256_store_si256((__m256i *)dest + 9, ymm9);
_mm256_store_si256((__m256i *)dest + 10, ymm10);
_mm256_store_si256((__m256i *)dest + 11, ymm11);
_mm256_store_si256((__m256i *)dest + 12, ymm12);
_mm256_store_si256((__m256i *)dest + 13, ymm13);
_mm256_store_si256((__m256i *)dest + 14, ymm14);
_mm256_store_si256((__m256i *)dest + 15, ymm15);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
_mm256_store_si256((__m256i *)dest + 4, ymm4);
_mm256_store_si256((__m256i *)dest + 5, ymm5);
_mm256_store_si256((__m256i *)dest + 6, ymm6);
_mm256_store_si256((__m256i *)dest + 7, ymm7);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
}
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
_mm256_store_si256((__m256i *)dest + 2, ymm2);
_mm256_store_si256((__m256i *)dest + 3, ymm3);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
}
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
_mm256_store_si256((__m256i *)dest + 0, ymm0);
_mm256_store_si256((__m256i *)dest + 1, ymm1);
flush64b(dest + 0 * 64);
}
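/*
 * memmove_mov_avx_fw -- forward temporal copy: peel off a head until dest is
 * 64-byte aligned, copy (and flush) 8/4/2/1 cache-line blocks, then finish
 * the remaining tail with memmove_small_avx.
 */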
static force_inline void
memmove_mov_avx_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 8 * 64) {
memmove_mov8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len)
memmove_small_avx(dest, src, len);
}
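/*
 * memmove_mov_avx_bw -- backward variant used for overlapping ranges where
 * dest lies above src: walk down from the end so that source bytes are read
 * before they can be overwritten.
 */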
static force_inline void
memmove_mov_avx_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx(dest, src, cnt);
}
while (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_mov8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
if (len)
memmove_small_avx(dest - len, src - len, len);
}
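/*
 * The unsigned difference below is an overlap check: if dest is at least len
 * bytes above src (or anywhere below it, thanks to wraparound), a forward
 * copy cannot clobber unread source bytes; otherwise copy backward.
 */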
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_mov_avx_fw(dest, src, len);
else
memmove_mov_avx_bw(dest, src, len);
avx_zeroupper();
}
| 7,378 | 27.937255 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_mov_avx512f_clflush
#include "memcpy_t_avx512f.h"
| 1,747 | 46.243243 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,756 | 46.486486 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflush.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b _mm_clflush
#define flush flush_clflush_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clflush
#include "memcpy_t_sse2.h"
| 1,741 | 46.081081 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_sse2_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_sse2.h"
| 1,757 | 46.513514 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMCPY_AVX512F_H
#define PMEM_MEMCPY_AVX512F_H
#include <stddef.h>
#include "memcpy_avx.h"
static force_inline void
memmove_small_avx512f(char *dest, const char *src, size_t len)
{
/* We can't do better than AVX here. */
memmove_small_avx(dest, src, len);
}
#endif
| 1,886 | 38.3125 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b flush64b_empty
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_empty
#include "memcpy_t_sse2.h"
| 1,740 | 46.054054 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx512f.h"
#include "memcpy_memset.h"
#include "libpmem.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt32x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
__m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16);
__m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17);
__m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18);
__m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19);
__m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20);
__m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21);
__m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22);
__m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23);
__m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24);
__m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25);
__m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26);
__m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27);
__m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28);
__m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29);
__m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30);
__m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
_mm512_stream_si512((__m512i *)dest + 8, zmm8);
_mm512_stream_si512((__m512i *)dest + 9, zmm9);
_mm512_stream_si512((__m512i *)dest + 10, zmm10);
_mm512_stream_si512((__m512i *)dest + 11, zmm11);
_mm512_stream_si512((__m512i *)dest + 12, zmm12);
_mm512_stream_si512((__m512i *)dest + 13, zmm13);
_mm512_stream_si512((__m512i *)dest + 14, zmm14);
_mm512_stream_si512((__m512i *)dest + 15, zmm15);
_mm512_stream_si512((__m512i *)dest + 16, zmm16);
_mm512_stream_si512((__m512i *)dest + 17, zmm17);
_mm512_stream_si512((__m512i *)dest + 18, zmm18);
_mm512_stream_si512((__m512i *)dest + 19, zmm19);
_mm512_stream_si512((__m512i *)dest + 20, zmm20);
_mm512_stream_si512((__m512i *)dest + 21, zmm21);
_mm512_stream_si512((__m512i *)dest + 22, zmm22);
_mm512_stream_si512((__m512i *)dest + 23, zmm23);
_mm512_stream_si512((__m512i *)dest + 24, zmm24);
_mm512_stream_si512((__m512i *)dest + 25, zmm25);
_mm512_stream_si512((__m512i *)dest + 26, zmm26);
_mm512_stream_si512((__m512i *)dest + 27, zmm27);
_mm512_stream_si512((__m512i *)dest + 28, zmm28);
_mm512_stream_si512((__m512i *)dest + 29, zmm29);
_mm512_stream_si512((__m512i *)dest + 30, zmm30);
_mm512_stream_si512((__m512i *)dest + 31, zmm31);
VALGRIND_DO_FLUSH(dest, 32 * 64);
}
static force_inline void
memmove_movnt16x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
__m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8);
__m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9);
__m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10);
__m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11);
__m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12);
__m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13);
__m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14);
__m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
_mm512_stream_si512((__m512i *)dest + 8, zmm8);
_mm512_stream_si512((__m512i *)dest + 9, zmm9);
_mm512_stream_si512((__m512i *)dest + 10, zmm10);
_mm512_stream_si512((__m512i *)dest + 11, zmm11);
_mm512_stream_si512((__m512i *)dest + 12, zmm12);
_mm512_stream_si512((__m512i *)dest + 13, zmm13);
_mm512_stream_si512((__m512i *)dest + 14, zmm14);
_mm512_stream_si512((__m512i *)dest + 15, zmm15);
VALGRIND_DO_FLUSH(dest, 16 * 64);
}
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
__m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4);
__m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5);
__m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6);
__m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
_mm512_stream_si512((__m512i *)dest + 4, zmm4);
_mm512_stream_si512((__m512i *)dest + 5, zmm5);
_mm512_stream_si512((__m512i *)dest + 6, zmm6);
_mm512_stream_si512((__m512i *)dest + 7, zmm7);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
__m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2);
__m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
_mm512_stream_si512((__m512i *)dest + 2, zmm2);
_mm512_stream_si512((__m512i *)dest + 3, zmm3);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
__m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
_mm512_stream_si512((__m512i *)dest + 1, zmm1);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0);
_mm512_stream_si512((__m512i *)dest + 0, zmm0);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m256i zmm0 = _mm256_loadu_si256((__m256i *)src);
_mm256_stream_si256((__m256i *)dest, zmm0);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
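/*
 * memmove_movnt_avx512f_fw -- forward non-temporal copy: align dest to a
 * 64-byte boundary with a small temporal copy, stream progressively smaller
 * blocks, then handle a power-of-two tail (32/16/8/4 bytes) with a single
 * streaming store and anything else with memmove_small_avx512f.
 */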
static force_inline void
memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx512f(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 32 * 64) {
memmove_movnt32x64b(dest, src);
dest += 32 * 64;
src += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memmove_movnt16x64b(dest, src);
dest += 16 * 64;
src += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memmove_movnt8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
goto end;
}
nonnt:
memmove_small_avx512f(dest, src, len);
end:
avx_zeroupper();
}
static force_inline void
memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx512f(dest, src, cnt);
}
while (len >= 32 * 64) {
dest -= 32 * 64;
src -= 32 * 64;
len -= 32 * 64;
memmove_movnt32x64b(dest, src);
}
if (len >= 16 * 64) {
dest -= 16 * 64;
src -= 16 * 64;
len -= 16 * 64;
memmove_movnt16x64b(dest, src);
}
if (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_movnt8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
goto end;
}
nonnt:
dest -= len;
src -= len;
memmove_small_avx512f(dest, src, len);
end:
avx_zeroupper();
}
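/*
 * maybe_barrier() is defined by the including .c file as either
 * barrier_after_ntstores or no_barrier_after_ntstores, depending on whether
 * the selected flush variant still needs an explicit fence after the
 * non-temporal stores.
 */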
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_avx512f_fw(dest, src, len);
else
memmove_movnt_avx512f_bw(dest, src, len);
maybe_barrier();
}
| 13,191 | 28.446429 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_empty.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_empty_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_empty
#define maybe_barrier barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,762 | 46.648649 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
__m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8);
__m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9);
__m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10);
__m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11);
__m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12);
__m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13);
__m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14);
__m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
_mm_stream_si128((__m128i *)dest + 4, xmm4);
_mm_stream_si128((__m128i *)dest + 5, xmm5);
_mm_stream_si128((__m128i *)dest + 6, xmm6);
_mm_stream_si128((__m128i *)dest + 7, xmm7);
_mm_stream_si128((__m128i *)dest + 8, xmm8);
_mm_stream_si128((__m128i *)dest + 9, xmm9);
_mm_stream_si128((__m128i *)dest + 10, xmm10);
_mm_stream_si128((__m128i *)dest + 11, xmm11);
_mm_stream_si128((__m128i *)dest + 12, xmm12);
_mm_stream_si128((__m128i *)dest + 13, xmm13);
_mm_stream_si128((__m128i *)dest + 14, xmm14);
_mm_stream_si128((__m128i *)dest + 15, xmm15);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
__m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4);
__m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5);
__m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6);
__m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
_mm_stream_si128((__m128i *)dest + 4, xmm4);
_mm_stream_si128((__m128i *)dest + 5, xmm5);
_mm_stream_si128((__m128i *)dest + 6, xmm6);
_mm_stream_si128((__m128i *)dest + 7, xmm7);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
__m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2);
__m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
_mm_stream_si128((__m128i *)dest + 2, xmm2);
_mm_stream_si128((__m128i *)dest + 3, xmm3);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0);
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1);
_mm_stream_si128((__m128i *)dest + 0, xmm0);
_mm_stream_si128((__m128i *)dest + 1, xmm1);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
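/*
 * memmove_movnt_sse_fw -- forward non-temporal copy built from 16-byte SSE2
 * streaming stores; the structure mirrors the AVX and AVX-512 flavours, with
 * memmove_small_sse2 covering the unaligned head and any leftover tail.
 */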
static force_inline void
memmove_movnt_sse_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_sse2(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
return;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
return;
}
nonnt:
memmove_small_sse2(dest, src, len);
}
static force_inline void
memmove_movnt_sse_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_sse2(dest, src, cnt);
}
while (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
return;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
return;
}
nonnt:
dest -= len;
src -= len;
memmove_small_sse2(dest, src, len);
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_sse_fw(dest, src, len);
else
memmove_movnt_sse_bw(dest, src, len);
maybe_barrier();
}
| 8,204 | 25.813725 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_mov_sse2_clwb
#include "memcpy_t_sse2.h"
| 1,733 | 45.864865 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_movnt_avx512f_clwb
#define maybe_barrier no_barrier_after_ntstores
#include "memcpy_nt_avx512f.h"
| 1,763 | 46.675676 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
#include "valgrind_internal.h"
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
__m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8);
__m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9);
__m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10);
__m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11);
__m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12);
__m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13);
__m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14);
__m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
_mm256_stream_si256((__m256i *)dest + 4, ymm4);
_mm256_stream_si256((__m256i *)dest + 5, ymm5);
_mm256_stream_si256((__m256i *)dest + 6, ymm6);
_mm256_stream_si256((__m256i *)dest + 7, ymm7);
_mm256_stream_si256((__m256i *)dest + 8, ymm8);
_mm256_stream_si256((__m256i *)dest + 9, ymm9);
_mm256_stream_si256((__m256i *)dest + 10, ymm10);
_mm256_stream_si256((__m256i *)dest + 11, ymm11);
_mm256_stream_si256((__m256i *)dest + 12, ymm12);
_mm256_stream_si256((__m256i *)dest + 13, ymm13);
_mm256_stream_si256((__m256i *)dest + 14, ymm14);
_mm256_stream_si256((__m256i *)dest + 15, ymm15);
VALGRIND_DO_FLUSH(dest, 8 * 64);
}
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
__m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4);
__m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5);
__m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6);
__m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
_mm256_stream_si256((__m256i *)dest + 4, ymm4);
_mm256_stream_si256((__m256i *)dest + 5, ymm5);
_mm256_stream_si256((__m256i *)dest + 6, ymm6);
_mm256_stream_si256((__m256i *)dest + 7, ymm7);
VALGRIND_DO_FLUSH(dest, 4 * 64);
}
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
__m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2);
__m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
_mm256_stream_si256((__m256i *)dest + 2, ymm2);
_mm256_stream_si256((__m256i *)dest + 3, ymm3);
VALGRIND_DO_FLUSH(dest, 2 * 64);
}
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0);
__m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1);
_mm256_stream_si256((__m256i *)dest + 0, ymm0);
_mm256_stream_si256((__m256i *)dest + 1, ymm1);
VALGRIND_DO_FLUSH(dest, 64);
}
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
_mm256_stream_si256((__m256i *)dest, ymm0);
VALGRIND_DO_FLUSH(dest, 32);
}
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
_mm_stream_si128((__m128i *)dest, xmm0);
VALGRIND_DO_FLUSH(dest, 16);
}
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
_mm_stream_si64((long long *)dest, *(long long *)src);
VALGRIND_DO_FLUSH(dest, 8);
}
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
_mm_stream_si32((int *)dest, *(int *)src);
VALGRIND_DO_FLUSH(dest, 4);
}
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len)
{
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memmove_small_avx(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
while (len >= 8 * 64) {
memmove_movnt8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_movnt4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_movnt2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_movnt1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memmove_movnt1x32b(dest, src);
else if (len == 16)
memmove_movnt1x16b(dest, src);
else if (len == 8)
memmove_movnt1x8b(dest, src);
else if (len == 4)
memmove_movnt1x4b(dest, src);
else
goto nonnt;
goto end;
}
nonnt:
memmove_small_avx(dest, src, len);
end:
avx_zeroupper();
}
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx(dest, src, cnt);
}
while (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_movnt8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_movnt4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_movnt2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_movnt1x64b(dest, src);
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32) {
dest -= 32;
src -= 32;
memmove_movnt1x32b(dest, src);
} else if (len == 16) {
dest -= 16;
src -= 16;
memmove_movnt1x16b(dest, src);
} else if (len == 8) {
dest -= 8;
src -= 8;
memmove_movnt1x8b(dest, src);
} else if (len == 4) {
dest -= 4;
src -= 4;
memmove_movnt1x4b(dest, src);
} else {
goto nonnt;
}
goto end;
}
nonnt:
dest -= len;
src -= len;
memmove_small_avx(dest, src, len);
end:
avx_zeroupper();
}
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
if ((uintptr_t)dest - (uintptr_t)src >= len)
memmove_movnt_avx_fw(dest, src, len);
else
memmove_movnt_avx_bw(dest, src, len);
maybe_barrier();
}
| 8,883 | 25.519403 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clwb.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define flush64b pmem_clwb
#define flush flush_clwb_nolog
#define EXPORTED_SYMBOL memmove_mov_avx_clwb
#include "memcpy_t_avx.h"
| 1,731 | 45.810811 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/flush.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ARM64_FLUSH_H
#define ARM64_FLUSH_H
#include <stdint.h>
#include "arm_cacheops.h"
#include "util.h"
#define FLUSH_ALIGN ((uintptr_t)64)
/*
 * flush_dcache_invalidate_opt_nolog -- flush the CPU cache, using
* arm_clean_and_invalidate_va_to_poc (see arm_cacheops.h) {DC CIVAC}
*/
static force_inline void
flush_dcache_invalidate_opt_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
arm_data_memory_barrier();
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
arm_clean_and_invalidate_va_to_poc((char *)uptr);
}
arm_data_memory_barrier();
}
/*
* flush_dcache_nolog -- flush the CPU cache, using DC CVAC
*/
static force_inline void
flush_dcache_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
arm_clean_va_to_poc((char *)uptr);
}
}
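/*
 * Example of the alignment math above (illustrative): for addr == 0x1005
 * and len == 0x90, uptr starts at 0x1000 and the loop issues DC CVAC for
 * the lines at 0x1000, 0x1040 and 0x1080, covering every byte of the
 * range even though addr is not cache-line aligned.
 */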
#endif
| 2,631 | 32.74359 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/arm_cacheops.h | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ARM inline assembly to flush and invalidate caches
* clwb => dc cvac
* clflush | clflushopt => dc civac
* fence => dmb ish
*/
#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H
#include <stdlib.h>
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}
static inline void
arm_data_memory_barrier(void)
{
asm volatile("dmb ish" : : : "memory");
}
static inline void
arm_clean_and_invalidate_va_to_poc(const void *addr)
{
asm volatile("dc civac, %0" : : "r" (addr) : "memory");
}
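/*
 * Usage sketch (persist_64b is a hypothetical helper, not defined in this
 * header): a minimal "flush then fence" sequence built from these
 * primitives would look like:
 *
 *	static inline void
 *	persist_64b(const void *addr)
 *	{
 *		arm_clean_va_to_poc(addr);	/+ DC CVAC: write back the line +/
 *		arm_data_memory_barrier();	/+ DMB ISH: order the write-back +/
 *	}
 */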
#endif
| 2,185 | 34.258065 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/init.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include "libpmem.h"
#include "flush.h"
#include "os.h"
#include "out.h"
#include "pmem.h"
#include "valgrind_internal.h"
/*
* memmove_nodrain_libc -- (internal) memmove to pmem without hw drain
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem without hw drain
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
pmem_flush_flags(pmemdest, len, flags);
return pmemdest;
}
/*
* predrain_fence_empty -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_fence_empty(void)
{
LOG(15, NULL);
VALGRIND_DO_FENCE;
/* nothing to do (because CLFLUSH did it for us) */
}
/*
* predrain_memory_barrier -- (internal) issue the pre-drain fence instruction
*/
static void
predrain_memory_barrier(void)
{
LOG(15, NULL);
arm_data_memory_barrier();
}
/*
* flush_dcache_invalidate_opt -- (internal) flush the CPU cache,
* using clflushopt for X86 and arm_clean_and_invalidate_va_to_poc
* for aarch64 (see arm_cacheops.h) {DC CIVAC}
*/
static void
flush_dcache_invalidate_opt(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_dcache_invalidate_opt_nolog(addr, len);
}
/*
* flush_dcache -- (internal) flush the CPU cache, using clwb
*/
static void
flush_dcache(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_dcache_nolog(addr, len);
}
/*
* flush_empty -- (internal) do not flush the CPU cache
*/
static void
flush_empty(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_empty_nolog(addr, len);
}
/*
* pmem_init_funcs -- initialize architecture-specific list of pmem operations
*/
void
pmem_init_funcs(struct pmem_funcs *funcs)
{
LOG(3, NULL);
funcs->predrain_fence = predrain_fence_empty;
funcs->deep_flush = flush_dcache_invalidate_opt;
funcs->is_pmem = is_pmem_detect;
funcs->memmove_nodrain = memmove_nodrain_generic;
funcs->memset_nodrain = memset_nodrain_generic;
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
if (ptr) {
long long val = atoll(ptr);
if (val) {
funcs->memmove_nodrain = memmove_nodrain_libc;
funcs->memset_nodrain = memset_nodrain_libc;
}
}
int flush;
char *e = os_getenv("PMEM_NO_FLUSH");
if (e && (strcmp(e, "1") == 0)) {
flush = 0;
LOG(3, "Forced not flushing CPU_cache");
} else if (e && (strcmp(e, "0") == 0)) {
flush = 1;
LOG(3, "Forced flushing CPU_cache");
} else if (pmem_has_auto_flush() == 1) {
flush = 0;
LOG(3, "Not flushing CPU_cache, eADR detected");
} else {
flush = 1;
LOG(3, "Flushing CPU cache");
}
if (flush) {
funcs->flush = funcs->deep_flush;
} else {
funcs->flush = flush_empty;
funcs->predrain_fence = predrain_memory_barrier;
}
if (funcs->deep_flush == flush_dcache)
LOG(3, "Using ARM invalidate");
else if (funcs->deep_flush == flush_dcache_invalidate_opt)
LOG(3, "Synchronize VA to poc for ARM");
else
FATAL("invalid deep flush function address");
if (funcs->deep_flush == flush_empty)
LOG(3, "not flushing CPU cache");
else if (funcs->flush != funcs->deep_flush)
FATAL("invalid flush function address");
if (funcs->memmove_nodrain == memmove_nodrain_generic)
LOG(3, "using generic memmove");
else if (funcs->memmove_nodrain == memmove_nodrain_libc)
LOG(3, "using libc memmove");
else
FATAL("invalid memove_nodrain function address");
}
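/*
 * Selection sketch (illustrative summary of the logic above):
 *
 *	PMEM_NO_FLUSH=1, or eADR detected:
 *		funcs->flush = flush_empty
 *		funcs->predrain_fence = predrain_memory_barrier (DMB ISH only)
 *	otherwise:
 *		funcs->flush = funcs->deep_flush (DC CIVAC loop per cache line)
 *		funcs->predrain_fence = predrain_fence_empty
 */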
| 5,348 | 26.572165 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/libvmem_main.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libvmem_main.c -- entry point for libvmem.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include "win_mmap.h"
void vmem_init(void);
void vmem_fini(void);
void jemalloc_constructor(void);
void jemalloc_destructor(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
jemalloc_constructor();
vmem_init();
win_mmap_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
win_mmap_fini();
vmem_fini();
jemalloc_destructor();
break;
}
return TRUE;
}
| 2,332 | 31.859155 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem.c -- memory pool & allocation entry points for libvmem
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <errno.h>
#include <stdint.h>
#include <fcntl.h>
#include <inttypes.h>
#include <wchar.h>
#include "libvmem.h"
#include "jemalloc.h"
#include "pmemcommon.h"
#include "sys_util.h"
#include "file.h"
#include "vmem.h"
#include "valgrind_internal.h"
/*
* private to this file...
*/
static size_t Header_size;
static os_mutex_t Vmem_init_lock;
static os_mutex_t Pool_lock; /* guards vmem_create and vmem_delete */
/*
* print_jemalloc_messages -- custom print function, for jemalloc
*
* Prints traces from jemalloc. All traces from jemalloc
* are considered as error messages.
*/
static void
print_jemalloc_messages(void *ignore, const char *s)
{
ERR("%s", s);
}
/*
* print_jemalloc_stats -- print function, for jemalloc statistics
*
* Prints statistics from jemalloc. All statistics are printed with level 0.
*/
static void
print_jemalloc_stats(void *ignore, const char *s)
{
LOG_NONL(0, "%s", s);
}
/*
* vmem_construct -- initialization for vmem
*
* Called automatically by the run-time loader or on the first use of vmem.
*/
void
vmem_construct(void)
{
static bool initialized = false;
int (*je_vmem_navsnprintf)
(char *, size_t, const char *, va_list) = NULL;
if (initialized)
return;
util_mutex_lock(&Vmem_init_lock);
if (!initialized) {
common_init(VMEM_LOG_PREFIX, VMEM_LOG_LEVEL_VAR,
VMEM_LOG_FILE_VAR, VMEM_MAJOR_VERSION,
VMEM_MINOR_VERSION);
out_set_vsnprintf_func(je_vmem_navsnprintf);
LOG(3, NULL);
Header_size = roundup(sizeof(VMEM), Pagesize);
/* Set up jemalloc messages to a custom print function */
je_vmem_malloc_message = print_jemalloc_messages;
initialized = true;
}
util_mutex_unlock(&Vmem_init_lock);
}
/*
* vmem_init -- load-time initialization for vmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
vmem_init(void)
{
util_mutex_init(&Vmem_init_lock);
util_mutex_init(&Pool_lock);
vmem_construct();
}
/*
* vmem_fini -- libvmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
vmem_fini(void)
{
LOG(3, NULL);
util_mutex_destroy(&Pool_lock);
util_mutex_destroy(&Vmem_init_lock);
/* set up jemalloc messages back to stderr */
je_vmem_malloc_message = NULL;
common_fini();
}
/*
* vmem_createU -- create a memory pool in a temp file
*/
#ifndef _WIN32
static inline
#endif
VMEM *
vmem_createU(const char *dir, size_t size)
{
vmem_construct();
LOG(3, "dir \"%s\" size %zu", dir, size);
if (size < VMEM_MIN_POOL) {
ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
errno = EINVAL;
return NULL;
}
enum file_type type = util_file_get_type(dir);
if (type == OTHER_ERROR)
return NULL;
util_mutex_lock(&Pool_lock);
/* silently enforce multiple of mapping alignment */
size = roundup(size, Mmap_align);
void *addr;
if (type == TYPE_DEVDAX) {
if ((addr = util_file_map_whole(dir)) == NULL) {
util_mutex_unlock(&Pool_lock);
return NULL;
}
} else {
if ((addr = util_map_tmpfile(dir, size,
4 * MEGABYTE)) == NULL) {
util_mutex_unlock(&Pool_lock);
return NULL;
}
}
/* store opaque info at beginning of mapped area */
struct vmem *vmp = addr;
memset(&vmp->hdr, '\0', sizeof(vmp->hdr));
memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
vmp->addr = addr;
vmp->size = size;
vmp->caller_mapped = 0;
/* Prepare pool for jemalloc */
if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
size - Header_size,
/* zeroed if */ type != TYPE_DEVDAX,
/* empty */ 1) == NULL) {
ERR("pool creation failed");
util_unmap(vmp->addr, vmp->size);
util_mutex_unlock(&Pool_lock);
return NULL;
}
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
if (type != TYPE_DEVDAX)
util_range_none(addr, sizeof(struct pool_hdr));
util_mutex_unlock(&Pool_lock);
LOG(3, "vmp %p", vmp);
return vmp;
}
#ifndef _WIN32
/*
* vmem_create -- create a memory pool in a temp file
*/
VMEM *
vmem_create(const char *dir, size_t size)
{
return vmem_createU(dir, size);
}
#else
/*
* vmem_createW -- create a memory pool in a temp file
*/
VMEM *
vmem_createW(const wchar_t *dir, size_t size)
{
char *udir = util_toUTF8(dir);
if (udir == NULL)
return NULL;
VMEM *ret = vmem_createU(udir, size);
util_free_UTF8(udir);
return ret;
}
#endif
/*
* vmem_create_in_region -- create a memory pool in a given range
*/
VMEM *
vmem_create_in_region(void *addr, size_t size)
{
vmem_construct();
LOG(3, "addr %p size %zu", addr, size);
if (((uintptr_t)addr & (Pagesize - 1)) != 0) {
ERR("addr %p not aligned to pagesize %llu", addr, Pagesize);
errno = EINVAL;
return NULL;
}
if (size < VMEM_MIN_POOL) {
ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
errno = EINVAL;
return NULL;
}
/*
* Initially, treat this memory region as undefined.
* Once jemalloc initializes its metadata, it will also mark
* registered free chunks (usable heap space) as unaddressable.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, size);
/* store opaque info at beginning of mapped area */
struct vmem *vmp = addr;
memset(&vmp->hdr, '\0', sizeof(vmp->hdr));
memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
vmp->addr = addr;
vmp->size = size;
vmp->caller_mapped = 1;
util_mutex_lock(&Pool_lock);
/* Prepare pool for jemalloc */
if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
size - Header_size, 0,
/* empty */ 1) == NULL) {
ERR("pool creation failed");
util_mutex_unlock(&Pool_lock);
return NULL;
}
#ifndef _WIN32
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
util_range_none(addr, sizeof(struct pool_hdr));
#endif
util_mutex_unlock(&Pool_lock);
LOG(3, "vmp %p", vmp);
return vmp;
}
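/*
 * Usage sketch (illustrative; "/mnt/pmem-fs" is a hypothetical directory):
 *
 *	VMEM *vmp = vmem_create("/mnt/pmem-fs", VMEM_MIN_POOL);
 *	if (vmp != NULL) {
 *		char *buf = vmem_malloc(vmp, 128);
 *		if (buf != NULL)
 *			vmem_free(vmp, buf);
 *		vmem_delete(vmp);
 *	}
 */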
/*
* vmem_delete -- delete a memory pool
*/
void
vmem_delete(VMEM *vmp)
{
LOG(3, "vmp %p", vmp);
util_mutex_lock(&Pool_lock);
int ret = je_vmem_pool_delete((pool_t *)((uintptr_t)vmp + Header_size));
if (ret != 0) {
ERR("invalid pool handle: 0x%" PRIxPTR, (uintptr_t)vmp);
errno = EINVAL;
util_mutex_unlock(&Pool_lock);
return;
}
#ifndef _WIN32
util_range_rw(vmp->addr, sizeof(struct pool_hdr));
#endif
if (vmp->caller_mapped == 0) {
util_unmap(vmp->addr, vmp->size);
} else {
/*
* The application cannot do any assumptions about the content
* of this memory region once the pool is destroyed.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(vmp->addr, vmp->size);
}
util_mutex_unlock(&Pool_lock);
}
/*
* vmem_check -- memory pool consistency check
*/
int
vmem_check(VMEM *vmp)
{
vmem_construct();
LOG(3, "vmp %p", vmp);
util_mutex_lock(&Pool_lock);
int ret = je_vmem_pool_check((pool_t *)((uintptr_t)vmp + Header_size));
util_mutex_unlock(&Pool_lock);
return ret;
}
/*
* vmem_stats_print -- spew memory allocator stats for a pool
*/
void
vmem_stats_print(VMEM *vmp, const char *opts)
{
LOG(3, "vmp %p opts \"%s\"", vmp, opts ? opts : "");
je_vmem_pool_malloc_stats_print(
(pool_t *)((uintptr_t)vmp + Header_size),
print_jemalloc_stats, NULL, opts);
}
/*
* vmem_malloc -- allocate memory
*/
void *
vmem_malloc(VMEM *vmp, size_t size)
{
LOG(3, "vmp %p size %zu", vmp, size);
return je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
}
/*
* vmem_free -- free memory
*/
void
vmem_free(VMEM *vmp, void *ptr)
{
LOG(3, "vmp %p ptr %p", vmp, ptr);
je_vmem_pool_free((pool_t *)((uintptr_t)vmp + Header_size), ptr);
}
/*
* vmem_calloc -- allocate zeroed memory
*/
void *
vmem_calloc(VMEM *vmp, size_t nmemb, size_t size)
{
LOG(3, "vmp %p nmemb %zu size %zu", vmp, nmemb, size);
return je_vmem_pool_calloc((pool_t *)((uintptr_t)vmp + Header_size),
nmemb, size);
}
/*
* vmem_realloc -- resize a memory allocation
*/
void *
vmem_realloc(VMEM *vmp, void *ptr, size_t size)
{
LOG(3, "vmp %p ptr %p size %zu", vmp, ptr, size);
return je_vmem_pool_ralloc((pool_t *)((uintptr_t)vmp + Header_size),
ptr, size);
}
/*
* vmem_aligned_alloc -- allocate aligned memory
*/
void *
vmem_aligned_alloc(VMEM *vmp, size_t alignment, size_t size)
{
LOG(3, "vmp %p alignment %zu size %zu", vmp, alignment, size);
return je_vmem_pool_aligned_alloc(
(pool_t *)((uintptr_t)vmp + Header_size),
alignment, size);
}
/*
* vmem_strdup -- allocate memory for copy of string
*/
char *
vmem_strdup(VMEM *vmp, const char *s)
{
LOG(3, "vmp %p s %p", vmp, s);
size_t size = strlen(s) + 1;
void *retaddr = je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
if (retaddr == NULL)
return NULL;
return (char *)memcpy(retaddr, s, size);
}
/*
* vmem_wcsdup -- allocate memory for copy of wide character string
*/
wchar_t *
vmem_wcsdup(VMEM *vmp, const wchar_t *s)
{
LOG(3, "vmp %p s %p", vmp, s);
size_t size = (wcslen(s) + 1) * sizeof(wchar_t);
void *retaddr = je_vmem_pool_malloc(
(pool_t *)((uintptr_t)vmp + Header_size), size);
if (retaddr == NULL)
return NULL;
return (wchar_t *)memcpy(retaddr, s, size);
}
/*
* vmem_malloc_usable_size -- get usable size of allocation
*/
size_t
vmem_malloc_usable_size(VMEM *vmp, void *ptr)
{
LOG(3, "vmp %p ptr %p", vmp, ptr);
return je_vmem_pool_malloc_usable_size(
(pool_t *)((uintptr_t)vmp + Header_size), ptr);
}
| 11,248 | 21.957143 | 76 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem.h -- internal definitions for libvmem
*/
#ifndef VMEM_H
#define VMEM_H 1
#include <stddef.h>
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VMEM_LOG_PREFIX "libvmem"
#define VMEM_LOG_LEVEL_VAR "VMEM_LOG_LEVEL"
#define VMEM_LOG_FILE_VAR "VMEM_LOG_FILE"
/* attributes of the vmem memory pool format for the pool header */
#define VMEM_HDR_SIG "VMEM " /* must be 8 bytes including '\0' */
#define VMEM_FORMAT_MAJOR 1
struct vmem {
struct pool_hdr hdr; /* memory pool header */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int caller_mapped;
};
void vmem_construct(void);
#ifdef __cplusplus
}
#endif
#endif
| 2,284 | 31.183099 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/libvmem.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libvmem.c -- basic libvmem functions
*/
#include <stdio.h>
#include <stdint.h>
#include "libvmem.h"
#include "jemalloc.h"
#include "out.h"
#include "vmem.h"
/*
* vmem_check_versionU -- see if library meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
vmem_check_versionU(unsigned major_required, unsigned minor_required)
{
vmem_construct();
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != VMEM_MAJOR_VERSION) {
ERR("libvmem major version mismatch (need %u, found %u)",
major_required, VMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > VMEM_MINOR_VERSION) {
ERR("libvmem minor version mismatch (need %u, found %u)",
minor_required, VMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* vmem_check_version -- see if library meets application version requirements
*/
const char *
vmem_check_version(unsigned major_required, unsigned minor_required)
{
return vmem_check_versionU(major_required, minor_required);
}
#else
/*
* vmem_check_versionW -- see if library meets application version requirements
*/
const wchar_t *
vmem_check_versionW(unsigned major_required, unsigned minor_required)
{
if (vmem_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* vmem_set_funcs -- allow overriding libvmem's call to malloc, etc.
*/
void
vmem_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s),
void (*print_func)(const char *s))
{
vmem_construct();
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func,
realloc_func, strdup_func);
out_set_print_func(print_func);
je_vmem_pool_set_alloc_funcs(malloc_func, free_func);
}
/*
* vmem_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
vmem_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* vmem_errormsg -- return last error message
*/
const char *
vmem_errormsg(void)
{
return vmem_errormsgU();
}
#else
/*
* vmem_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
vmem_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 3,940 | 25.809524 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.c | /*
 * Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "getopt.h"
#include <stddef.h>
#include <string.h>
#include <stdio.h>
char* optarg;
int optopt;
/* The variable optind [...] shall be initialized to 1 by the system. */
int optind = 1;
int opterr;
static char* optcursor = NULL;
static char *first = NULL;
/* rotates argv array */
static void rotate(char **argv, int argc) {
if (argc <= 1)
return;
char *tmp = argv[0];
memmove(argv, argv + 1, (argc - 1) * sizeof(char *));
argv[argc - 1] = tmp;
}
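/*
 * Worked example (illustrative): rotate() moves the first element to the
 * end, so for argv == {"foo", "-a", "-b"} and argc == 3 the result is
 * {"-a", "-b", "foo"}; getopt() below repeats this until an option (or
 * the first non-option it saw) is back at the front.
 */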
/* Implemented based on [1] and [2] for optional arguments.
optopt is handled FreeBSD-style, per [3].
Other GNU and FreeBSD extensions are purely accidental.
[1] http://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html
[2] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
[3] http://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE
*/
int getopt(int argc, char* const argv[], const char* optstring) {
int optchar = -1;
const char* optdecl = NULL;
optarg = NULL;
opterr = 0;
optopt = 0;
/* Unspecified, but we need it to avoid overrunning the argv bounds. */
if (optind >= argc)
goto no_more_optchars;
/* If, when getopt() is called argv[optind] is a null pointer, getopt()
shall return -1 without changing optind. */
if (argv[optind] == NULL)
goto no_more_optchars;
/* If, when getopt() is called *argv[optind] is not the character '-',
permute argv to move non options to the end */
if (*argv[optind] != '-') {
if (argc - optind <= 1)
goto no_more_optchars;
if (!first)
first = argv[optind];
do {
rotate((char **)(argv + optind), argc - optind);
} while (*argv[optind] != '-' && argv[optind] != first);
if (argv[optind] == first)
goto no_more_optchars;
}
/* If, when getopt() is called argv[optind] points to the string "-",
getopt() shall return -1 without changing optind. */
if (strcmp(argv[optind], "-") == 0)
goto no_more_optchars;
/* If, when getopt() is called argv[optind] points to the string "--",
getopt() shall return -1 after incrementing optind. */
if (strcmp(argv[optind], "--") == 0) {
++optind;
if (first) {
do {
rotate((char **)(argv + optind), argc - optind);
} while (argv[optind] != first);
}
goto no_more_optchars;
}
if (optcursor == NULL || *optcursor == '\0')
optcursor = argv[optind] + 1;
optchar = *optcursor;
/* FreeBSD: The variable optopt saves the last known option character
returned by getopt(). */
optopt = optchar;
/* The getopt() function shall return the next option character (if one is
found) from argv that matches a character in optstring, if there is
one that matches. */
optdecl = strchr(optstring, optchar);
if (optdecl) {
/* [I]f a character is followed by a colon, the option takes an
argument. */
if (optdecl[1] == ':') {
optarg = ++optcursor;
if (*optarg == '\0') {
/* GNU extension: Two colons mean an option takes an
optional arg; if there is text in the current argv-element
(i.e., in the same word as the option name itself, for example,
"-oarg"), then it is returned in optarg, otherwise optarg is set
to zero. */
if (optdecl[2] != ':') {
/* If the option was the last character in the string pointed to by
an element of argv, then optarg shall contain the next element
of argv, and optind shall be incremented by 2. If the resulting
value of optind is greater than argc, this indicates a missing
option-argument, and getopt() shall return an error indication.
Otherwise, optarg shall point to the string following the
option character in that element of argv, and optind shall be
incremented by 1.
*/
if (++optind < argc) {
optarg = argv[optind];
} else {
/* If it detects a missing option-argument, it shall return the
colon character ( ':' ) if the first character of optstring
was a colon, or a question-mark character ( '?' ) otherwise.
*/
optarg = NULL;
fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar);
optchar = (optstring[0] == ':') ? ':' : '?';
}
} else {
optarg = NULL;
}
}
optcursor = NULL;
}
} else {
fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar);
/* If getopt() encounters an option character that is not contained in
optstring, it shall return the question-mark ( '?' ) character. */
optchar = '?';
}
if (optcursor == NULL || *++optcursor == '\0')
++optind;
return optchar;
no_more_optchars:
optcursor = NULL;
first = NULL;
return -1;
}
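/*
 * Worked example (illustrative): with optstring "o:" and argv
 * {"prog", "-o", "file"}, getopt() returns 'o' with optarg == "file" and
 * optind advanced past both elements; with argv {"prog", "-ofile"} the
 * same call returns 'o' with optarg == "file" taken from the same
 * argv element.
 */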
/* Implementation based on [1].
[1] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
*/
int getopt_long(int argc, char* const argv[], const char* optstring,
const struct option* longopts, int* longindex) {
const struct option* o = longopts;
const struct option* match = NULL;
int num_matches = 0;
size_t argument_name_length = 0;
const char* current_argument = NULL;
int retval = -1;
optarg = NULL;
optopt = 0;
if (optind >= argc)
return -1;
/* If, when getopt() is called argv[optind] is a null pointer, getopt_long()
shall return -1 without changing optind. */
if (argv[optind] == NULL)
goto no_more_optchars;
/* If, when getopt_long() is called *argv[optind] is not the character '-',
permute argv to move non options to the end */
if (*argv[optind] != '-') {
if (argc - optind <= 1)
goto no_more_optchars;
if (!first)
first = argv[optind];
do {
rotate((char **)(argv + optind), argc - optind);
} while (*argv[optind] != '-' && argv[optind] != first);
if (argv[optind] == first)
goto no_more_optchars;
}
if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0)
return getopt(argc, argv, optstring);
/* It's an option; starts with -- and is longer than two chars. */
current_argument = argv[optind] + 2;
argument_name_length = strcspn(current_argument, "=");
for (; o->name; ++o) {
if (strncmp(o->name, current_argument, argument_name_length) == 0) {
match = o;
++num_matches;
if (strlen(o->name) == argument_name_length) {
/* found match is exactly the one which we are looking for */
num_matches = 1;
break;
}
}
}
if (num_matches == 1) {
/* If longindex is not NULL, it points to a variable which is set to the
index of the long option relative to longopts. */
if (longindex)
*longindex = (int)(match - longopts);
/* If flag is NULL, then getopt_long() shall return val.
Otherwise, getopt_long() returns 0, and flag shall point to a variable
which shall be set to val if the option is found, but left unchanged if
the option is not found. */
if (match->flag)
*(match->flag) = match->val;
retval = match->flag ? 0 : match->val;
if (match->has_arg != no_argument) {
optarg = strchr(argv[optind], '=');
if (optarg != NULL)
++optarg;
if (match->has_arg == required_argument) {
/* Only scan the next argv for required arguments. Behavior is not
specified, but has been observed with Ubuntu and Mac OSX. */
if (optarg == NULL && ++optind < argc) {
optarg = argv[optind];
}
if (optarg == NULL)
retval = ':';
}
} else if (strchr(argv[optind], '=')) {
/* An argument was provided to a non-argument option.
I haven't seen this specified explicitly, but both GNU and BSD-based
implementations show this behavior.
*/
retval = '?';
}
} else {
/* Unknown option or ambiguous match. */
retval = '?';
if (num_matches == 0) {
fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]);
} else {
fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]);
}
}
++optind;
return retval;
no_more_optchars:
first = NULL;
return -1;
}
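/*
 * Usage sketch (illustrative; verbose, output and usage() are
 * hypothetical):
 *
 *	static const struct option long_opts[] = {
 *		{"verbose", no_argument, NULL, 'v'},
 *		{"output", required_argument, NULL, 'o'},
 *		{NULL, 0, NULL, 0}
 *	};
 *	int opt;
 *	while ((opt = getopt_long(argc, argv, "vo:", long_opts, NULL)) != -1) {
 *		switch (opt) {
 *		case 'v': verbose = 1; break;
 *		case 'o': output = optarg; break;
 *		default: usage(); break;
 *		}
 *	}
 */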
| 9,862 | 32.547619 | 91 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.h | /*
 * Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H
#if defined(__cplusplus)
extern "C" {
#endif
#define no_argument 0
#define required_argument 1
#define optional_argument 2
extern char* optarg;
extern int optind, opterr, optopt;
struct option {
const char* name;
int has_arg;
int* flag;
int val;
};
int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
const char* optstring, const struct option* longopts, int* longindex);
#if defined(__cplusplus)
}
#endif
#endif // INCLUDED_GETOPT_PORT_H
| 2,137 | 35.237288 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h | /* ./../windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
| 1,327 | 27.255319 | 115 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle_jet.h | /*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create jet_pool_create
# define pool_delete jet_pool_delete
# define pool_malloc jet_pool_malloc
# define pool_calloc jet_pool_calloc
# define pool_ralloc jet_pool_ralloc
# define pool_aligned_alloc jet_pool_aligned_alloc
# define pool_free jet_pool_free
# define pool_malloc_usable_size jet_pool_malloc_usable_size
# define pool_malloc_stats_print jet_pool_malloc_stats_print
# define pool_extend jet_pool_extend
# define pool_set_alloc_funcs jet_pool_set_alloc_funcs
# define pool_check jet_pool_check
# define malloc_conf jet_malloc_conf
# define malloc_message jet_malloc_message
# define malloc jet_malloc
# define calloc jet_calloc
# define posix_memalign jet_posix_memalign
# define aligned_alloc jet_aligned_alloc
# define realloc jet_realloc
# define free jet_free
# define mallocx jet_mallocx
# define rallocx jet_rallocx
# define xallocx jet_xallocx
# define sallocx jet_sallocx
# define dallocx jet_dallocx
# define nallocx jet_nallocx
# define mallctl jet_mallctl
# define mallctlnametomib jet_mallctlnametomib
# define mallctlbymib jet_mallctlbymib
# define navsnprintf jet_navsnprintf
# define malloc_stats_print jet_malloc_stats_print
# define malloc_usable_size jet_malloc_usable_size
#endif
/*
* The jet_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef jet_pool_create
# undef jet_pool_delete
# undef jet_pool_malloc
# undef jet_pool_calloc
# undef jet_pool_ralloc
# undef jet_pool_aligned_alloc
# undef jet_pool_free
# undef jet_pool_malloc_usable_size
# undef jet_pool_malloc_stats_print
# undef jet_pool_extend
# undef jet_pool_set_alloc_funcs
# undef jet_pool_check
# undef jet_malloc_conf
# undef jet_malloc_message
# undef jet_malloc
# undef jet_calloc
# undef jet_posix_memalign
# undef jet_aligned_alloc
# undef jet_realloc
# undef jet_free
# undef jet_mallocx
# undef jet_rallocx
# undef jet_xallocx
# undef jet_sallocx
# undef jet_dallocx
# undef jet_nallocx
# undef jet_mallctl
# undef jet_mallctlnametomib
# undef jet_mallctlbymib
# undef jet_navsnprintf
# undef jet_malloc_stats_print
# undef jet_malloc_usable_size
#endif
| 2,939 | 32.793103 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos_jet.h | /*
* The jet_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
 * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle_jet.h).
*/
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *jet_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int jet_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t jet_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *jet_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *jet_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *jet_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *jet_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void jet_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t jet_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void jet_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void jet_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int jet_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *jet_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int jet_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *jet_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void jet_free(void *ptr);
JEMALLOC_EXPORT void *jet_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *jet_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t jet_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t jet_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void jet_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t jet_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int jet_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int jet_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int jet_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void jet_malloc_stats_print(void (*write_cb)(void *,
const char *), void *jet_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t jet_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int jet_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * jet_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * jet_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,176 | 45.043478 | 91 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_rename.h | /*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
| 1,694 | 41.375 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle.h | /*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
| 2,874 | 32.045977 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc.h | #ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
#include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
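/*
 * Flags example (illustrative): a 4 KiB allocation aligned to 64 bytes and
 * zero-filled can be requested with the macros above, e.g.
 *
 *	void *p = je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * (with JEMALLOC_MANGLE defined this is spelled mallocx(...)).
 */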
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed, int empty);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
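/*
 * A short sketch of the pool API declared above: a pool is carved out of a
 * caller-provided memory region and then serves allocations from it.  The
 * exact meaning of the "zeroed" and "empty" arguments, and the return
 * convention of je_pool_check(), are assumptions here; the sketch is kept
 * under "#if 0" so the header itself is unchanged.
 */
#if 0
static int
pool_roundtrip(void *region, size_t region_size)
{
	pool_t *pool = je_pool_create(region, region_size, 1, 1);
	if (pool == NULL)
		return -1;

	void *obj = je_pool_malloc(pool, 128);
	if (obj != NULL)
		je_pool_free(pool, obj);

	int ok = je_pool_check(pool);	/* assumed: non-zero on a consistent pool */
	je_pool_delete(pool);
	return ok ? 0 : -1;
}
#endif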
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
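/*
 * A minimal sketch of the je_mallctl() control interface declared above: it
 * follows a sysctl-like convention of passing an old-value buffer and an
 * optional new value.  The "stats.allocated" name is assumed to exist in this
 * build; the available names depend on configuration.
 */
#if 0
static size_t
query_allocated_bytes(void)
{
	size_t allocated = 0;
	size_t len = sizeof(allocated);

	if (je_mallctl("stats.allocated", &allocated, &len, NULL, 0) != 0)
		return 0;
	return allocated;
}
#endif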
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
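/*
 * A sketch of functions matching the chunk_alloc_t/chunk_dalloc_t hook
 * signatures above.  The parameter meanings (requested address, size,
 * alignment, zero-fill flag, arena index, owning pool) follow the usual
 * jemalloc chunk-hook convention and are an assumption here, as is the
 * mechanism this fork uses to register the hooks.
 */
#if 0
#include <stdlib.h>

static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind, pool_t *pool)
{
	(void)new_addr; (void)arena_ind; (void)pool;
	(void)alignment;	/* alignment handling omitted; a real hook must honor it */
	void *chunk = calloc(1, size);	/* calloc returns zeroed memory */
	if (chunk != NULL && zero != NULL)
		*zero = true;
	return chunk;
}

static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind, pool_t *pool)
{
	(void)size; (void)arena_ind; (void)pool;
	free(chunk);
	return false;	/* assumed convention: false == deallocation handled */
}
#endif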
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
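/*
 * A brief sketch of the effect of JEMALLOC_MANGLE described above: with it
 * defined before including this header, the plain names become macros that
 * expand to the je_-prefixed entry points (which this header in turn maps to
 * je_vmem_-prefixed symbols near its top), so unqualified calls route into
 * this allocator.  The include path below is illustrative only.
 */
#if 0
#define JEMALLOC_MANGLE
/* #include "jemalloc.h" -- include path depends on the build */
static void
mangled_usage(void)
{
	void *p = malloc(64);	/* expands to je_malloc(64) */
	p = realloc(p, 128);	/* expands to je_realloc(p, 128) */
	free(p);		/* expands to je_free(p) */
}
#endif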
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
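/*
 * A short sketch of the JEMALLOC_NO_DEMANGLE case described above: the
 * je_-prefixed names stay defined, so this allocator can be called explicitly
 * while plain malloc()/free() continue to refer to whatever allocator the
 * rest of the application links against.  The include path is illustrative.
 */
#if 0
#define JEMALLOC_NO_DEMANGLE
/* #include "jemalloc.h" -- include path depends on the build */
static void
side_by_side(void)
{
	void *a = malloc(32);		/* other allocator */
	void *b = je_malloc(32);	/* this allocator */
	free(a);
	je_free(b);
}
#endif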
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
| 10,674 | 34 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos.h | /*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
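/*
 * A small sketch of the statistics callback shape used by
 * je_pool_malloc_stats_print() above: write_cb receives the opaque pointer
 * passed by the caller plus each chunk of formatted text.  Passing NULL for
 * "opts" is an assumption; the accepted option characters depend on the
 * build.
 */
#if 0
#include <stdio.h>

static void
write_to_stream(void *cbopaque, const char *s)
{
	fputs(s, (FILE *)cbopaque);
}

static void
dump_pool_stats(pool_t *pool)
{
	je_pool_malloc_stats_print(pool, write_to_stream, stderr, NULL);
}
#endif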
| 3,124 | 44.289855 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_macros.h | #include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
| 1,426 | 29.361702 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_unnamespace.h | #undef je_pool_create
#undef je_pool_delete
#undef je_pool_malloc
#undef je_pool_calloc
#undef je_pool_ralloc
#undef je_pool_aligned_alloc
#undef je_pool_free
#undef je_pool_malloc_usable_size
#undef je_pool_malloc_stats_print
#undef je_pool_extend
#undef je_pool_set_alloc_funcs
#undef je_pool_check
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_navsnprintf
#undef je_malloc_stats_print
#undef je_malloc_usable_size
| 720 | 20.848485 | 33 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_namespace.h | #define je_pool_create JEMALLOC_N(pool_create)
#define je_pool_delete JEMALLOC_N(pool_delete)
#define je_pool_malloc JEMALLOC_N(pool_malloc)
#define je_pool_calloc JEMALLOC_N(pool_calloc)
#define je_pool_ralloc JEMALLOC_N(pool_ralloc)
#define je_pool_aligned_alloc JEMALLOC_N(pool_aligned_alloc)
#define je_pool_free JEMALLOC_N(pool_free)
#define je_pool_malloc_usable_size JEMALLOC_N(pool_malloc_usable_size)
#define je_pool_malloc_stats_print JEMALLOC_N(pool_malloc_stats_print)
#define je_pool_extend JEMALLOC_N(pool_extend)
#define je_pool_set_alloc_funcs JEMALLOC_N(pool_set_alloc_funcs)
#define je_pool_check JEMALLOC_N(pool_check)
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_navsnprintf JEMALLOC_N(navsnprintf)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
| 1,536 | 45.575758 | 70 | h |