repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_movnt/pmem2_movnt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt.c -- test for MOVNT threshold
*
* usage: pmem2_movnt
*/
#include "unittest.h"
#include "ut_pmem2.h"
/*
 * main -- exercise the memcpy/memmove/memset variants of a pmem2 mapping
 * over power-of-two sizes around the MOVNT threshold and verify that no
 * byte outside the requested range is touched.
 */
int
main(int argc, char *argv[])
{
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;
	int fd;

	if (argc != 2)
		UT_FATAL("usage: %s file", argv[0]);

	/* environment knobs that select the temporal/non-temporal paths */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_movnt %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	fd = OPEN(argv[1], O_RDWR);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);

	/* cache-line-aligned buffers so non-temporal stores can be used */
	char *src_buf = MEMALIGN(64, 8192);
	char *dst_buf = MEMALIGN(64, 8192);

	memset(src_buf, 0x88, 8192);
	memset(dst_buf, 0, 8192);

	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);

	/* memcpy: copied range must match src; byte past the range stays 0 */
	for (size_t sz = 1; sz <= 4096; sz *= 2) {
		memset(dst_buf, 0, 4096);
		memcpy_fn(dst_buf, src_buf, sz, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src_buf, dst_buf, sz), 0);
		UT_ASSERTeq(dst_buf[sz], 0);
	}

	/* memmove: same contract as the memcpy loop above */
	for (size_t sz = 1; sz <= 4096; sz *= 2) {
		memset(dst_buf, 0, 4096);
		memmove_fn(dst_buf, src_buf, sz, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src_buf, dst_buf, sz), 0);
		UT_ASSERTeq(dst_buf[sz], 0);
	}

	/* memset: first and last byte set, byte past the range stays 0 */
	for (size_t sz = 1; sz <= 4096; sz *= 2) {
		memset(dst_buf, 0, 4096);
		memset_fn(dst_buf, 0x77, sz, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(dst_buf[0], 0x77);
		UT_ASSERTeq(dst_buf[sz - 1], 0x77);
		UT_ASSERTeq(dst_buf[sz], 0);
	}

	ALIGNED_FREE(dst_buf);
	ALIGNED_FREE(src_buf);

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 1,945 | 21.113636 | 59 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memmove/pmem2_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem2_memmove.c -- test for doing a memmove
*
* usage:
* pmem2_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "file.h"
#include "memmove_common.h"
/*
 * do_memmove_variants -- (internal) run do_memmove() once for every flag
 * combination in the global Flags[] table
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes, persist_fn p,
	memmove_fn fn)
{
	/*
	 * size_t index: ARRAY_SIZE() yields a size_t, so an int index
	 * causes a signed/unsigned comparison (-Wsign-compare).
	 */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memmove(dst, src, file_name, dest_off, src_off,
				bytes, fn, Flags[i], p);
	}
}
/*
 * main -- map a file with libpmem2, parse the b:/d:/s:/o: arguments and
 * run the memmove test variants on the mapping.
 *
 * Fixes: the argument-count check now happens BEFORE argv[1] is passed
 * to OPEN() (previously argc < 2 dereferenced a NULL argv[1]); size_t
 * values are compared with == 0 instead of <= 0 and printed with %zu.
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	char *src_orig;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;		/* 0: distinct buffers, 1: src == dst */
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;

	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	/* validate the argument count before touching argv[1] */
	if (argc < 3)
		USAGE();

	fd = OPEN(argv[1], O_RDWR);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);

	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	mapped_len = pmem2_map_get_size(map);
	dst = pmem2_map_get_address(map);
	if (dst == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);

	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);

	/* parse "X:value" arguments; STRTOUL yields an unsigned size_t */
	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
				argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");

		size_t val = STRTOUL(&argv[arg][2], NULL, 0);

		switch (argv[arg][0]) {
		case 'd':
			if (val == 0)
				UT_FATAL("bad offset (%zu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':
			if (val == 0)
				UT_FATAL("bad offset (%zu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':
			if (val == 0)
				UT_FATAL("bad length (%zu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%zu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}

	if (who == 0) {
		/* src in the upper half of the mapping: src > dst case */
		src_orig = src = dst + mapped_len / 2;
		UT_ASSERT(src > dst);

		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);

		/* dest > src */
		src = dst;
		dst = src_orig;

		if (dst <= src)
			UT_FATAL("cannot map files in memory order");

		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	} else {
		/* use the same buffer for source and destination */
		memset(dst, 0, bytes);
		persist(dst, bytes);
		do_memmove_variants(dst, dst, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	}

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 3,184 | 20.52027 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memmove/memmove_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memmove_common.h -- header file for common memmove_common test utilities
*/
#ifndef MEMMOVE_COMMON_H
#define MEMMOVE_COMMON_H 1

#include "unittest.h"
#include "file.h"

/* flag combinations exercised by the tests; defined in memmove_common.c */
extern unsigned Flags[10];

/* print usage and abort; expects argv[] to be in scope at the call site */
#define USAGE() do { UT_FATAL("usage: %s file b:length [d:{offset}] "\
	"[s:{offset}] [o:{0|1}]", argv[0]); } while (0)

/* signature of the pmem memmove-style function under test */
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);

/* signature of the matching persist function */
typedef void (*persist_fn)(const void *ptr, size_t len);

/* run one memmove scenario with the given offsets/length/flags */
void do_memmove(char *dst, char *src, const char *file_name,
		size_t dest_off, size_t src_off, size_t bytes,
		memmove_fn fn, unsigned flags, persist_fn p);

/* compare buf1/buf2 for len bytes; dump both and abort on mismatch */
void verify_contents(const char *file_name, int test, const char *buf1,
		const char *buf2, size_t len);

#endif
| 832 | 25.870968 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memmove/memmove_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memmove_common.c -- common part for tests doing a persistent memmove
*/
#include "unittest.h"
#include "memmove_common.h"
/*
* verify_contents -- verify that buffers match, if they don't - print contents
* of both and abort the test
*/
/*
 * verify_contents -- compare two buffers; on mismatch dump both byte by
 * byte (flagging the differing positions) and abort the test
 */
void
verify_contents(const char *file_name, int test,
	const char *buf1, const char *buf2,
	size_t len)
{
	if (memcmp(buf1, buf2, len) != 0) {
		for (size_t pos = 0; pos < len; ++pos) {
			UT_ERR("%04zu 0x%02x 0x%02x %s", pos,
					(uint8_t)buf1[pos],
					(uint8_t)buf2[pos],
					buf1[pos] != buf2[pos] ? "!!!" : "");
		}
		UT_FATAL("%s %d: %zu bytes do not match with memcmp",
			file_name, test, len);
	}
}
/*
* do_memmove: Worker function for memmove.
*
* Always work within the boundary of bytes. Fill in 1/2 of the src
* memory with the pattern we want to write. This allows us to check
* that we did not overwrite anything we were not supposed to in the
* dest. Use the non pmem version of the memset/memcpy commands
* so as not to introduce any possible side affects.
*/
/*
 * do_memmove: Worker function for memmove.
 *
 * Always work within the boundary of bytes. Fill in 1/2 of the src
 * memory with the pattern we want to write. This allows us to check
 * that we did not overwrite anything we were not supposed to in the
 * dest. Use the non pmem version of the memset/memcpy commands
 * so as not to introduce any possible side affects.
 */
void
do_memmove(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes,
	memmove_fn fn, unsigned flags, persist_fn persist)
{
	void *ret;
	/*
	 * Shadow buffers mirror src/dst so results of the pmem memmove under
	 * test can be checked against plain libc memmove on the same data.
	 * A single shadow is shared when src == dst (the o:1 mode).
	 */
	char *srcshadow = MALLOC(dest_off + src_off + bytes);
	char *dstshadow = srcshadow;
	if (src != dst)
		dstshadow = MALLOC(dest_off + src_off + bytes);
	char old;
	/* known patterns: dst = 0x22; src = 0x33,0x44 halves then 0x11 */
	memset(src, 0x11, bytes);
	memset(dst, 0x22, bytes);
	memset(src, 0x33, bytes / 4);
	memset(src + bytes / 4, 0x44, bytes / 4);
	persist(src, bytes);
	persist(dst, bytes);
	/* snapshot both buffers into the shadows before any test runs */
	memcpy(srcshadow, src, bytes);
	memcpy(dstshadow, dst, bytes);
	/* TEST 1, dest == src -- a self-move must leave the data intact */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, dst + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, dstshadow + dest_off, bytes / 2);
	verify_contents(file_name, 0, dstshadow, dst, bytes);
	verify_contents(file_name, 1, srcshadow, src, bytes);
	/* TEST 2, len == 0 -- a zero-length move must be a no-op */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, src + src_off, 0, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, 0);
	verify_contents(file_name, 2, dstshadow, dst, bytes);
	verify_contents(file_name, 3, srcshadow, src, bytes);
	/* TEST 3, len == bytes / 2 -- the real data-moving case */
	ret = fn(dst + dest_off, src + src_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	if (flags & PMEM_F_MEM_NOFLUSH)
		/* for pmemcheck */
		persist(dst + dest_off, bytes / 2);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, bytes / 2);
	verify_contents(file_name, 4, dstshadow, dst, bytes);
	verify_contents(file_name, 5, srcshadow, src, bytes);
	FREE(srcshadow);
	if (dstshadow != srcshadow)
		FREE(dstshadow);
}
/*
 * Flags -- every PMEM_F_MEM_* combination the memmove tests iterate over,
 * including mutually-exclusive pairs (e.g. NONTEMPORAL | TEMPORAL) to
 * exercise the library's flag-resolution logic
 */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 3,503 | 28.694915 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_zones/obj_zones.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_zones.c -- allocates from a very large pool (exceeding 1 zone)
*
*/
#include <stddef.h>
#include <page_size.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_zones"
#define ALLOC_SIZE ((8191 * (256 * 1024)) - 16) /* must evenly divide a zone */
/*
* test_create -- allocate all possible objects and log the number. It should
* exceed what would be possible on a single zone.
* Additionally, free one object so that we can later check that it can be
* allocated after the next open.
*/
/*
 * test_create -- allocate all possible objects and log the number. It should
 * exceed what would be possible on a single zone.
 * Additionally, free one object so that we can later check that it can be
 * allocated after the next open.
 */
static void
test_create(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	PMEMoid oid;
	int count = 0;

	/* keep allocating until the pool runs out of space */
	while (pmemobj_alloc(pop, &oid, ALLOC_SIZE, 0, NULL, NULL) == 0)
		count++;

	UT_OUT("allocated: %d", count);

	/* free the last successful allocation for the open-phase check */
	pmemobj_free(&oid);

	pmemobj_close(pop);
}
/*
* test_open -- in the open test we should be able to allocate exactly
* one object.
*/
/*
 * test_open -- in the open test we should be able to allocate exactly
 * one object.
 */
static void
test_open(const char *path)
{
	PMEMobjpool *pop;

	if ((pop = pmemobj_open(path, LAYOUT_NAME)) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* the single slot freed by test_create() must be reusable ... */
	UT_ASSERTeq(pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL), 0);

	/* ... and nothing beyond it */
	UT_ASSERTne(pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL), 0);

	pmemobj_close(pop);
}
/*
* test_malloc_free -- test if alloc until OOM/free/alloc until OOM sequence
* produces the same number of allocations for the second alloc loop.
*/
/*
 * test_malloc_free -- test if alloc until OOM/free/alloc until OOM sequence
 * produces the same number of allocations for the second alloc loop.
 */
static void
test_malloc_free(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	size_t alloc_size = PMEM_PAGESIZE * 32;
	size_t max_allocs = 1000000;

	/* record every OID so the whole batch can be freed later */
	PMEMoid *oid = MALLOC(sizeof(PMEMoid) * max_allocs);

	size_t n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
		UT_ASSERTne(n, max_allocs);
	}
	size_t first_run_allocated = n;

	for (size_t i = 0; i < n; ++i) {
		pmemobj_free(&oid[i]);
	}

	n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
		/*
		 * BUG FIX: guard the second loop like the first one -- if it
		 * ever allocated more than max_allocs objects, oid[] would
		 * overflow before the final equality assert could fire.
		 */
		UT_ASSERTne(n, max_allocs);
	}

	UT_ASSERTeq(first_run_allocated, n);
	pmemobj_close(pop);

	FREE(oid);
}
/*
 * main -- dispatch on the first letter of the operation argument:
 * 'c' -> test_create, 'o' -> test_open, 'f' -> test_malloc_free
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_zones");

	if (argc != 3)
		/* BUG FIX: the usage string omitted the 'f' (free) op */
		UT_FATAL("usage: %s file-name [open|create|free]", argv[0]);

	const char *path = argv[1];
	char op = argv[2][0];

	if (op == 'c')
		test_create(path);
	else if (op == 'o')
		test_open(path);
	else if (op == 'f')
		test_malloc_free(path);
	else
		UT_FATAL("invalid operation");

	DONE(NULL);
}
| 2,706 | 20.148438 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_locks_abort/obj_tx_locks_abort.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_tx_locks_nested.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "locks"
TOID_DECLARE_ROOT(struct root_obj);
TOID_DECLARE(struct obj, 1);
struct root_obj {
PMEMmutex lock;
TOID(struct obj) head;
};
struct obj {
int data;
PMEMmutex lock;
TOID(struct obj) next;
};
/*
* do_nested_tx-- (internal) nested transaction
*/
/*
 * do_nested_tx-- (internal) nested transaction
 *
 * Recursively opens a transaction per list node, locking the node's
 * embedded mutex via TX_PARAM_MUTEX, snapshotting it and writing 'value'.
 */
static void
do_nested_tx(PMEMobjpool *pop, TOID(struct obj) o, int value)
{
	/* the TX_PARAM list must be terminated with TX_PARAM_NONE */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
	} TX_END;
}
/*
* do_aborted_nested_tx -- (internal) aborted nested transaction
*/
/*
 * do_aborted_nested_tx -- (internal) aborted nested transaction
 *
 * Same recursive locking scheme as do_nested_tx(), but the outermost
 * transaction aborts; TX_FINALLY then probes each node's mutex with
 * trylock to report whether the abort released the per-node locks.
 */
static void
do_aborted_nested_tx(PMEMobjpool *pop, TOID(struct obj) oid, int value)
{
	TOID(struct obj) o = oid;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
		/* force the abort path; EINVAL becomes the tx errno */
		pmemobj_tx_abort(EINVAL);
	} TX_FINALLY {
		/* runs after abort: walk the list and probe each lock */
		o = oid;
		while (!TOID_IS_NULL(o)) {
			if (pmemobj_mutex_trylock(pop, &D_RW(o)->lock)) {
				UT_OUT("trylock failed");
			} else {
				UT_OUT("trylock succeeded");
				pmemobj_mutex_unlock(pop, &D_RW(o)->lock);
			}
			o = D_RO(o)->next;
		}
	} TX_END;
}
/*
* do_check -- (internal) print 'data' value of each object on the list
*/
/*
 * do_check -- (internal) walk the list and print each node's 'data' value
 */
static void
do_check(TOID(struct obj) o)
{
	for (; !TOID_IS_NULL(o); o = D_RO(o)->next)
		UT_OUT("data = %d", D_RO(o)->data);
}
int
main(int argc, char *argv[])
{
PMEMobjpool *pop;
START(argc, argv, "obj_tx_locks_abort");
if (argc > 3)
UT_FATAL("usage: %s <file>", argv[0]);
pop = pmemobj_create(argv[1], LAYOUT_NAME,
PMEMOBJ_MIN_POOL * 4, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create");
TOID(struct root_obj) root = POBJ_ROOT(pop, struct root_obj);
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->head = TX_ZNEW(struct obj);
TOID(struct obj) o;
o = D_RW(root)->head;
D_RW(o)->data = 100;
pmemobj_mutex_zero(pop, &D_RW(o)->lock);
for (int i = 0; i < 3; i++) {
D_RW(o)->next = TX_ZNEW(struct obj);
o = D_RO(o)->next;
D_RW(o)->data = 101 + i;
pmemobj_mutex_zero(pop, &D_RW(o)->lock);
}
TOID_ASSIGN(D_RW(o)->next, OID_NULL);
} TX_END;
UT_OUT("initial state");
do_check(D_RO(root)->head);
UT_OUT("nested tx");
do_nested_tx(pop, D_RW(root)->head, 200);
do_check(D_RO(root)->head);
UT_OUT("aborted nested tx");
do_aborted_nested_tx(pop, D_RW(root)->head, 300);
do_check(D_RO(root)->head);
pmemobj_close(pop);
DONE(NULL);
}
| 2,994 | 20.392857 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_persist_valgrind/pmem2_persist_valgrind.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_persist_valgrind.c -- pmem2_persist_valgrind tests
*/
#include "out.h"
#include "unittest.h"
#include "ut_pmem2_utils.h"
#define DATA "XXXXXXXX"
#define STRIDE_SIZE 4096
/*
* test_ctx -- essential parameters used by test
*/
struct test_ctx {
int fd;
struct pmem2_map *map;
};
/*
* test_init -- prepare resources required for testing
*/
/*
 * test_init -- prepare resources required for testing: open the file,
 * build a minimal pmem2 config and map the file into ctx->map
 */
static int
test_init(const struct test_case *tc, int argc, char *argv[],
	struct test_ctx *ctx)
{
	if (argc < 1)
		UT_FATAL("usage: %s <file>", tc->name);

	char *file = argv[0];
	ctx->fd = OPEN(file, O_RDWR);

	struct pmem2_source *src;
	int ret = pmem2_source_from_fd(&src, ctx->fd);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	struct pmem2_config *cfg;
	/* fill pmem2_config in minimal scope */
	ret = pmem2_config_new(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	ret = pmem2_config_set_required_store_granularity(
		cfg, PMEM2_GRANULARITY_PAGE);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	/* execute pmem2_map and validate the result */
	ret = pmem2_map(cfg, src, &ctx->map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(ctx->map, NULL);

	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	UT_ASSERTeq(pmem2_map_get_size(ctx->map), size);

	pmem2_config_delete(&cfg);
	/*
	 * BUG FIX: the source object was leaked -- release it here; the
	 * mapping remains valid after the source is deleted.
	 */
	pmem2_source_delete(&src);

	/* the function returns the number of consumed arguments */
	return 1;
}
/*
* test_fini -- cleanup the test resources
*/
/*
 * test_fini -- cleanup the test resources: unmap the file and close the
 * descriptor opened by test_init()
 */
static void
test_fini(struct test_ctx *ctx)
{
	pmem2_unmap(&ctx->map);
	CLOSE(ctx->fd);
}
/*
* data_write -- write the data in mapped memory
*/
/*
 * data_write -- stamp the DATA pattern into the region every 'stride'
 * bytes, stopping before a write would cross the 'size' boundary
 */
static void
data_write(void *addr, size_t size, size_t stride)
{
	char *pos = (char *)addr;
	char *end = (char *)addr + size;

	for (; pos + sizeof(DATA) <= end; pos += stride)
		memcpy(pos, DATA, sizeof(DATA));
}
/*
* data_persist -- persist data in a range of mapped memory with defined stride
*/
static void
data_persist(struct pmem2_map *map, size_t len, size_t stride)
{
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn p_func = pmem2_get_persist_fn(map);
for (size_t offset = 0; offset + len <= map_size;
offset += stride) {
p_func(addr + offset, len);
}
}
/*
* test_persist_continuous_range -- persist continuous data in a range of
* the persistent memory
*/
/*
 * test_persist_continuous_range -- write back-to-back DATA patterns over
 * the whole mapping, then persist the entire range in one call
 */
static int
test_persist_continuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	size_t map_size = pmem2_map_get_size(ctx.map);
	char *addr = pmem2_map_get_address(ctx.map);

	data_write(addr, map_size, sizeof(DATA) /* stride */);
	data_persist(ctx.map, map_size, map_size /* stride */);

	test_fini(&ctx);

	return ret;
}
/*
* test_persist_discontinuous_range -- persist discontinuous data in a range of
* the persistent memory
*/
/*
 * test_persist_discontinuous_range -- write DATA patterns one page apart
 * and persist each pattern individually
 */
static int
test_persist_discontinuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	size_t map_size = pmem2_map_get_size(ctx.map);
	char *addr = pmem2_map_get_address(ctx.map);

	data_write(addr, map_size, STRIDE_SIZE);
	data_persist(ctx.map, sizeof(DATA), STRIDE_SIZE);

	test_fini(&ctx);

	return ret;
}
/*
* test_persist_discontinuous_range_partially -- persist part of discontinuous
* data in a range of persistent memory
*/
/*
 * test_persist_discontinuous_range_partially -- write DATA patterns one
 * page apart but persist only every other one (valgrind should flag the
 * unpersisted half)
 */
static int
test_persist_discontinuous_range_partially(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);

	size_t map_size = pmem2_map_get_size(ctx.map);
	char *addr = pmem2_map_get_address(ctx.map);

	data_write(addr, map_size, STRIDE_SIZE);
	/* persist only a half of the writes */
	data_persist(ctx.map, sizeof(DATA), 2 * STRIDE_SIZE);

	test_fini(&ctx);

	return ret;
}
/*
* test_persist_nonpmem_data -- persist data in a range of the memory mapped
* by mmap()
*/
/*
 * test_persist_nonpmem_data -- persist data in a range of the memory
 * mapped by mmap()
 */
static int
test_persist_nonpmem_data(const struct test_case *tc, int argc, char *argv[])
{
	struct test_ctx ctx = {0};

	/* pmem2_map is needed to get persist function */
	int ret = test_init(tc, argc, argv, &ctx);

	size_t size = pmem2_map_get_size(ctx.map);

	int flags = MAP_SHARED;
	int proto = PROT_READ | PROT_WRITE;

	char *addr;
	addr = mmap(NULL, size, proto, flags, ctx.fd, 0);
	/*
	 * BUG FIX: mmap() failure returns MAP_FAILED (not NULL); without
	 * this check data_write() would fault on the sentinel address.
	 */
	if (addr == MAP_FAILED)
		UT_FATAL("!mmap");

	data_write(addr, size, sizeof(DATA) /* stride */);

	pmem2_persist_fn p_func = pmem2_get_persist_fn(ctx.map);
	p_func(addr, size);

	munmap(addr, size);
	test_fini(&ctx);

	return ret;
}
/*
* test_cases -- available test cases
*/
/*
 * test_cases -- available test cases (dispatched by TEST_CASE_PROCESS)
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_persist_continuous_range),
	TEST_CASE(test_persist_discontinuous_range),
	TEST_CASE(test_persist_discontinuous_range_partially),
	TEST_CASE(test_persist_nonpmem_data),
};

/* number of entries in test_cases[] */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize logging, run the test cases selected on the command
 * line, then tear the logging back down
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_persist_valgrind");
	out_init("pmem2_persist_valgrind", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0,
			0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 5,072 | 22.37788 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_alloc_class/obj_ctl_alloc_class.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* obj_ctl_alloc_class.c -- tests for the ctl entry points: heap.alloc_class
*/
#include <sys/resource.h>
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class"
/*
 * basic -- exercise the heap.alloc_class.* ctl entry points: explicit and
 * auto-assigned class ids, read-back, allocation/reservation from custom
 * classes, and a series of invalid-descriptor cases that must fail
 */
static void
basic(const char *path)
{
	PMEMobjpool *pop;

	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	int ret;
	PMEMoid oid;
	size_t usable_size;

	/* register class id 128: 128-byte units, no per-object header */
	struct pobj_alloc_class_desc alloc_class_128;
	alloc_class_128.header_type = POBJ_HEADER_NONE;
	alloc_class_128.unit_size = 128;
	alloc_class_128.units_per_block = 1000;
	alloc_class_128.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
		&alloc_class_128);
	UT_ASSERTeq(ret, 0);

	/* register class id 129: 1024-byte units, compact headers */
	struct pobj_alloc_class_desc alloc_class_129;
	alloc_class_129.header_type = POBJ_HEADER_COMPACT;
	alloc_class_129.unit_size = 1024;
	alloc_class_129.units_per_block = 1000;
	alloc_class_129.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
		&alloc_class_129);
	UT_ASSERTeq(ret, 0);

	/* read class 128 back; units_per_block may be rounded up by the heap */
	struct pobj_alloc_class_desc alloc_class_128_r;
	ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc",
		&alloc_class_128_r);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(alloc_class_128.header_type, alloc_class_128_r.header_type);
	UT_ASSERTeq(alloc_class_128.unit_size, alloc_class_128_r.unit_size);
	UT_ASSERT(alloc_class_128.units_per_block <=
		alloc_class_128_r.units_per_block);

	/*
	 * One unit from alloc class 128 - 128 bytes unit size, minimal headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, 128, 0, POBJ_CLASS_ID(128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_free(&oid);

	/*
	 * Reserve as above.
	 */
	struct pobj_action act;
	oid = pmemobj_xreserve(pop, &act, 128, 0, POBJ_CLASS_ID(128));
	UT_ASSERT(!OID_IS_NULL(oid));
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_cancel(pop, &act, 1);

	/*
	 * One unit from alloc class 128 - 128 bytes unit size, minimal headers,
	 * but request size 1 byte.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_free(&oid);

	/*
	 * Two units from alloc class 129 -
	 * 1024 bytes unit size, compact headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1024 + 1,
		0, POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (1024 * 2) - 16); /* 2 units minus hdr */
	pmemobj_free(&oid);

	/*
	 * 64 units from alloc class 129
	 * - 1024 bytes unit size, compact headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, (1024 * 64) - 16,
		0, POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (1024 * 64) - 16);
	pmemobj_free(&oid);

	/*
	 * 65 units from alloc class 129 -
	 * 1024 bytes unit size, compact headers.
	 * Should fail, as it would require two bitmap modifications.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1024 * 64 + 1, 0,
		POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, -1);

	/*
	 * Nonexistent alloc class.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(130), NULL, NULL);
	UT_ASSERTeq(ret, -1);

	/* auto-assigned id via the "new" node; class_id is filled in on set */
	struct pobj_alloc_class_desc alloc_class_new;
	alloc_class_new.header_type = POBJ_HEADER_NONE;
	alloc_class_new.unit_size = 777;
	alloc_class_new.units_per_block = 200;
	alloc_class_new.class_id = 0;
	alloc_class_new.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new);
	UT_ASSERTeq(ret, 0);

	/* a duplicate descriptor (same unit size) must be rejected */
	struct pobj_alloc_class_desc alloc_class_fail;
	alloc_class_fail.header_type = POBJ_HEADER_NONE;
	alloc_class_fail.unit_size = 777;
	alloc_class_fail.units_per_block = 200;
	alloc_class_fail.class_id = 0;
	alloc_class_fail.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_fail);
	UT_ASSERTeq(ret, -1);

	/* an out-of-range explicit class id must be rejected too */
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.200.desc",
		&alloc_class_fail);
	UT_ASSERTeq(ret, -1);

	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new.class_id), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 777);

	/* a huge (16 MiB unit) class with a single unit per block */
	struct pobj_alloc_class_desc alloc_class_new_huge;
	alloc_class_new_huge.header_type = POBJ_HEADER_NONE;
	alloc_class_new_huge.unit_size = (2 << 23);
	alloc_class_new_huge.units_per_block = 1;
	alloc_class_new_huge.class_id = 0;
	alloc_class_new_huge.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_huge);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new_huge.class_id), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (2 << 23));

	/* a class at PMEMOBJ_MAX_ALLOC_SIZE units: allocation must fail */
	struct pobj_alloc_class_desc alloc_class_new_max;
	alloc_class_new_max.header_type = POBJ_HEADER_COMPACT;
	alloc_class_new_max.unit_size = PMEMOBJ_MAX_ALLOC_SIZE;
	alloc_class_new_max.units_per_block = 1024;
	alloc_class_new_max.class_id = 0;
	alloc_class_new_max.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_max);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new_max.class_id), NULL, NULL);
	UT_ASSERTne(ret, 0);

	/* requesting one byte more than a whole block must fail */
	struct pobj_alloc_class_desc alloc_class_new_loop;
	alloc_class_new_loop.header_type = POBJ_HEADER_COMPACT;
	alloc_class_new_loop.unit_size = 16384;
	alloc_class_new_loop.units_per_block = 63;
	alloc_class_new_loop.class_id = 0;
	alloc_class_new_loop.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_loop);
	UT_ASSERTeq(ret, 0);
	size_t s = (63 * 16384) - 16;
	ret = pmemobj_xalloc(pop, &oid, s + 1, 0,
		POBJ_CLASS_ID(alloc_class_new_loop.class_id), NULL, NULL);
	UT_ASSERTne(ret, 0);

	/*
	 * Tiny class: units_per_block of 1 is requested; the assert below
	 * shows ctl_set rewrites the descriptor with the actual (larger)
	 * value chosen by the heap -- NOTE(review): inferred from the
	 * assert, confirm against the pmemobj_ctl docs.
	 */
	struct pobj_alloc_class_desc alloc_class_tiny;
	alloc_class_tiny.header_type = POBJ_HEADER_NONE;
	alloc_class_tiny.unit_size = 7;
	alloc_class_tiny.units_per_block = 1;
	alloc_class_tiny.class_id = 0;
	alloc_class_tiny.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_tiny);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(alloc_class_tiny.units_per_block > 1);
	for (int i = 0; i < 1000; ++i) {
		ret = pmemobj_xalloc(pop, &oid, 7, 0,
			POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
		UT_ASSERTeq(ret, 0);
	}

	pmemobj_close(pop);
}
static void
many(const char *path)
{
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
unsigned nunits = UINT16_MAX + 1;
struct pobj_alloc_class_desc alloc_class_tiny;
alloc_class_tiny.header_type = POBJ_HEADER_NONE;
alloc_class_tiny.unit_size = 8;
alloc_class_tiny.units_per_block = nunits;
alloc_class_tiny.class_id = 0;
alloc_class_tiny.alignment = 0;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_tiny);
UT_ASSERTeq(ret, 0);
PMEMoid oid;
uint64_t *counterp = NULL;
for (size_t i = 0; i < nunits; ++i) {
pmemobj_xalloc(pop, &oid, 8, 0,
POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
counterp = pmemobj_direct(oid);
(*counterp)++;
/*
* This works only because this is a fresh pool in a new file
* and so the counter must be initially zero.
* This might have to be fixed if that ever changes.
*/
UT_ASSERTeq(*counterp, 1);
}
pmemobj_close(pop);
}
/*
 * main -- dispatch to basic() or many() based on the operation argument
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alloc_class");

	if (argc != 3)
		UT_FATAL("usage: %s file-name b|m", argv[0]);

	const char *path = argv[1];

	if (argv[2][0] == 'b')
		basic(path);
	else if (argv[2][0] == 'm')
		many(path);
	else
		/* BUG FIX: an unknown op used to be silently ignored */
		UT_FATAL("invalid operation");

	DONE(NULL);
}
| 7,857 | 26.865248 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/traces_pmem/traces_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* traces_pmem.c -- unit test traces for libraries pmem
*/
#include "unittest.h"
/*
 * main -- assert that the runtime version of each pmem library is
 * compatible with the compile-time version macros; each *_check_version()
 * returns NULL on success, hence the negation
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "traces_pmem");

	UT_ASSERT(!pmem_check_version(PMEM_MAJOR_VERSION,
			PMEM_MINOR_VERSION));
	UT_ASSERT(!pmemblk_check_version(PMEMBLK_MAJOR_VERSION,
			PMEMBLK_MINOR_VERSION));
	UT_ASSERT(!pmemlog_check_version(PMEMLOG_MAJOR_VERSION,
			PMEMLOG_MINOR_VERSION));
	UT_ASSERT(!pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
			PMEMOBJ_MINOR_VERSION));

	DONE(NULL);
}
| 596 | 21.961538 | 56 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_debug/obj_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_debug.c -- unit test for debug features
*
* usage: obj_debug file operation [op_index]:...
*
* operations are 'f' or 'l' or 'r' or 'a' or 'n' or 's'
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <sys/param.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "layout_obj_debug"

TOID_DECLARE_ROOT(struct root);
TOID_DECLARE(struct tobj, 0);
TOID_DECLARE(struct int3_s, 1);

/* root object: two list heads and a value */
struct root {
	POBJ_LIST_HEAD(listhead, struct tobj) lhead, lhead2;
	uint32_t val;
};

/* minimal list node: link field only */
struct tobj {
	POBJ_LIST_ENTRY(struct tobj) next;
};

/* constructor-argument payload: three 32-bit values */
struct int3_s {
	uint32_t i1;
	uint32_t i2;
	uint32_t i3;
};

/* common signature shared by the lock-API wrapper functions below */
typedef void (*func)(PMEMobjpool *pop, void *sync, void *cond);
/*
 * test_FOREACH -- run every POBJ_FOREACH* iteration macro outside a
 * transaction, inside one, and again after it, to exercise the debug
 * checks around iteration in each context
 */
static void
test_FOREACH(const char *path)
{
	PMEMobjpool *pop = NULL;
	PMEMoid varoid, nvaroid;
	TOID(struct root) root;
	TOID(struct tobj) var, nvar;

/* all iteration macros bundled so they can be replayed in each context */
#define COMMANDS_FOREACH()\
	do {\
	POBJ_FOREACH(pop, varoid) {}\
	POBJ_FOREACH_SAFE(pop, varoid, nvaroid) {}\
	POBJ_FOREACH_TYPE(pop, var) {}\
	POBJ_FOREACH_SAFE_TYPE(pop, var, nvar) {}\
	POBJ_LIST_FOREACH(var, &D_RW(root)->lhead, next) {}\
	POBJ_LIST_FOREACH_REVERSE(var, &D_RW(root)->lhead, next) {}\
	} while (0)

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));
	/* seed the list with one element so the list FOREACHes iterate */
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,
			sizeof(struct tobj), NULL, NULL);

	COMMANDS_FOREACH();
	TX_BEGIN(pop) {
		COMMANDS_FOREACH();
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	COMMANDS_FOREACH();

	pmemobj_close(pop);
}
/*
 * test_lists -- run a full list-manipulation cycle (insert-new, insert,
 * move between lists, remove, free) outside, inside, and after a
 * transaction
 */
static void
test_lists(const char *path)
{
	PMEMobjpool *pop = NULL;
	TOID(struct root) root;
	TOID(struct tobj) elm;

/* one complete insert/move/remove/free cycle on lhead and lhead2 */
#define COMMANDS_LISTS()\
	do {\
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,\
			sizeof(struct tobj), NULL, NULL);\
	POBJ_NEW(pop, &elm, struct tobj, NULL, NULL);\
	POBJ_LIST_INSERT_AFTER(pop, &D_RW(root)->lhead,\
			POBJ_LIST_FIRST(&D_RW(root)->lhead), elm, next);\
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->lhead,\
			&D_RW(root)->lhead2, elm, next, next);\
	POBJ_LIST_REMOVE(pop, &D_RW(root)->lhead2, elm, next);\
	POBJ_FREE(&elm);\
	} while (0)

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));

	COMMANDS_LISTS();
	TX_BEGIN(pop) {
		COMMANDS_LISTS();
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	COMMANDS_LISTS();

	pmemobj_close(pop);
}
/*
 * int3_constructor -- object constructor that copies the three uint32
 * fields from 'arg' into the freshly allocated object and persists them
 */
static int
int3_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct int3_s *val = (struct int3_s *)ptr;

	/* struct assignment copies i1/i2/i3 in one step */
	*val = *(struct int3_s *)arg;
	pmemobj_persist(pop, val, sizeof(*val));

	return 0;
}
/*
 * test_alloc_construct -- allocate with a constructor from inside a
 * transaction (exercises the debug checks around mixing the atomic
 * allocation API with transactions)
 */
static void
test_alloc_construct(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	TX_BEGIN(pop) {
		PMEMoid allocation;
		struct int3_s args = { 1, 2, 3 };

		pmemobj_alloc(pop, &allocation, sizeof(allocation), 1,
				int3_constructor, &args);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	pmemobj_close(pop);
}
/*
 * test_double_free -- deliberately free the same allocation twice via an
 * aliased PMEMoid.  This is a negative test: the second pmemobj_free is
 * the error condition under test, so the pool is intentionally never
 * closed (the call is expected not to return normally in a debug build).
 */
static void
test_double_free(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	PMEMoid oid, oid2;
	int err = pmemobj_zalloc(pop, &oid, 100, 0);
	UT_ASSERTeq(err, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	/* keep a copy -- pmemobj_free() below nulls out 'oid' itself */
	oid2 = oid;
	pmemobj_free(&oid);
	pmemobj_free(&oid2);	/* intentional double free */
}
/*
 * test_constr -- object constructor that itself calls pmemobj_alloc.
 * Allocating from within a constructor is the error condition under
 * test (see test_alloc_in_constructor); 'ptr' and 'arg' are unused.
 */
static int
test_constr(PMEMobjpool *pop, void *ptr, void *arg)
{
	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
	return 0;
}
/*
 * test_alloc_in_constructor -- negative test: trigger an allocation from
 * inside an object constructor (via test_constr).  The pool is
 * intentionally never closed, since the call is expected to be reported
 * as an error by the debug build.
 */
static void
test_alloc_in_constructor(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
}
/* dispatch wrapper: pmemobj_mutex_lock on 'sync'; 'cond' unused */
static void
test_mutex_lock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_lock(pop, (PMEMmutex *)sync);
}
/* dispatch wrapper: pmemobj_mutex_unlock on 'sync'; 'cond' unused */
static void
test_mutex_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_unlock(pop, (PMEMmutex *)sync);
}
/* dispatch wrapper: pmemobj_mutex_trylock on 'sync'; 'cond' unused */
static void
test_mutex_trylock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_trylock(pop, (PMEMmutex *)sync);
}
/* dispatch wrapper: pmemobj_mutex_timedlock with a NULL timeout */
static void
test_mutex_timedlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_timedlock(pop, (PMEMmutex *)sync, NULL);
}
/* dispatch wrapper: pmemobj_mutex_zero on 'sync'; 'cond' unused */
static void
test_mutex_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_zero(pop, (PMEMmutex *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_rdlock on 'sync'; 'cond' unused */
static void
test_rwlock_rdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_wrlock on 'sync'; 'cond' unused */
static void
test_rwlock_wrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_timedrdlock with a NULL timeout */
static void
test_rwlock_timedrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedrdlock(pop, (PMEMrwlock *)sync, NULL);
}
/* dispatch wrapper: pmemobj_rwlock_timedwrlock with a NULL timeout */
static void
test_rwlock_timedwrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedwrlock(pop, (PMEMrwlock *)sync, NULL);
}
/* dispatch wrapper: pmemobj_rwlock_tryrdlock on 'sync'; 'cond' unused */
static void
test_rwlock_tryrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_tryrdlock(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_trywrlock on 'sync'; 'cond' unused */
static void
test_rwlock_trywrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_trywrlock(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_unlock on 'sync'; 'cond' unused */
static void
test_rwlock_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_unlock(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_rwlock_zero on 'sync'; 'cond' unused */
static void
test_rwlock_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_zero(pop, (PMEMrwlock *)sync);
}
/* dispatch wrapper: pmemobj_cond_wait on 'cond' with mutex 'sync' */
static void
test_cond_wait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_wait(pop, (PMEMcond *)cond, (PMEMmutex *)sync);
}
/* dispatch wrapper: pmemobj_cond_signal on 'cond'; 'sync' unused */
static void
test_cond_signal(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_signal(pop, (PMEMcond *)cond);
}
/* dispatch wrapper: pmemobj_cond_broadcast on 'cond'; 'sync' unused */
static void
test_cond_broadcast(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_broadcast(pop, (PMEMcond *)cond);
}
/* dispatch wrapper: pmemobj_cond_timedwait with a NULL timeout */
static void
test_cond_timedwait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_timedwait(pop, (PMEMcond *)cond, (PMEMmutex *)sync, NULL);
}
/* dispatch wrapper: pmemobj_cond_zero on 'cond'; 'sync' unused */
static void
test_cond_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_zero(pop, (PMEMcond *)cond);
}
/*
 * test_sync_pop_check -- dispatch one of the sync-primitive wrappers
 * above, selected by op_index, against a deliberately bogus pool pointer
 * and stack-allocated (uninitialized run_id) primitives.
 */
static void
test_sync_pop_check(unsigned long op_index)
{
	/* bogus pool address -- the sync API must not dereference it */
	PMEMobjpool *pop = (PMEMobjpool *)(uintptr_t)0x1;

	const func ops[] = {
		test_mutex_lock, test_mutex_unlock, test_mutex_trylock,
		test_mutex_timedlock, test_mutex_zero, test_rwlock_rdlock,
		test_rwlock_wrlock, test_rwlock_timedrdlock,
		test_rwlock_timedwrlock, test_rwlock_tryrdlock,
		test_rwlock_trywrlock, test_rwlock_unlock, test_rwlock_zero,
		test_cond_wait, test_cond_signal, test_cond_broadcast,
		test_cond_timedwait, test_cond_zero
	};
	const unsigned long nops = sizeof(ops) / sizeof(ops[0]);

	if (op_index >= nops)
		UT_FATAL("Invalid op_index provided");

	PMEMmutex mtx;
	PMEMcond cv;
	ops[op_index](pop, &mtx, &cv);
}
/*
 * main -- parse the single-character op and run the matching test.
 *
 * Fix: the usage string previously listed only "f|l|r|a|s", while the
 * guard below (strchr "flrapns") and its error message also accept
 * 'p' and 'n'; the usage message now matches.  A default case was also
 * added to the switch (unreachable after the strchr guard, but it makes
 * the intentionally unhandled 'r' explicit).
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_debug");

	if (argc < 3)
		UT_FATAL("usage: %s file-name op:f|l|r|a|p|n|s [op_index]",
			argv[0]);

	const char *path = argv[1];

	/* op must be exactly one of the accepted characters */
	if (strchr("flrapns", argv[2][0]) == NULL || argv[2][1] != '\0')
		UT_FATAL("op must be f or l or r or a or p or n or s");

	unsigned long op_index;
	char *tailptr;

	switch (argv[2][0]) {
	case 'f':
		test_FOREACH(path);
		break;
	case 'l':
		test_lists(path);
		break;
	case 'a':
		test_alloc_construct(path);
		break;
	case 'p':
		test_double_free(path);
		break;
	case 'n':
		test_alloc_in_constructor(path);
		break;
	case 's':
		/* 's' takes a mandatory numeric op_index argument */
		if (argc != 4)
			UT_FATAL("Provide an op_index with option s");

		op_index = strtoul(argv[3], &tailptr, 10);
		if (tailptr[0] != '\0')
			UT_FATAL("Wrong op_index format");
		test_sync_pop_check(op_index);
		break;
	default:
		/* 'r' is accepted by the guard above but has no handler */
		break;
	}

	DONE(NULL);
}
| 8,098 | 20.771505 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_config/pmem2_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem_config.c -- pmem2_config unittests
*/
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "config.h"
#include "out.h"
#include "source.h"
/*
* test_cfg_create_and_delete_valid - test pmem2_config allocation
*/
static int
test_cfg_create_and_delete_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg;
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(cfg, NULL);
ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_cfg_alloc_enomem - test pmem2_config allocation with error injection
*/
static int
test_alloc_cfg_enomem(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config *cfg;
if (!core_fault_injection_enabled()) {
return 0;
}
core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc");
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_delete_null_config - test pmem2_delete on NULL config
*/
static int
test_delete_null_config(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg = NULL;
/* should not crash */
int ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_config_set_granularity_valid - check valid granularity values
*/
static int
test_config_set_granularity_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check default granularity */
enum pmem2_granularity g =
(enum pmem2_granularity)PMEM2_GRANULARITY_INVALID;
UT_ASSERTeq(cfg.requested_max_granularity, g);
/* change default granularity */
int ret = -1;
g = PMEM2_GRANULARITY_BYTE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
/* set granularity once more */
ret = -1;
g = PMEM2_GRANULARITY_PAGE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
return 0;
}
/*
* test_config_set_granularity_invalid - check invalid granularity values
*/
static int
test_config_set_granularity_invalid(const struct test_case *tc, int argc,
char *argv[])
{
/* pass invalid granularity */
int ret = 0;
enum pmem2_granularity g_inval = 999;
struct pmem2_config cfg;
pmem2_config_init(&cfg);
ret = pmem2_config_set_required_store_granularity(&cfg, g_inval);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
return 0;
}
/*
* test_set_offset_too_large - setting offset which is too large
*/
static int
test_set_offset_too_large(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to set the offset which is too large */
size_t offset = (size_t)INT64_MAX + 1;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_OFFSET_OUT_OF_RANGE);
return 0;
}
/*
* test_set_offset_success - setting a valid offset
*/
static int
test_set_offset_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the offset */
size_t offset = Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.offset, offset);
return 0;
}
/*
* test_set_length_success - setting a valid length
*/
static int
test_set_length_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the length, can be any length */
size_t length = Ut_mmap_align;
int ret = pmem2_config_set_length(&cfg, length);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.length, length);
return 0;
}
/*
* test_set_offset_max - setting maximum possible offset
*/
static int
test_set_offset_max(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set maximum possible offset */
size_t offset = (INT64_MAX / Ut_mmap_align) * Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_sharing_valid - setting valid sharing
*/
static int
test_set_sharing_valid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check sharing default value */
UT_ASSERTeq(cfg.sharing, PMEM2_SHARED);
int ret = pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.sharing, PMEM2_PRIVATE);
return 0;
}
/*
* test_set_sharing_invalid - setting invalid sharing
*/
static int
test_set_sharing_invalid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
unsigned invalid_sharing = 777;
int ret = pmem2_config_set_sharing(&cfg, invalid_sharing);
UT_ASSERTeq(ret, PMEM2_E_INVALID_SHARING_VALUE);
return 0;
}
/*
* test_validate_unaligned_addr - setting unaligned addr and validating it
*/
static int
test_validate_unaligned_addr(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_validate_unaligned_addr <file>");
/* needed for source alignment */
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fd);
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* let's set addr which is unaligned */
cfg.addr = (char *)1;
int ret = pmem2_config_validate_addr_alignment(&cfg, src);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_UNALIGNED);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_set_wrong_addr_req_type - setting wrong addr request type
*/
static int
test_set_wrong_addr_req_type(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen invalid addr request type */
enum pmem2_address_request_type request_type = 999;
int ret = pmem2_config_set_address(&cfg, NULL, request_type);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE);
return 0;
}
/*
* test_null_addr_noreplace - setting null addr when request type
* PMEM2_ADDRESS_FIXED_NOREPLACE is used
*/
static int
test_null_addr_noreplace(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_address(
&cfg, NULL, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_NULL);
return 0;
}
/*
* test_clear_address - using pmem2_config_clear_address func
*/
static int
test_clear_address(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen value of address and addr request type */
void *addr = (void *)(1024 * 1024);
int ret = pmem2_config_set_address(
&cfg, addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(cfg.addr, NULL);
UT_ASSERTne(cfg.addr_request, PMEM2_ADDRESS_ANY);
pmem2_config_clear_address(&cfg);
UT_ASSERTeq(cfg.addr, NULL);
UT_ASSERTeq(cfg.addr_request, PMEM2_ADDRESS_ANY);
return 0;
}
/*
* test_set_valid_prot_flag -- set valid protection flag
*/
static int
test_set_valid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_READ);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_WRITE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_NONE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg,
PMEM2_PROT_WRITE | PMEM2_PROT_READ | PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_invalid_prot_flag -- set invalid protection flag
*/
static int
test_set_invalid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PROT_WRITE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_PROT_FLAG);
UT_ASSERTeq(cfg.protection_flag, PMEM2_PROT_READ | PMEM2_PROT_WRITE);
return 0;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_cfg_create_and_delete_valid),
TEST_CASE(test_alloc_cfg_enomem),
TEST_CASE(test_delete_null_config),
TEST_CASE(test_config_set_granularity_valid),
TEST_CASE(test_config_set_granularity_invalid),
TEST_CASE(test_set_offset_too_large),
TEST_CASE(test_set_offset_success),
TEST_CASE(test_set_length_success),
TEST_CASE(test_set_offset_max),
TEST_CASE(test_set_sharing_valid),
TEST_CASE(test_set_sharing_invalid),
TEST_CASE(test_validate_unaligned_addr),
TEST_CASE(test_set_wrong_addr_req_type),
TEST_CASE(test_null_addr_noreplace),
TEST_CASE(test_clear_address),
TEST_CASE(test_set_valid_prot_flag),
TEST_CASE(test_set_invalid_prot_flag),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
START(argc, argv, "pmem2_config");
util_init();
out_init("pmem2_config", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
| 9,397 | 22.792405 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_map_file_trunc/pmem_map_file_trunc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem_map_file_trunc.c -- test for mapping specially crafted files,
* which used to confuse Windows libc to truncate it by 1 byte
*
* See https://github.com/pmem/pmdk/pull/3728 for full description.
*
* usage: pmem_map_file_trunc file
*/
#include "unittest.h"
#define EXPECTED_SIZE (4 * 1024)
/*
* so called "Ctrl-Z" or EOF character
* https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen
*/
#define FILL_CHAR 0x1a
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem_map_file_trunc");
if (argc < 2)
UT_FATAL("not enough args");
size_t mapped;
int ispmem;
char *p;
os_stat_t st;
p = pmem_map_file(argv[1], EXPECTED_SIZE, PMEM_FILE_CREATE, 0644,
&mapped, &ispmem);
UT_ASSERT(p);
UT_ASSERTeq(mapped, EXPECTED_SIZE);
p[EXPECTED_SIZE - 1] = FILL_CHAR;
pmem_persist(&p[EXPECTED_SIZE - 1], 1);
pmem_unmap(p, EXPECTED_SIZE);
STAT(argv[1], &st);
UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
p = pmem_map_file(argv[1], 0, 0, 0644, &mapped, &ispmem);
UT_ASSERT(p);
UT_ASSERTeq(mapped, EXPECTED_SIZE);
UT_ASSERTeq(p[EXPECTED_SIZE - 1], FILL_CHAR);
pmem_unmap(p, EXPECTED_SIZE);
STAT(argv[1], &st);
UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
DONE(NULL);
}
| 1,302 | 20.716667 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_ravl/util_ravl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_ravl.c -- unit test for ravl tree
*/
#include <stdint.h>
#include <stdlib.h>
#include "ravl.h"
#include "util.h"
#include "unittest.h"
#include "fault_injection.h"
static int
cmpkey(const void *lhs, const void *rhs)
{
intptr_t l = (intptr_t)lhs;
intptr_t r = (intptr_t)rhs;
return (int)(l - r);
}
static void
test_misc(void)
{
struct ravl *r = ravl_new(cmpkey);
struct ravl_node *n = NULL;
ravl_insert(r, (void *)3);
ravl_insert(r, (void *)6);
ravl_insert(r, (void *)1);
ravl_insert(r, (void *)7);
ravl_insert(r, (void *)9);
ravl_insert(r, (void *)5);
ravl_insert(r, (void *)8);
ravl_insert(r, (void *)2);
ravl_insert(r, (void *)4);
ravl_insert(r, (void *)10);
n = ravl_find(r, (void *)11, RAVL_PREDICATE_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)10, RAVL_PREDICATE_GREATER);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)11, RAVL_PREDICATE_GREATER);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)11,
RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)1, RAVL_PREDICATE_LESS);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)0, RAVL_PREDICATE_LESS_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)10);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)8);
n = ravl_find(r, (void *)9,
RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)9);
n = ravl_find(r, (void *)9,
RAVL_PREDICATE_LESS | RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)9);
n = ravl_find(r, (void *)100, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)10);
n = ravl_find(r, (void *)0, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)1);
n = ravl_find(r, (void *)3, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)10, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)7, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)1, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)5, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)8, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)2, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)4, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
ravl_delete(r);
}
static void
test_predicate(void)
{
struct ravl *r = ravl_new(cmpkey);
struct ravl_node *n = NULL;
ravl_insert(r, (void *)10);
ravl_insert(r, (void *)5);
ravl_insert(r, (void *)7);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)7);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)5);
ravl_delete(r);
}
static void
test_stress(void)
{
struct ravl *r = ravl_new(cmpkey);
for (int i = 0; i < 1000000; ++i) {
ravl_insert(r, (void *)(uintptr_t)rand());
}
ravl_delete(r);
}
struct foo {
int a;
int b;
int c;
};
static int
cmpfoo(const void *lhs, const void *rhs)
{
const struct foo *l = lhs;
const struct foo *r = rhs;
return ((l->a + l->b + l->c) - (r->a + r->b + r->c));
}
static void
test_emplace(void)
{
struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
struct foo a = {1, 2, 3};
struct foo b = {2, 3, 4};
struct foo z = {0, 0, 0};
ravl_emplace_copy(r, &a);
ravl_emplace_copy(r, &b);
struct ravl_node *n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
struct foo *fn = ravl_data(n);
UT_ASSERTeq(fn->a, a.a);
UT_ASSERTeq(fn->b, a.b);
UT_ASSERTeq(fn->c, a.c);
ravl_remove(r, n);
n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
fn = ravl_data(n);
UT_ASSERTeq(fn->a, b.a);
UT_ASSERTeq(fn->b, b.b);
UT_ASSERTeq(fn->c, b.c);
ravl_remove(r, n);
ravl_delete(r);
}
static void
test_fault_injection_ravl_sized()
{
if (!core_fault_injection_enabled())
return;
core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_sized");
struct ravl *r = ravl_new_sized(NULL, 0);
UT_ASSERTeq(r, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
test_fault_injection_ravl_node()
{
if (!core_fault_injection_enabled())
return;
struct foo a = {1, 2, 3};
struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
UT_ASSERTne(r, NULL);
core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_node");
int ret = ravl_emplace_copy(r, &a);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_ravl");
test_predicate();
test_misc();
test_stress();
test_emplace();
test_fault_injection_ravl_sized();
test_fault_injection_ravl_node();
DONE(NULL);
}
| 5,271 | 20.34413 | 64 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_sync/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mocks_windows.h -- redefinitions of pthread functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_sync test.
* It would replace default implementation with mocked functions defined
* in obj_sync.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_mutex_init __wrap_os_mutex_init
#define os_rwlock_init __wrap_os_rwlock_init
#define os_cond_init __wrap_os_cond_init
#endif
| 2,265 | 41.754717 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_sync/obj_sync.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_sync.c -- unit test for PMEM-resident locks
*/
#include "obj.h"
#include "sync.h"
#include "unittest.h"
#include "sys_util.h"
#include "util.h"
#include "os.h"
#define MAX_THREAD_NUM 200
#define DATA_SIZE 128
#define LOCKED_MUTEX 1
#define NANO_PER_ONE 1000000000LL
#define TIMEOUT (NANO_PER_ONE / 1000LL)
#define WORKER_RUNS 10
#define MAX_OPENS 5
#define FATAL_USAGE() UT_FATAL("usage: obj_sync [mrc] <num_threads> <runs>\n")
/* posix thread worker typedef */
typedef void *(*worker)(void *);
/* the mock pmemobj pool */
static PMEMobjpool Mock_pop;
/* the tested object containing persistent synchronization primitives */
static struct mock_obj {
PMEMmutex mutex;
PMEMmutex mutex_locked;
PMEMcond cond;
PMEMrwlock rwlock;
int check_data;
uint8_t data[DATA_SIZE];
} *Test_obj;
PMEMobjpool *
pmemobj_pool_by_ptr(const void *arg)
{
return &Mock_pop;
}
/*
* mock_open_pool -- (internal) simulate pool opening
*/
static void
mock_open_pool(PMEMobjpool *pop)
{
util_fetch_and_add64(&pop->run_id, 2);
}
/*
* mutex_write_worker -- (internal) write data with mutex
*/
static void *
mutex_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
UT_ERR("pmemobj_mutex_lock");
return NULL;
}
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
UT_ERR("pmemobj_mutex_unlock");
}
return NULL;
}
/*
* mutex_check_worker -- (internal) check consistency with mutex
*/
static void *
mutex_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
UT_ERR("pmemobj_mutex_lock");
return NULL;
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
memset(Test_obj->data, 0, DATA_SIZE);
if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
UT_ERR("pmemobj_mutex_unlock");
}
return NULL;
}
/*
* cond_write_worker -- (internal) write data with cond variable
*/
static void *
cond_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
return NULL;
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
Test_obj->check_data = 1;
if (pmemobj_cond_signal(&Mock_pop, &Test_obj->cond))
UT_ERR("pmemobj_cond_signal");
pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
}
return NULL;
}
/*
* cond_check_worker -- (internal) check consistency with cond variable
*/
static void *
cond_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
return NULL;
while (Test_obj->check_data != 1) {
if (pmemobj_cond_wait(&Mock_pop, &Test_obj->cond,
&Test_obj->mutex))
UT_ERR("pmemobj_cond_wait");
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
memset(Test_obj->data, 0, DATA_SIZE);
pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
}
return NULL;
}
/*
* rwlock_write_worker -- (internal) write data with rwlock
*/
static void *
rwlock_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_rwlock_wrlock(&Mock_pop, &Test_obj->rwlock)) {
UT_ERR("pmemobj_rwlock_wrlock");
return NULL;
}
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
UT_ERR("pmemobj_rwlock_unlock");
}
return NULL;
}
/*
* rwlock_check_worker -- (internal) check consistency with rwlock
*/
static void *
rwlock_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_rwlock_rdlock(&Mock_pop, &Test_obj->rwlock)) {
UT_ERR("pmemobj_rwlock_rdlock");
return NULL;
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
UT_ERR("pmemobj_rwlock_unlock");
}
return NULL;
}
/*
* timed_write_worker -- (internal) intentionally doing nothing
*/
static void *
timed_write_worker(void *arg)
{
return NULL;
}
/*
* timed_check_worker -- (internal) check consistency with mutex
*/
static void *
timed_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
int mutex_id = (int)(uintptr_t)arg % 2;
PMEMmutex *mtx = mutex_id == LOCKED_MUTEX ?
&Test_obj->mutex_locked : &Test_obj->mutex;
struct timespec t1, t2, abs_time;
os_clock_gettime(CLOCK_REALTIME, &t1);
abs_time = t1;
abs_time.tv_nsec += TIMEOUT;
if (abs_time.tv_nsec >= NANO_PER_ONE) {
abs_time.tv_sec++;
abs_time.tv_nsec -= NANO_PER_ONE;
}
int ret = pmemobj_mutex_timedlock(&Mock_pop, mtx, &abs_time);
os_clock_gettime(CLOCK_REALTIME, &t2);
if (mutex_id == LOCKED_MUTEX) {
UT_ASSERTeq(ret, ETIMEDOUT);
uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec) *
NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
UT_ASSERT(diff >= TIMEOUT);
return NULL;
}
if (ret == 0) {
UT_ASSERTne(mutex_id, LOCKED_MUTEX);
pmemobj_mutex_unlock(&Mock_pop, mtx);
} else if (ret == ETIMEDOUT) {
uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec)
* NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
UT_ASSERT(diff >= TIMEOUT);
} else {
errno = ret;
UT_ERR("!pmemobj_mutex_timedlock");
}
}
return NULL;
}
/*
* cleanup -- (internal) clean up after each run
*/
static void
cleanup(char test_type)
{
switch (test_type) {
case 'm':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
break;
case 'r':
util_rwlock_destroy(&((PMEMrwlock_internal *)
&(Test_obj->rwlock))->PMEMrwlock_lock);
break;
case 'c':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
util_cond_destroy(&((PMEMcond_internal *)
&(Test_obj->cond))->PMEMcond_cond);
break;
case 't':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex_locked))->PMEMmutex_lock);
break;
default:
FATAL_USAGE();
}
}
static int
obj_sync_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
/* no-op */
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_sync");
util_init();
if (argc < 4)
FATAL_USAGE();
worker writer;
worker checker;
char test_type = argv[1][0];
switch (test_type) {
case 'm':
writer = mutex_write_worker;
checker = mutex_check_worker;
break;
case 'r':
writer = rwlock_write_worker;
checker = rwlock_check_worker;
break;
case 'c':
writer = cond_write_worker;
checker = cond_check_worker;
break;
case 't':
writer = timed_write_worker;
checker = timed_check_worker;
break;
default:
FATAL_USAGE();
}
unsigned long num_threads = strtoul(argv[2], NULL, 10);
if (num_threads > MAX_THREAD_NUM)
UT_FATAL("Do not use more than %d threads.\n", MAX_THREAD_NUM);
unsigned long opens = strtoul(argv[3], NULL, 10);
if (opens > MAX_OPENS)
UT_FATAL("Do not use more than %d runs.\n", MAX_OPENS);
os_thread_t *write_threads
= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
os_thread_t *check_threads
= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
/* first pool open */
mock_open_pool(&Mock_pop);
Mock_pop.p_ops.persist = obj_sync_persist;
Mock_pop.p_ops.base = &Mock_pop;
Test_obj = (struct mock_obj *)MALLOC(sizeof(struct mock_obj));
/* zero-initialize the test object */
pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex);
pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex_locked);
pmemobj_cond_zero(&Mock_pop, &Test_obj->cond);
pmemobj_rwlock_zero(&Mock_pop, &Test_obj->rwlock);
Test_obj->check_data = 0;
memset(&Test_obj->data, 0, DATA_SIZE);
for (unsigned long run = 0; run < opens; run++) {
if (test_type == 't') {
pmemobj_mutex_lock(&Mock_pop,
&Test_obj->mutex_locked);
}
for (unsigned i = 0; i < num_threads; i++) {
THREAD_CREATE(&write_threads[i], NULL, writer,
(void *)(uintptr_t)i);
THREAD_CREATE(&check_threads[i], NULL, checker,
(void *)(uintptr_t)i);
}
for (unsigned i = 0; i < num_threads; i++) {
THREAD_JOIN(&write_threads[i], NULL);
THREAD_JOIN(&check_threads[i], NULL);
}
if (test_type == 't') {
pmemobj_mutex_unlock(&Mock_pop,
&Test_obj->mutex_locked);
}
/* up the run_id counter and cleanup */
mock_open_pool(&Mock_pop);
cleanup(test_type);
}
FREE(check_threads);
FREE(write_threads);
FREE(Test_obj);
DONE(NULL);
}
| 8,776 | 21.97644 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_sync/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- redefinitions of lock functions (Posix implementation)
*/
#include <pthread.h>
#include "util.h"
#include "os.h"
#include "unittest.h"
FUNC_MOCK(pthread_mutex_init, int,
pthread_mutex_t *__restrict mutex,
const pthread_mutexattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_mutex_init, mutex, attr)
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
FUNC_MOCK(pthread_rwlock_init, int,
pthread_rwlock_t *__restrict rwlock,
const pthread_rwlockattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_rwlock_init, rwlock, attr)
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
/*
 * pthread_cond_init -- mock: fails (returns -1) on the designated mock
 * run (#1); otherwise forwards to the real pthread_cond_init
 */
FUNC_MOCK(pthread_cond_init, int,
pthread_cond_t *__restrict cond,
const pthread_condattr_t *__restrict attr)
/* default behavior: delegate to the real function */
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_cond_init, cond, attr)
/* simulate initialization failure on this run */
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
| 950 | 22.775 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/out_err_mt_win/out_err_mt_win.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* out_err_mt_win.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
/*
 * print_errors -- (internal) print the caller-supplied label followed by
 * the last error message recorded by each PMDK library (wide-char API)
 */
static void
print_errors(const wchar_t *msg)
{
UT_OUT("%S", msg);
UT_OUT("PMEM: %S", pmem_errormsgW());
UT_OUT("PMEMOBJ: %S", pmemobj_errormsgW());
UT_OUT("PMEMLOG: %S", pmemlog_errormsgW());
UT_OUT("PMEMBLK: %S", pmemblk_errormsgW());
UT_OUT("PMEMPOOL: %S", pmempool_errormsgW());
}
/*
 * check_one_errormsg -- (internal) parse one library's error message back
 * through the given wide format and verify both version numbers
 *
 * msg       - error message returned by a pmem*_errormsgW() call
 * fmt       - wide scanf format with two %d conversions (need, found)
 * ver       - major version that was requested (the "need" value)
 * lib_major - actual major version of the library (the "found" value)
 */
static void
check_one_errormsg(const wchar_t *msg, const wchar_t *fmt, int ver,
	int lib_major)
{
	int err_need;
	int err_found;

	int ret = swscanf(msg, fmt, &err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, lib_major);
}

/*
 * check_errors -- (internal) verify that every library reported a major
 * version mismatch error for the requested version 'ver'
 *
 * The five libraries share the same message shape, so the previously
 * quintuplicated parse/assert sequence is factored into one helper.
 */
static void
check_errors(int ver)
{
	check_one_errormsg(pmem_errormsgW(),
		L"libpmem major version mismatch (need %d, found %d)",
		ver, PMEM_MAJOR_VERSION);
	check_one_errormsg(pmemobj_errormsgW(),
		L"libpmemobj major version mismatch (need %d, found %d)",
		ver, PMEMOBJ_MAJOR_VERSION);
	check_one_errormsg(pmemlog_errormsgW(),
		L"libpmemlog major version mismatch (need %d, found %d)",
		ver, PMEMLOG_MAJOR_VERSION);
	check_one_errormsg(pmemblk_errormsgW(),
		L"libpmemblk major version mismatch (need %d, found %d)",
		ver, PMEMBLK_MAJOR_VERSION);
	check_one_errormsg(pmempool_errormsgW(),
		L"libpmempool major version mismatch (need %d, found %d)",
		ver, PMEMPOOL_MAJOR_VERSION);
}
/*
 * do_test -- (internal) worker: request an impossible major version from
 * every library, then verify each one recorded the mismatch error
 */
static void *
do_test(void *arg)
{
	const int required = *(int *)arg;

	/* each check must fail and set the per-library error message */
	pmem_check_version(required, 0);
	pmemobj_check_version(required, 0);
	pmemlog_check_version(required, 0);
	pmemblk_check_version(required, 0);
	pmempool_check_version(required, 0);

	check_errors(required);

	return NULL;
}
/*
 * run_mt_test -- (internal) spawn NUM_THREADS workers, each with a unique
 * bogus version number, and wait for all of them to finish
 */
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t thread[NUM_THREADS];
	int ver[NUM_THREADS];
	int i;

	for (i = 0; i < NUM_THREADS; ++i) {
		ver[i] = 10000 + i;
		THREAD_CREATE(&thread[i], NULL, worker, &ver[i]);
	}

	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(&thread[i], NULL);
}
/*
 * wmain -- create one pool of each kind, then deliberately trigger an
 * error in every library (single- and multi-threaded) and dump the
 * recorded error messages after each step
 */
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "out_err_mt_win");
if (argc != 6)
UT_FATAL("usage: %S file1 file2 file3 file4 dir",
argv[0]);
print_errors(L"start");
PMEMobjpool *pop = pmemobj_createW(argv[1], L"test",
PMEMOBJ_MIN_POOL, 0666);
PMEMlogpool *plp = pmemlog_createW(argv[2],
PMEMLOG_MIN_POOL, 0666);
PMEMblkpool *pbp = pmemblk_createW(argv[3],
128, PMEMBLK_MIN_POOL, 0666);
util_init();
/* impossible major versions -- each check records a mismatch error */
pmem_check_version(10000, 0);
pmemobj_check_version(10001, 0);
pmemlog_check_version(10002, 0);
pmemblk_check_version(10003, 0);
pmempool_check_version(10006, 0);
print_errors(L"version check");
/* msync on a NULL range must fail and set the libpmem error */
void *ptr = NULL;
/*
 * We are testing library error reporting and we don't want this test
 * to fail under memcheck.
 */
VALGRIND_DO_DISABLE_ERROR_REPORTING;
pmem_msync(ptr, 1);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
print_errors(L"pmem_msync");
/* zero-sized allocation is rejected by libpmemobj */
int ret;
PMEMoid oid;
ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
UT_ASSERTeq(ret, -1);
print_errors(L"pmemobj_alloc");
/* appending more than the pool can hold must fail */
pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
print_errors(L"pmemlog_append");
/* out-of-range block number must fail */
size_t nblock = pmemblk_nblock(pbp);
pmemblk_set_error(pbp, nblock + 1);
print_errors(L"pmemblk_set_error");
/* same error paths exercised concurrently from NUM_THREADS threads */
run_mt_test(do_test);
pmemobj_close(pop);
pmemlog_close(plp);
pmemblk_close(pbp);
/* bad args struct size -- pmempool_check_init must fail */
PMEMpoolcheck *ppc;
struct pmempool_check_args args = {0, };
ppc = pmempool_check_init(&args, sizeof(args) / 2);
UT_ASSERTeq(ppc, NULL);
print_errors(L"pmempool_check_init");
DONEW(NULL);
}
| 3,844 | 22.30303 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_oid_thread/obj_oid_thread.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_oid_thread.c -- unit test for the reverse direct operation
*/
#include "unittest.h"
#include "lane.h"
#include "obj.h"
#include "sys_util.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock;
static os_cond_t cond;
static int flag = 1;
static PMEMoid thread_oid;
/*
 * test_worker -- (internal) test worker thread
 *
 * Phase 1 (pools open): resolve the shared thread_oid to a direct pointer
 * and check pmemobj_oid() inverts it, then signal the main thread.
 * Phase 2 (after the main thread closed all pools): the cached direct
 * pointer must no longer resolve to a valid oid.
 */
static void *
test_worker(void *arg)
{
util_mutex_lock(&lock);
/* before pool is closed */
void *direct = pmemobj_direct(thread_oid);
UT_ASSERT(OID_EQUALS(thread_oid, pmemobj_oid(direct)));
/* flag == 0 tells the main thread that phase 1 is done */
flag = 0;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
util_mutex_lock(&lock);
/* wait until the main thread has closed the pools and set flag back */
while (flag == 0)
os_cond_wait(&cond, &lock);
/* after pool is closed */
UT_ASSERT(OID_IS_NULL(pmemobj_oid(direct)));
util_mutex_unlock(&lock);
return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_oid_thread");
if (argc != 3)
UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);
util_mutex_init(&lock);
util_cond_init(&cond);
unsigned npools = ATOU(argv[2]);
const char *dir = argv[1];
int r;
PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMoid *));
size_t length = strlen(dir) + MAX_PATH_LEN;
char *path = MALLOC(length);
for (unsigned i = 0; i < npools; ++i) {
int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
dir, i);
if (ret < 0 || ret >= length)
UT_FATAL("snprintf: %d", ret);
pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pops[i] == NULL)
UT_FATAL("!pmemobj_create");
}
/* Address outside the pmemobj pool */
void *allocated_memory = MALLOC(sizeof(int));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(allocated_memory)));
PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(NULL)));
oids[0] = OID_NULL;
for (unsigned i = 0; i < npools; ++i) {
uint64_t off = pops[i]->heap_offset;
oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
UT_ASSERT(OID_EQUALS(oids[i],
pmemobj_oid(pmemobj_direct(oids[i]))));
r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(OID_EQUALS(tmpoids[i],
pmemobj_oid(pmemobj_direct(tmpoids[i]))));
}
r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(!OID_IS_NULL(pmemobj_oid(pmemobj_direct(thread_oid))));
util_mutex_lock(&lock);
os_thread_t t;
THREAD_CREATE(&t, NULL, test_worker, NULL);
/* wait for the thread to perform the first direct */
while (flag != 0)
os_cond_wait(&cond, &lock);
for (unsigned i = 0; i < npools; ++i) {
pmemobj_free(&tmpoids[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(tmpoids[i]))));
pmemobj_close(pops[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(oids[i]))));
}
/* signal the waiting thread */
flag = 1;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
THREAD_JOIN(&t, NULL);
FREE(path);
FREE(tmpoids);
FREE(oids);
FREE(pops);
FREE(allocated_memory);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
DONE(NULL);
}
| 3,186 | 21.602837 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_fip_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_FIP_COMMON_H
#define RPMEM_FIP_COMMON_H 1
#include <string.h>
#include <netinet/in.h>
#include <rdma/fabric.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
#define RPMEM_FIVERSION FI_VERSION(1, 4)
#define RPMEM_FIP_CQ_WAIT_MS 100
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
/*
* rpmem_fip_node -- client or server node type
*/
enum rpmem_fip_node {
RPMEM_FIP_NODE_CLIENT,
RPMEM_FIP_NODE_SERVER,
MAX_RPMEM_FIP_NODE,
};
/*
* rpmem_fip_probe -- list of providers
*/
struct rpmem_fip_probe {
unsigned providers;
size_t max_wq_size[MAX_RPMEM_PROV];
};
/*
 * rpmem_fip_probe -- returns nonzero if the specified provider was
 * reported as available by a prior probe
 */
static inline int
rpmem_fip_probe(struct rpmem_fip_probe probe, enum rpmem_provider provider)
{
	unsigned bit = 1U << provider;

	return (probe.providers & bit) != 0;
}
/*
 * rpmem_fip_probe_any -- returns nonzero if at least one provider is
 * available
 */
static inline int
rpmem_fip_probe_any(struct rpmem_fip_probe probe)
{
	return probe.providers ? 1 : 0;
}
int rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe);
struct fi_info *rpmem_fip_get_hints(enum rpmem_provider provider);
int rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout);
int rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout);
size_t rpmem_fip_cq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_wq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_rx_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_max_nlanes(struct fi_info *fi);
void rpmem_fip_print_info(struct fi_info *fi);
#ifdef __cplusplus
}
#endif
#endif
| 1,992 | 21.144444 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_fip_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.c -- common definitions for librpmem and rpmemd
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_common_log.h"
#include "valgrind_internal.h"
#include <rdma/fi_errno.h>
/*
 * rpmem_fip_get_hints -- return fabric interface information hints
 *
 * Allocates and fills an fi_info structure suitable for passing to
 * fi_getinfo(). Returns NULL on allocation failure. The caller owns the
 * result and must release it with fi_freeinfo().
 */
struct fi_info *
rpmem_fip_get_hints(enum rpmem_provider provider)
{
RPMEMC_ASSERT(provider < MAX_RPMEM_PROV);
struct fi_info *hints = fi_allocinfo();
if (!hints) {
RPMEMC_LOG(ERR, "!fi_allocinfo");
return NULL;
}
/* connection-oriented endpoint */
hints->ep_attr->type = FI_EP_MSG;
/*
 * Basic memory registration mode indicates that MR attributes
 * (rkey, lkey) are selected by provider.
 */
hints->domain_attr->mr_mode = FI_MR_BASIC;
/*
 * FI_THREAD_SAFE indicates MT applications can access any
 * resources through interface without any restrictions
 */
hints->domain_attr->threading = FI_THREAD_SAFE;
/*
 * FI_MSG - SEND and RECV
 * FI_RMA - WRITE and READ
 */
hints->caps = FI_MSG | FI_RMA;
/* must register locally accessed buffers */
hints->mode = FI_CONTEXT | FI_LOCAL_MR | FI_RX_CQ_DATA;
/* READ-after-WRITE and SEND-after-WRITE message ordering required */
hints->tx_attr->msg_order = FI_ORDER_RAW | FI_ORDER_SAW;
hints->addr_format = FI_SOCKADDR;
/* RPMEM_PROV_UNKNOWN means "any provider": leave prov_name unset */
if (provider != RPMEM_PROV_UNKNOWN) {
const char *prov_name = rpmem_provider_to_str(provider);
RPMEMC_ASSERT(prov_name != NULL);
/* fabric_attr->prov_name is freed by fi_freeinfo() */
hints->fabric_attr->prov_name = strdup(prov_name);
if (!hints->fabric_attr->prov_name) {
RPMEMC_LOG(ERR, "!strdup(provider)");
goto err_strdup;
}
}
return hints;
err_strdup:
fi_freeinfo(hints);
return NULL;
}
/*
 * rpmem_fip_probe_get -- return list of available providers
 *
 * Queries libfabric for interfaces matching 'target'. If 'probe' is not
 * NULL, it is filled with a bitmask of recognized providers and, per
 * provider, the maximum work queue (tx) size reported by libfabric.
 * Returns 0 on success, nonzero on failure (fi_getinfo error code or -1).
 */
int
rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe)
{
/* hints with no provider name match any provider */
struct fi_info *hints = rpmem_fip_get_hints(RPMEM_PROV_UNKNOWN);
if (!hints)
return -1;
int ret;
struct fi_info *fi;
ret = fi_getinfo(RPMEM_FIVERSION, target, NULL, 0, hints, &fi);
if (ret) {
goto err_getinfo;
}
if (probe) {
memset(probe, 0, sizeof(*probe));
/* walk the returned list; skip providers we don't recognize */
struct fi_info *prov = fi;
while (prov) {
enum rpmem_provider p = rpmem_provider_from_str(
prov->fabric_attr->prov_name);
if (p == RPMEM_PROV_UNKNOWN) {
prov = prov->next;
continue;
}
probe->providers |= (1U << p);
probe->max_wq_size[p] = prov->tx_attr->size;
prov = prov->next;
}
}
fi_freeinfo(fi);
err_getinfo:
fi_freeinfo(hints);
return ret;
}
/*
 * rpmem_fip_read_eq -- read event queue entry with specified timeout
 *
 * Returns:
 * 0 - one entry read successfully ('event' and 'entry' are filled in)
 * 1 - timed out (only possible when timeout != -1); errno = ETIMEDOUT
 * <0/-1 - error; errno is set and details are logged
 */
int
rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout)
{
int ret;
ssize_t sret;
struct fi_eq_err_entry err;
sret = fi_eq_sread(eq, event, entry, sizeof(*entry), timeout, 0);
/* tell Valgrind the provider initialized the return value */
VALGRIND_DO_MAKE_MEM_DEFINED(&sret, sizeof(sret));
if (timeout != -1 && (sret == -FI_ETIMEDOUT || sret == -FI_EAGAIN)) {
errno = ETIMEDOUT;
return 1;
}
/* anything other than a full entry is treated as an error */
if (sret < 0 || (size_t)sret != sizeof(*entry)) {
if (sret < 0)
ret = (int)sret;
else
ret = -1;
/* try to retrieve provider-specific error details */
sret = fi_eq_readerr(eq, &err, 0);
if (sret < 0) {
errno = EIO;
RPMEMC_LOG(ERR, "error reading from event queue: "
"cannot read error from event queue: %s",
fi_strerror((int)sret));
} else if (sret > 0) {
RPMEMC_ASSERT(sret == sizeof(err));
errno = -err.prov_errno;
RPMEMC_LOG(ERR, "error reading from event queue: %s",
fi_eq_strerror(eq, err.prov_errno,
NULL, NULL, 0));
}
return ret;
}
return 0;
}
/*
 * rpmem_fip_read_eq_check -- read event queue entry and expect specified
 * event and fid
 *
 * Returns:
 * 1 - timeout
 * 0 - success
 * otherwise - error
 */
int
rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
	uint32_t exp_event, fid_t exp_fid, int timeout)
{
	uint32_t event;
	int ret;

	ret = rpmem_fip_read_eq(eq, entry, &event, timeout);
	if (ret)
		return ret;

	/* the common case: the event we were waiting for */
	if (event == exp_event && entry->fid == exp_fid)
		return 0;

	errno = EIO;
	RPMEMC_LOG(ERR, "unexpected event received (%u) "
			"expected (%u)%s", event, exp_event,
			entry->fid != exp_fid ?
			" invalid endpoint" : "");
	return -1;
}
/*
* rpmem_fip_lane_attr -- lane attributes
*
* This structure describes how many SQ, RQ and CQ entries are
* required for a single lane.
*
* NOTE:
* - WRITE, READ and SEND requests are placed in SQ,
* - RECV requests are placed in RQ.
*/
struct rpmem_fip_lane_attr {
size_t n_per_sq; /* number of entries per lane in send queue */
size_t n_per_rq; /* number of entries per lane in receive queue */
size_t n_per_cq; /* number of entries per lane in completion queue */
};
/* queues size required by remote persist operation methods */
static const struct rpmem_fip_lane_attr
rpmem_fip_lane_attrs[MAX_RPMEM_FIP_NODE][MAX_RPMEM_PM] = {
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_GPSPM] = {
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_APM] = {
/* WRITE + READ for persist, WRITE + SEND for deep persist */
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_GPSPM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_APM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
};
/*
* rpmem_fip_cq_size -- returns completion queue size based on
* persist method and node type
*/
size_t
rpmem_fip_cq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_cq ? : 1;
}
/*
* rpmem_fip_wq_size -- returns submission queue (transmit queue) size based
* on persist method and node type
*/
size_t
rpmem_fip_wq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_sq ? : 1;
}
/*
* rpmem_fip_rx_size -- returns receive queue size based
* on persist method and node type
*/
size_t
rpmem_fip_rx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_rq ? : 1;
}
/*
 * rpmem_fip_max_nlanes -- returns maximum number of lanes: the smallest
 * of the tx context, rx context and completion queue counts
 */
size_t
rpmem_fip_max_nlanes(struct fi_info *fi)
{
	size_t limit = fi->domain_attr->tx_ctx_cnt;

	if (fi->domain_attr->rx_ctx_cnt < limit)
		limit = fi->domain_attr->rx_ctx_cnt;
	if (fi->domain_attr->cq_cnt < limit)
		limit = fi->domain_attr->cq_cnt;

	return limit;
}
/*
 * rpmem_fip_print_info -- print some useful info about fabric interface
 *
 * Logs the libfabric version followed by the multi-line interface
 * description, one log entry per line.
 */
void
rpmem_fip_print_info(struct fi_info *fi)
{
	RPMEMC_LOG(INFO, "libfabric version: %s",
			fi_tostr(fi, FI_TYPE_VERSION));

	char *str = fi_tostr(fi, FI_TYPE_INFO);
	if (!str) {
		/* fi_tostr may fail; strdup(NULL) would be undefined */
		RPMEMC_LOG(ERR, "cannot get libfabric interface information");
		return;
	}

	/* duplicate so the buffer can be split in place with '\0' */
	char *buff = strdup(str);
	if (!buff) {
		RPMEMC_LOG(ERR, "!allocating string buffer for "
				"libfabric interface information");
		return;
	}

	RPMEMC_LOG(INFO, "libfabric interface info:");

	/* emit one log entry per '\n'-terminated line */
	char *nl;
	char *last = buff;
	while (last != NULL) {
		nl = strchr(last, '\n');
		if (nl) {
			*nl = '\0';
			nl++;
		}
		RPMEMC_LOG(INFO, "%s", last);
		last = nl;
	}

	free(buff);
}
| 7,550 | 21.675676 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_common_log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmem_common_log.h -- common log macros for librpmem and rpmemd
*/
#if defined(RPMEMC_LOG_RPMEM) && defined(RPMEMC_LOG_RPMEMD)
#error Both RPMEMC_LOG_RPMEM and RPMEMC_LOG_RPMEMD defined
#elif !defined(RPMEMC_LOG_RPMEM) && !defined(RPMEMC_LOG_RPMEMD)
/* neither side selected -- logging compiled out to no-ops */
#define RPMEMC_LOG(level, fmt, args...) do {} while (0)
#define RPMEMC_DBG(level, fmt, args...) do {} while (0)
#define RPMEMC_FATAL(fmt, args...) do {} while (0)
#define RPMEMC_ASSERT(cond) do {} while (0)
#elif defined(RPMEMC_LOG_RPMEM)
/* client side -- forward to librpmem logging macros */
#include "out.h"
#include "rpmem_util.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEM_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEM_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEM_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEM_ASSERT(cond)
#else
/* server side -- forward to rpmemd logging macros */
#include "rpmemd_log.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEMD_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEMD_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEMD_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEMD_ASSERT(cond)
#endif
| 1,160 | 28.769231 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_COMMON_H
#define RPMEM_COMMON_H 1
/*
 * Environment variables, default commands and connection timeouts
 * used by librpmem and rpmemd
 */
#define RPMEM_CMD_ENV "RPMEM_CMD"
#define RPMEM_SSH_ENV "RPMEM_SSH"
#define RPMEM_DEF_CMD "rpmemd"
#define RPMEM_DEF_SSH "ssh"
#define RPMEM_PROV_SOCKET_ENV "RPMEM_ENABLE_SOCKETS"
#define RPMEM_PROV_VERBS_ENV "RPMEM_ENABLE_VERBS"
#define RPMEM_MAX_NLANES_ENV "RPMEM_MAX_NLANES"
#define RPMEM_WQ_SIZE_ENV "RPMEM_WORK_QUEUE_SIZE"
#define RPMEM_ACCEPT_TIMEOUT 30000
#define RPMEM_CONNECT_TIMEOUT 30000
#define RPMEM_MONITOR_TIMEOUT 1000
#include <stdint.h>
#include <sys/socket.h>
#include <netdb.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_err -- error codes
*/
enum rpmem_err {
RPMEM_SUCCESS = 0,
RPMEM_ERR_BADPROTO = 1,
RPMEM_ERR_BADNAME = 2,
RPMEM_ERR_BADSIZE = 3,
RPMEM_ERR_BADNLANES = 4,
RPMEM_ERR_BADPROVIDER = 5,
RPMEM_ERR_FATAL = 6,
RPMEM_ERR_FATAL_CONN = 7,
RPMEM_ERR_BUSY = 8,
RPMEM_ERR_EXISTS = 9,
RPMEM_ERR_PROVNOSUP = 10,
RPMEM_ERR_NOEXIST = 11,
RPMEM_ERR_NOACCESS = 12,
RPMEM_ERR_POOL_CFG = 13,
MAX_RPMEM_ERR,
};
/*
* rpmem_persist_method -- remote persist operation method
*/
enum rpmem_persist_method {
RPMEM_PM_GPSPM = 1, /* General Purpose Server Persistency Method */
RPMEM_PM_APM = 2, /* Appliance Persistency Method */
MAX_RPMEM_PM,
};
const char *rpmem_persist_method_to_str(enum rpmem_persist_method pm);
/*
* rpmem_provider -- supported providers
*/
enum rpmem_provider {
RPMEM_PROV_UNKNOWN = 0,
RPMEM_PROV_LIBFABRIC_VERBS = 1,
RPMEM_PROV_LIBFABRIC_SOCKETS = 2,
MAX_RPMEM_PROV,
};
enum rpmem_provider rpmem_provider_from_str(const char *str);
const char *rpmem_provider_to_str(enum rpmem_provider provider);
/*
* rpmem_req_attr -- arguments for open/create request
*/
struct rpmem_req_attr {
size_t pool_size;
unsigned nlanes;
size_t buff_size;
enum rpmem_provider provider;
const char *pool_desc;
};
/*
* rpmem_resp_attr -- return arguments from open/create request
*/
struct rpmem_resp_attr {
unsigned short port;
uint64_t rkey;
uint64_t raddr;
unsigned nlanes;
enum rpmem_persist_method persist_method;
};
#define RPMEM_HAS_USER 0x1
#define RPMEM_HAS_SERVICE 0x2
#define RPMEM_FLAGS_USE_IPV4 0x4
#define RPMEM_MAX_USER (32 + 1) /* see useradd(8) + 1 for '\0' */
#define RPMEM_MAX_NODE (255 + 1) /* see gethostname(2) + 1 for '\0' */
#define RPMEM_MAX_SERVICE (NI_MAXSERV + 1) /* + 1 for '\0' */
#define RPMEM_HDR_SIZE 4096
#define RPMEM_CLOSE_FLAGS_REMOVE 0x1
#define RPMEM_DEF_BUFF_SIZE 8192
struct rpmem_target_info {
char user[RPMEM_MAX_USER];
char node[RPMEM_MAX_NODE];
char service[RPMEM_MAX_SERVICE];
unsigned flags;
};
extern unsigned Rpmem_max_nlanes;
extern unsigned Rpmem_wq_size;
extern int Rpmem_fork_unsafe;
int rpmem_b64_write(int sockfd, const void *buf, size_t len, int flags);
int rpmem_b64_read(int sockfd, void *buf, size_t len, int flags);
const char *rpmem_get_ip_str(const struct sockaddr *addr);
struct rpmem_target_info *rpmem_target_parse(const char *target);
void rpmem_target_free(struct rpmem_target_info *info);
int rpmem_xwrite(int fd, const void *buf, size_t len, int flags);
int rpmem_xread(int fd, void *buf, size_t len, int flags);
char *rpmem_get_ssh_conn_addr(void);
#ifdef __cplusplus
}
#endif
#endif
| 3,404 | 23.321429 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_proto.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_proto.h -- rpmem protocol definitions
*/
#ifndef RPMEM_PROTO_H
#define RPMEM_PROTO_H 1
#include <stdint.h>
#include <endian.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PACKED __attribute__((packed))
#define RPMEM_PROTO "tcp"
#define RPMEM_PROTO_MAJOR 0
#define RPMEM_PROTO_MINOR 1
#define RPMEM_SIG_SIZE 8
#define RPMEM_UUID_SIZE 16
#define RPMEM_PROV_SIZE 32
#define RPMEM_USER_SIZE 16
/*
* rpmem_msg_type -- type of messages
*/
enum rpmem_msg_type {
RPMEM_MSG_TYPE_CREATE = 1, /* create request */
RPMEM_MSG_TYPE_CREATE_RESP = 2, /* create request response */
RPMEM_MSG_TYPE_OPEN = 3, /* open request */
RPMEM_MSG_TYPE_OPEN_RESP = 4, /* open request response */
RPMEM_MSG_TYPE_CLOSE = 5, /* close request */
RPMEM_MSG_TYPE_CLOSE_RESP = 6, /* close request response */
RPMEM_MSG_TYPE_SET_ATTR = 7, /* set attributes request */
/* set attributes request response */
RPMEM_MSG_TYPE_SET_ATTR_RESP = 8,
MAX_RPMEM_MSG_TYPE,
};
/*
* rpmem_pool_attr_packed -- a packed version
*/
struct rpmem_pool_attr_packed {
char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
uint32_t compat_features; /* mask: compatible "may" features */
uint32_t incompat_features; /* mask: "must support" features */
uint32_t ro_compat_features; /* mask: force RO if unsupported */
unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
} PACKED;
/*
* rpmem_msg_ibc_attr -- in-band connection attributes
*
* Used by create request response and open request response.
* Contains essential information to proceed with in-band connection
* initialization.
*/
struct rpmem_msg_ibc_attr {
uint32_t port; /* RDMA connection port */
uint32_t persist_method; /* persist method */
uint64_t rkey; /* remote key */
uint64_t raddr; /* remote address */
uint32_t nlanes; /* number of lanes */
} PACKED;
/*
* rpmem_msg_pool_desc -- remote pool descriptor
*/
struct rpmem_msg_pool_desc {
uint32_t size; /* size of pool descriptor */
uint8_t desc[0]; /* pool descriptor, null-terminated string */
} PACKED;
/*
* rpmem_msg_hdr -- message header which consists of type and size of message
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr {
uint32_t type; /* type of message */
uint64_t size; /* size of message */
uint8_t body[0];
} PACKED;
/*
* rpmem_msg_hdr_resp -- message response header which consists of type, size
* and status.
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr_resp {
uint32_t status; /* response status */
uint32_t type; /* type of message */
uint64_t size; /* size of message */
} PACKED;
/*
* rpmem_msg_common -- common fields for open/create messages
*/
struct rpmem_msg_common {
uint16_t major; /* protocol version major number */
uint16_t minor; /* protocol version minor number */
uint64_t pool_size; /* minimum required size of a pool */
uint32_t nlanes; /* number of lanes used by initiator */
uint32_t provider; /* provider */
uint64_t buff_size; /* buffer size for inline persist */
} PACKED;
/*
* rpmem_msg_create -- create request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE.
* The size of message must be set to
* sizeof(struct rpmem_msg_create) + pool_desc_size
*/
struct rpmem_msg_create {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_create_resp -- create request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_create_resp).
*/
struct rpmem_msg_create_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
} PACKED;
/*
* rpmem_msg_open -- open request message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN.
* The size of message must be set to
* sizeof(struct rpmem_msg_open) + pool_desc_size
*/
struct rpmem_msg_open {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_open_resp -- open request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_open_resp)
*/
struct rpmem_msg_open_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_close -- close request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE
* The size of message must be set to sizeof(struct rpmem_msg_close)
*/
struct rpmem_msg_close {
struct rpmem_msg_hdr hdr; /* message header */
uint32_t flags; /* flags */
} PACKED;
/*
* rpmem_msg_close_resp -- close request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE_RESP
* The size of message must be set to sizeof(struct rpmem_msg_close_resp)
*/
struct rpmem_msg_close_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
/* no more fields */
} PACKED;
#define RPMEM_FLUSH_WRITE 0U /* flush / persist using RDMA WRITE */
#define RPMEM_DEEP_PERSIST 1U /* deep persist operation */
#define RPMEM_PERSIST_SEND 2U /* persist using RDMA SEND */
#define RPMEM_COMPLETION 4U /* schedule command with a completion */
/* the two least significant bits are reserved for mode of persist */
#define RPMEM_FLUSH_PERSIST_MASK 0x3U
#define RPMEM_PERSIST_MAX 2U /* maximum valid persist value */
/*
* rpmem_msg_persist -- remote persist message
*/
struct rpmem_msg_persist {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
uint64_t addr; /* remote memory address */
uint64_t size; /* remote memory size */
uint8_t data[];
};
/*
* rpmem_msg_persist_resp -- remote persist response message
*/
struct rpmem_msg_persist_resp {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
};
/*
* rpmem_msg_set_attr -- set attributes request message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr)
*/
struct rpmem_msg_set_attr {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_set_attr_resp -- set attributes request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr_resp).
*/
struct rpmem_msg_set_attr_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
} PACKED;
/*
* XXX Begin: Suppress gcc conversion warnings for FreeBSD be*toh macros.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
/*
 * rpmem_ntoh_msg_ibc_attr -- convert rpmem_msg_ibc attr to host byte order
 *
 * NOTE(review): the nlanes field of struct rpmem_msg_ibc_attr is NOT
 * converted here -- confirm whether it is intentionally exchanged in host
 * byte order (both peers use this same header, so same-endian hosts are
 * unaffected) or whether the conversion is missing for cross-endian peers.
 */
static inline void
rpmem_ntoh_msg_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
ibc->port = be32toh(ibc->port);
ibc->persist_method = be32toh(ibc->persist_method);
ibc->rkey = be64toh(ibc->rkey);
ibc->raddr = be64toh(ibc->raddr);
}
/*
* rpmem_ntoh_msg_pool_desc -- convert rpmem_msg_pool_desc to host byte order
*/
static inline void
rpmem_ntoh_msg_pool_desc(struct rpmem_msg_pool_desc *pool_desc)
{
pool_desc->size = be32toh(pool_desc->size);
}
/*
* rpmem_ntoh_pool_attr -- convert rpmem_pool_attr to host byte order
*/
static inline void
rpmem_ntoh_pool_attr(struct rpmem_pool_attr_packed *attr)
{
attr->major = be32toh(attr->major);
attr->ro_compat_features = be32toh(attr->ro_compat_features);
attr->incompat_features = be32toh(attr->incompat_features);
attr->compat_features = be32toh(attr->compat_features);
}
/*
* rpmem_ntoh_msg_hdr -- convert rpmem_msg_hdr to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr -- convert rpmem_msg_hdr to network byte order
*/
static inline void
rpmem_hton_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
rpmem_ntoh_msg_hdr(hdrp);
}
/*
* rpmem_ntoh_msg_hdr_resp -- convert rpmem_msg_hdr_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
hdrp->status = be32toh(hdrp->status);
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr_resp -- convert rpmem_msg_hdr_resp to network byte order
*/
static inline void
rpmem_hton_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
rpmem_ntoh_msg_hdr_resp(hdrp);
}
/*
* rpmem_ntoh_msg_common -- convert rpmem_msg_common to host byte order
*/
static inline void
rpmem_ntoh_msg_common(struct rpmem_msg_common *msg)
{
msg->major = be16toh(msg->major);
msg->minor = be16toh(msg->minor);
msg->pool_size = be64toh(msg->pool_size);
msg->nlanes = be32toh(msg->nlanes);
msg->provider = be32toh(msg->provider);
msg->buff_size = be64toh(msg->buff_size);
}
/*
* rpmem_hton_msg_common -- convert rpmem_msg_common to network byte order
*/
static inline void
rpmem_hton_msg_common(struct rpmem_msg_common *msg)
{
rpmem_ntoh_msg_common(msg);
}
/*
* rpmem_ntoh_msg_create -- convert rpmem_msg_create to host byte order
*/
static inline void
rpmem_ntoh_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_pool_attr(&msg->pool_attr);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* rpmem_hton_msg_create -- convert rpmem_msg_create to network byte order
*/
static inline void
rpmem_hton_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_create(msg);
}
/*
* rpmem_ntoh_msg_create_resp -- convert rpmem_msg_create_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
}
/*
* rpmem_hton_msg_create_resp -- convert rpmem_msg_create_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_create_resp(msg);
}
/*
* rpmem_ntoh_msg_open -- convert rpmem_msg_open to host byte order
*/
static inline void
rpmem_ntoh_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* XXX End: Suppress gcc conversion warnings for FreeBSD be*toh macros
*/
#pragma GCC diagnostic pop
/*
 * rpmem_hton_msg_open -- convert rpmem_msg_open to network byte order
 * (reuses the ntoh routine; the swap is an involution)
 */
static inline void
rpmem_hton_msg_open(struct rpmem_msg_open *msg)
{
	rpmem_ntoh_msg_open(msg);
}
/*
 * rpmem_ntoh_msg_open_resp -- convert rpmem_msg_open_resp to host byte order
 */
static inline void
rpmem_ntoh_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
	rpmem_ntoh_msg_hdr_resp(&msg->hdr);
	rpmem_ntoh_msg_ibc_attr(&msg->ibc);
	rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
 * rpmem_hton_msg_open_resp -- convert rpmem_msg_open_resp to network byte order
 */
static inline void
rpmem_hton_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
	rpmem_ntoh_msg_open_resp(msg);
}
/*
 * rpmem_ntoh_msg_set_attr -- convert rpmem_msg_set_attr to host byte order
 */
static inline void
rpmem_ntoh_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
	rpmem_ntoh_msg_hdr(&msg->hdr);
	rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
 * rpmem_hton_msg_set_attr -- convert rpmem_msg_set_attr to network byte order
 */
static inline void
rpmem_hton_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
	rpmem_ntoh_msg_set_attr(msg);
}
/*
 * rpmem_ntoh_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to host byte
 * order
 */
static inline void
rpmem_ntoh_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
	rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
 * rpmem_hton_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to network
 * byte order
 *
 * NOTE: unlike its siblings this delegates to the hton variant of the
 * header converter rather than the ntoh one; equivalent in effect, since
 * rpmem_hton_msg_hdr_resp itself just calls rpmem_ntoh_msg_hdr_resp.
 */
static inline void
rpmem_hton_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
	rpmem_hton_msg_hdr_resp(&msg->hdr);
}
/*
 * rpmem_ntoh_msg_close -- convert rpmem_msg_close to host byte order
 *
 * The close request carries only the generic header.
 */
static inline void
rpmem_ntoh_msg_close(struct rpmem_msg_close *msg)
{
	rpmem_ntoh_msg_hdr(&msg->hdr);
}
/*
 * rpmem_hton_msg_close -- convert rpmem_msg_close to network byte order
 * (reuses the ntoh routine; the swap is an involution)
 */
static inline void
rpmem_hton_msg_close(struct rpmem_msg_close *msg)
{
	rpmem_ntoh_msg_close(msg);
}
/*
 * rpmem_ntoh_msg_close_resp -- convert rpmem_msg_close_resp to host byte order
 */
static inline void
rpmem_ntoh_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
	rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
 * rpmem_hton_msg_close_resp -- convert rpmem_msg_close_resp to network byte
 * order
 */
static inline void
rpmem_hton_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
	rpmem_ntoh_msg_close_resp(msg);
}
/*
 * pack_rpmem_pool_attr -- copy pool attributes to a packed structure
 *
 * Field-by-field copy; no byte-order conversion happens here.
 * NOTE(review): the signature copy is bounded by sizeof(src->signature)
 * while all the remaining arrays are bounded by the destination's size --
 * this assumes the corresponding arrays in the packed and unpacked
 * structs have identical lengths (TODO confirm against the struct
 * definitions, which are outside this chunk).
 */
static inline void
pack_rpmem_pool_attr(const struct rpmem_pool_attr *src,
	struct rpmem_pool_attr_packed *dst)
{
	memcpy(dst->signature, src->signature, sizeof(src->signature));
	dst->major = src->major;
	dst->compat_features = src->compat_features;
	dst->incompat_features = src->incompat_features;
	dst->ro_compat_features = src->ro_compat_features;
	memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
	memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
	memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
	memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
	memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
/*
 * unpack_rpmem_pool_attr -- copy pool attributes to an unpacked structure
 *
 * Exact mirror of pack_rpmem_pool_attr (same sizeof conventions apply).
 */
static inline void
unpack_rpmem_pool_attr(const struct rpmem_pool_attr_packed *src,
	struct rpmem_pool_attr *dst)
{
	memcpy(dst->signature, src->signature, sizeof(src->signature));
	dst->major = src->major;
	dst->compat_features = src->compat_features;
	dst->incompat_features = src->incompat_features;
	dst->ro_compat_features = src->ro_compat_features;
	memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
	memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
	memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
	memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
	memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
#ifdef __cplusplus
}
#endif
#endif
| 15,016 | 26.503663 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_fip_lane.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_fip_lane.h -- rpmem fabric provider lane definition
*/
#include <sched.h>
#include <stdint.h>
#include "sys_util.h"
/*
 * rpmem_fip_lane -- basic lane structure
 *
 * This structure consist of a synchronization object and a return value.
 * It is possible to wait on the lane for specified event. The event can be
 * signalled by another thread which can pass the return value if required.
 *
 * The sync variable can store up to 64 different events, each event on
 * separate bit.
 */
struct rpmem_fip_lane {
	os_spinlock_t lock;	/* protects 'ret' and 'sync' below */
	int ret;		/* value passed by the signalling thread */
	uint64_t sync;		/* bitmask of events still pending */
};
/*
 * rpmem_fip_lane_init -- initialize basic lane structure
 *
 * Returns the result of spinlock initialization (0 on success).
 */
static inline int
rpmem_fip_lane_init(struct rpmem_fip_lane *lanep)
{
	lanep->ret = 0;
	lanep->sync = 0;
	return util_spin_init(&lanep->lock, PTHREAD_PROCESS_PRIVATE);
}
/*
 * rpmem_fip_lane_fini -- deinitialize basic lane structure
 */
static inline void
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
	util_spin_destroy(&lanep->lock);
}
/*
 * rpmem_fip_lane_busy -- return nonzero if the lane has any event pending
 */
static inline int
rpmem_fip_lane_busy(struct rpmem_fip_lane *lane)
{
	int busy;

	util_spin_lock(&lane->lock);
	busy = (lane->sync != 0);
	util_spin_unlock(&lane->lock);

	return busy;
}
/*
 * rpmem_fip_lane_begin -- begin waiting for specified event(s)
 *
 * Clears the stored return value and marks the given event bits pending.
 */
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lane, uint64_t sig)
{
	util_spin_lock(&lane->lock);
	lane->ret = 0;
	lane->sync |= sig;
	util_spin_unlock(&lane->lock);
}
/*
 * rpmem_fip_lane_is_busy -- check whether any of the given events is still
 * pending on the lane
 */
static inline int
rpmem_fip_lane_is_busy(struct rpmem_fip_lane *lane, uint64_t sig)
{
	int pending;

	util_spin_lock(&lane->lock);
	pending = ((lane->sync & sig) != 0);
	util_spin_unlock(&lane->lock);

	return pending;
}
/*
 * rpmem_fip_lane_ret -- read the lane's stored return value under the lock
 */
static inline int
rpmem_fip_lane_ret(struct rpmem_fip_lane *lane)
{
	int value;

	util_spin_lock(&lane->lock);
	value = lane->ret;
	util_spin_unlock(&lane->lock);

	return value;
}
/*
 * rpmem_fip_lane_wait -- spin (yielding the CPU each iteration) until all
 * of the given events have been signalled, then return the value passed
 * by the signalling thread
 */
static inline int
rpmem_fip_lane_wait(struct rpmem_fip_lane *lane, uint64_t sig)
{
	for (;;) {
		if (!rpmem_fip_lane_is_busy(lane, sig))
			return rpmem_fip_lane_ret(lane);
		sched_yield();
	}
}
/*
 * rpmem_fip_lane_signal -- signal lane about specified event
 *
 * Clears the given event bit(s); a thread spinning in
 * rpmem_fip_lane_wait on those bits will then return.
 */
static inline void
rpmem_fip_lane_signal(struct rpmem_fip_lane *lanep, uint64_t sig)
{
	util_spin_lock(&lanep->lock);
	lanep->sync &= ~sig;
	util_spin_unlock(&lanep->lock);
}
/*
 * rpmem_fip_lane_sigret -- signal lane about specified event and store
 * return value
 *
 * The stored value is what rpmem_fip_lane_wait returns to the waiter.
 */
static inline void
rpmem_fip_lane_sigret(struct rpmem_fip_lane *lanep, uint64_t sig, int ret)
{
	util_spin_lock(&lanep->lock);
	lanep->ret = ret;
	lanep->sync &= ~sig;
	util_spin_unlock(&lanep->lock);
}
| 2,754 | 20.523438 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/rpmem_common/rpmem_fip_msg.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_fip_msg.h -- simple wrappers for fi_rma(3) and fi_msg(3) functions
*/
#ifndef RPMEM_FIP_MSG_H
#define RPMEM_FIP_MSG_H 1
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * rpmem_fip_rma -- helper struct for RMA operation
 *
 * Bundles a libfabric fi_msg_rma with the single-entry IO vectors it
 * points into, so one allocation describes a complete one-buffer RMA.
 */
struct rpmem_fip_rma {
	struct fi_msg_rma msg;		/* message structure */
	struct iovec msg_iov;		/* IO vector buffer */
	struct fi_rma_iov rma_iov;	/* RMA IO vector buffer */
	void *desc;			/* local memory descriptor */
	uint64_t flags;			/* RMA operation flags */
};
/*
 * rpmem_fip_msg -- helper struct for MSG operation
 *
 * Same idea as rpmem_fip_rma but for two-sided send/recv messages.
 */
struct rpmem_fip_msg {
	struct fi_msg msg;	/* message structure */
	struct iovec iov;	/* IO vector buffer */
	void *desc;		/* local memory descriptor */
	uint64_t flags;		/* MSG operation flags */
};
/*
 * rpmem_fip_rma_init -- prepare an RMA helper for repeated use
 *
 * Zeroes the whole helper, then wires the embedded fi_msg_rma to the
 * single-entry IO vectors stored alongside it. Per-operation fields
 * (buffer, length, remote address) are filled in later by the
 * read/write wrappers.
 */
static inline void
rpmem_fip_rma_init(struct rpmem_fip_rma *rma, void *desc,
	fi_addr_t addr, uint64_t rkey, void *context, uint64_t flags)
{
	memset(rma, 0, sizeof(*rma));

	rma->flags = flags;
	rma->desc = desc;
	rma->rma_iov.key = rkey;

	rma->msg.addr = addr;
	rma->msg.context = context;
	rma->msg.desc = &rma->desc;
	rma->msg.msg_iov = &rma->msg_iov;
	rma->msg.iov_count = 1;
	rma->msg.rma_iov = &rma->rma_iov;
	rma->msg.rma_iov_count = 1;
}
/*
 * rpmem_fip_msg_init -- prepare a MSG helper for repeated use
 *
 * Like rpmem_fip_rma_init, but for two-sided messages; the buffer and
 * its length are fixed here at initialization time.
 */
static inline void
rpmem_fip_msg_init(struct rpmem_fip_msg *msg, void *desc, fi_addr_t addr,
	void *context, void *buff, size_t len, uint64_t flags)
{
	memset(msg, 0, sizeof(*msg));

	msg->flags = flags;
	msg->desc = desc;
	msg->iov.iov_base = buff;
	msg->iov.iov_len = len;

	msg->msg.addr = addr;
	msg->msg.context = context;
	msg->msg.desc = &msg->desc;
	msg->msg.msg_iov = &msg->iov;
	msg->msg.iov_count = 1;
}
/*
 * rpmem_fip_writemsg -- wrapper for fi_writemsg
 *
 * Points the pre-wired IO vectors at the given local buffer and remote
 * address before posting the write.
 */
static inline int
rpmem_fip_writemsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
	const void *buff, size_t len, uint64_t addr)
{
	rma->msg_iov.iov_base = (void *)buff;
	rma->msg_iov.iov_len = len;
	rma->rma_iov.addr = addr;
	rma->rma_iov.len = len;

	return (int)fi_writemsg(ep, &rma->msg, rma->flags);
}
/*
 * rpmem_fip_readmsg -- wrapper for fi_readmsg
 *
 * Mirror of rpmem_fip_writemsg for the read direction.
 */
static inline int
rpmem_fip_readmsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
	void *buff, size_t len, uint64_t addr)
{
	rma->msg_iov.iov_base = buff;
	rma->msg_iov.iov_len = len;
	rma->rma_iov.addr = addr;
	rma->rma_iov.len = len;

	return (int)fi_readmsg(ep, &rma->msg, rma->flags);
}
/*
 * rpmem_fip_sendmsg -- wrapper for fi_sendmsg
 *
 * Only the length varies per call; the buffer was fixed at init time.
 */
static inline int
rpmem_fip_sendmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg, size_t len)
{
	msg->iov.iov_len = len;

	return (int)fi_sendmsg(ep, &msg->msg, msg->flags);
}
/*
 * rpmem_fip_recvmsg -- wrapper for fi_recvmsg
 *
 * Posts a receive into the buffer/length configured at init time.
 */
static inline int
rpmem_fip_recvmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg)
{
	return (int)fi_recvmsg(ep, &msg->msg, msg->flags);
}
/*
 * rpmem_fip_msg_get_pmsg -- view the message buffer as a persist request
 */
static inline struct rpmem_msg_persist *
rpmem_fip_msg_get_pmsg(struct rpmem_fip_msg *msg)
{
	void *buff = msg->iov.iov_base;

	return (struct rpmem_msg_persist *)buff;
}
/*
 * rpmem_fip_msg_get_pres -- view the message buffer as a persist response
 */
static inline struct rpmem_msg_persist_resp *
rpmem_fip_msg_get_pres(struct rpmem_fip_msg *msg)
{
	void *buff = msg->iov.iov_base;

	return (struct rpmem_msg_persist_resp *)buff;
}
#ifdef __cplusplus
}
#endif
#endif
| 3,494 | 22.77551 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/libpmempool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool.c -- entry points for libpmempool
*/
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/param.h>
#include "pmemcommon.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check.h"
#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_util.h"
#endif
#ifdef _WIN32
#define ANSWER_BUFFSIZE 256
#endif
/*
 * libpmempool_init -- load-time initialization for libpmempool
 *
 * Called automatically by the run-time loader.
 * Sets up common logging/versioning and, when built with remote
 * replication support, the rpmem remote machinery.
 */
ATTR_CONSTRUCTOR
void
libpmempool_init(void)
{
	common_init(PMEMPOOL_LOG_PREFIX, PMEMPOOL_LOG_LEVEL_VAR,
		PMEMPOOL_LOG_FILE_VAR, PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION);
	LOG(3, NULL);
#ifdef USE_RPMEM
	util_remote_init();
	rpmem_util_cmds_init();
#endif
}
/*
 * libpmempool_fini -- libpmempool cleanup routine
 *
 * Called automatically when the process terminates.
 * Tears down in reverse: remote machinery first, then common state.
 */
ATTR_DESTRUCTOR
void
libpmempool_fini(void)
{
	LOG(3, NULL);
#ifdef USE_RPMEM
	util_remote_unload();
	util_remote_fini();
	rpmem_util_cmds_fini();
#endif
	common_fini();
}
/*
 * pmempool_check_versionU -- see if library meets application version
 * requirements
 *
 * Returns NULL when compatible; otherwise returns the error message
 * (major must match exactly, minor must be <= the library's minor).
 */
#ifndef _WIN32
static inline
#endif
const char *
pmempool_check_versionU(unsigned major_required, unsigned minor_required)
{
	LOG(3, "major_required %u minor_required %u",
		major_required, minor_required);
	if (major_required != PMEMPOOL_MAJOR_VERSION) {
		ERR("libpmempool major version mismatch (need %u, found %u)",
			major_required, PMEMPOOL_MAJOR_VERSION);
		return out_get_errormsg();
	}
	if (minor_required > PMEMPOOL_MINOR_VERSION) {
		ERR("libpmempool minor version mismatch (need %u, found %u)",
			minor_required, PMEMPOOL_MINOR_VERSION);
		return out_get_errormsg();
	}
	return NULL;
}
#ifndef _WIN32
/*
 * pmempool_check_version -- see if lib meets application version requirements
 */
const char *
pmempool_check_version(unsigned major_required, unsigned minor_required)
{
	return pmempool_check_versionU(major_required, minor_required);
}
#else
/*
 * pmempool_check_versionW -- see if library meets application version
 * requirements as widechar
 */
const wchar_t *
pmempool_check_versionW(unsigned major_required, unsigned minor_required)
{
	if (pmempool_check_versionU(major_required, minor_required) != NULL)
		return out_get_errormsgW();
	else
		return NULL;
}
#endif
/*
 * pmempool_errormsgU -- return last error message
 */
#ifndef _WIN32
static inline
#endif
const char *
pmempool_errormsgU(void)
{
	return out_get_errormsg();
}
#ifndef _WIN32
/*
 * pmempool_errormsg -- return last error message
 */
const char *
pmempool_errormsg(void)
{
	return pmempool_errormsgU();
}
#else
/*
 * pmempool_errormsgW -- return last error message as widechar
 */
const wchar_t *
pmempool_errormsgW(void)
{
	return out_get_errormsgW();
}
#endif
/*
 * pmempool_ppc_set_default -- (internal) set default values of check context
 *
 * Resets *ppc to a known initial state: auto-detected pool type and a
 * CONSISTENT result until a check proves otherwise.
 */
static void
pmempool_ppc_set_default(PMEMpoolcheck *ppc)
{
	/* all other fields should be zeroed */
	const PMEMpoolcheck ppc_default = {
		.args = {
			.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		},
		.result = CHECK_RESULT_CONSISTENT,
	};
	*ppc = ppc_default;
}
/*
 * pmempool_check_initU -- initialize check context
 *
 * Validates the flag combination, allocates a PMEMpoolcheck and deep-
 * copies the path strings so the caller's args may go out of scope.
 * On failure returns NULL with errno set; on success the returned
 * context must eventually be released with pmempool_check_end.
 */
#ifndef _WIN32
static inline
#endif
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size)
{
	LOG(3, "path %s backup_path %s pool_type %u flags %x", args->path,
		args->backup_path, args->pool_type, args->flags);
	/*
	 * Currently one size of the args structure is supported. The version
	 * of the pmempool_check_args structure can be distinguished based on
	 * the provided args_size.
	 */
	if (args_size < sizeof(struct pmempool_check_args)) {
		ERR("provided args_size is not supported");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * Dry run does not allow making the changes possibly performed during
	 * repair. Advanced allows performing more complex repairs. Questions
	 * are asked only if repairs are made. So dry run, advanced and
	 * always_yes can be set only if repair is set.
	 */
	if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_REPAIR) &&
			util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN |
			PMEMPOOL_CHECK_ADVANCED | PMEMPOOL_CHECK_ALWAYS_YES)) {
		ERR("dry_run, advanced and always_yes are applicable only if "
			"repair is set");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * dry run does not modify anything so performing backup is redundant
	 */
	if (util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN) &&
			args->backup_path != NULL) {
		ERR("dry run does not allow one to perform backup");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * libpmempool uses str format of communication so it must be set
	 */
	if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_FORMAT_STR)) {
		ERR("PMEMPOOL_CHECK_FORMAT_STR flag must be set");
		errno = EINVAL;
		return NULL;
	}
	PMEMpoolcheck *ppc = calloc(1, sizeof(*ppc));
	if (ppc == NULL) {
		ERR("!calloc");
		return NULL;
	}
	pmempool_ppc_set_default(ppc);
	memcpy(&ppc->args, args, sizeof(ppc->args));
	/* own copies of the paths; args->path may be freed by the caller */
	ppc->path = strdup(args->path);
	if (!ppc->path) {
		ERR("!strdup");
		goto error_path_malloc;
	}
	ppc->args.path = ppc->path;
	if (args->backup_path != NULL) {
		ppc->backup_path = strdup(args->backup_path);
		if (!ppc->backup_path) {
			ERR("!strdup");
			goto error_backup_path_malloc;
		}
		ppc->args.backup_path = ppc->backup_path;
	}
	if (check_init(ppc) != 0)
		goto error_check_init;
	return ppc;
/* goto-based unwind: free in reverse order of acquisition */
error_check_init:
	/* in case errno not set by any of the used functions set its value */
	if (errno == 0)
		errno = EINVAL;
	free(ppc->backup_path);
error_backup_path_malloc:
	free(ppc->path);
error_path_malloc:
	free(ppc);
	return NULL;
}
#ifndef _WIN32
/*
 * pmempool_check_init -- initialize check context
 */
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size)
{
	return pmempool_check_initU(args, args_size);
}
#else
/*
 * pmempool_check_initW -- initialize check context as widechar
 *
 * Converts the wide paths to UTF-8, delegates to the U variant and
 * frees the temporary conversions regardless of the outcome.
 */
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size)
{
	char *upath = util_toUTF8(args->path);
	if (upath == NULL)
		return NULL;
	char *ubackup_path = NULL;
	if (args->backup_path != NULL) {
		ubackup_path = util_toUTF8(args->backup_path);
		if (ubackup_path == NULL) {
			util_free_UTF8(upath);
			return NULL;
		}
	}
	struct pmempool_check_argsU uargs = {
		.path = upath,
		.backup_path = ubackup_path,
		.pool_type = args->pool_type,
		.flags = args->flags
	};
	/* initU strdups the paths, so the temporaries can be freed here */
	PMEMpoolcheck *ret = pmempool_check_initU(&uargs, args_size);
	util_free_UTF8(ubackup_path);
	util_free_UTF8(upath);
	return ret;
}
#endif
/*
 * pmempool_checkU -- continue check till produce status to consume for caller
 *
 * Drives check_step until either a status is produced (returned) or the
 * check machinery reports completion with nothing left (NULL).
 */
#ifndef _WIN32
static inline
#endif
struct pmempool_check_statusU *
pmempool_checkU(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	ASSERTne(ppc, NULL);
	struct check_status *result;
	do {
		result = check_step(ppc);
		if (check_is_end(ppc->data) && result == NULL)
			return NULL;
	} while (result == NULL);
	return check_status_get(result);
}
#ifndef _WIN32
/*
 * pmempool_check -- continue check till produce status to consume for caller
 */
struct pmempool_check_status *
pmempool_check(PMEMpoolcheck *ppc)
{
	return pmempool_checkU(ppc);
}
#else
/*
 * pmempool_checkW -- continue check till produce status to consume for caller
 *
 * Wide-char variant: converts any cached answer first, then the UTF-8
 * status message to UTF-16 before handing it back.
 */
struct pmempool_check_statusW *
pmempool_checkW(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	ASSERTne(ppc, NULL);
	/* check the cache and convert msg and answer */
	char buf[ANSWER_BUFFSIZE];
	memset(buf, 0, ANSWER_BUFFSIZE);
	convert_status_cache(ppc, buf, ANSWER_BUFFSIZE);
	struct check_status *uresult;
	do {
		uresult = check_step(ppc);
		if (check_is_end(ppc->data) && uresult == NULL)
			return NULL;
	} while (uresult == NULL);
	struct pmempool_check_statusU *uret_res = check_status_get(uresult);
	const wchar_t *wmsg = util_toUTF16(uret_res->str.msg);
	if (wmsg == NULL)
		FATAL("!malloc");
	/* statusW aliases statusU; only the msg pointer type differs */
	struct pmempool_check_statusW *wret_res =
		(struct pmempool_check_statusW *)uret_res;
	/* pointer to old message is freed in next check step */
	wret_res->str.msg = wmsg;
	return wret_res;
}
#endif
/*
 * pmempool_check_end -- end check and release check context
 *
 * Maps the internal check result to the public enum. The result and
 * sync_required fields are copied out BEFORE the context is freed --
 * do not reorder these reads past the free() calls.
 */
enum pmempool_check_result
pmempool_check_end(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	const enum check_result result = ppc->result;
	const unsigned sync_required = ppc->sync_required;
	check_fini(ppc);
	free(ppc->path);
	free(ppc->backup_path);
	free(ppc);
	if (sync_required) {
		switch (result) {
		case CHECK_RESULT_CONSISTENT:
		case CHECK_RESULT_REPAIRED:
			return PMEMPOOL_CHECK_RESULT_SYNC_REQ;
		default:
			/* other results require fixing prior to sync */
			;
		}
	}
	switch (result) {
	case CHECK_RESULT_CONSISTENT:
		return PMEMPOOL_CHECK_RESULT_CONSISTENT;
	case CHECK_RESULT_NOT_CONSISTENT:
		return PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT;
	case CHECK_RESULT_REPAIRED:
		return PMEMPOOL_CHECK_RESULT_REPAIRED;
	case CHECK_RESULT_CANNOT_REPAIR:
		return PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR;
	default:
		return PMEMPOOL_CHECK_RESULT_ERROR;
	}
}
| 9,142 | 20.873206 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/replica.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* replica.h -- module for synchronizing and transforming poolset
*/
#ifndef REPLICA_H
#define REPLICA_H
#include "libpmempool.h"
#include "pool.h"
#include "badblocks.h"
#ifdef __cplusplus
extern "C" {
#endif
#define UNDEF_REPLICA UINT_MAX
#define UNDEF_PART UINT_MAX
/*
* A part marked as broken does not exist or is damaged so that
* it cannot be opened and has to be recreated.
*/
#define IS_BROKEN (1U << 0)
/*
* A replica marked as inconsistent exists but has inconsistent metadata
* (e.g. inconsistent parts or replicas linkage)
*/
#define IS_INCONSISTENT (1U << 1)
/*
* A part or replica marked in this way has bad blocks inside.
*/
#define HAS_BAD_BLOCKS (1U << 2)
/*
* A part marked in this way has bad blocks in the header
*/
#define HAS_CORRUPTED_HEADER (1U << 3)
/*
* A flag which can be passed to sync_replica() to indicate that the function is
* called by pmempool_transform
*/
#define IS_TRANSFORMED (1U << 10)
/*
* Number of lanes utilized when working with remote replicas
*/
#define REMOTE_NLANES 1
/*
 * Helping structures for storing part's health status
 */
struct part_health_status {
	unsigned flags;			/* IS_BROKEN / HAS_BAD_BLOCKS / ... */
	struct badblocks bbs; /* structure with bad blocks */
	char *recovery_file_name; /* name of bad block recovery file */
	int recovery_file_exists; /* bad block recovery file exists */
};
/*
 * Helping structures for storing replica and poolset's health status
 */
struct replica_health_status {
	unsigned nparts;
	unsigned nhdrs;
	/* a flag for the replica */
	unsigned flags;
	/* effective size of a pool, valid only for healthy replica */
	size_t pool_size;
	/* flags for each part */
	struct part_health_status part[];	/* flexible array, nparts long */
};
struct poolset_health_status {
	unsigned nreplicas;
	/* a flag for the poolset */
	unsigned flags;
	/* health statuses for each replica */
	struct replica_health_status *replica[];	/* nreplicas entries */
};
/*
 * get index of the (r)th replica health status
 *
 * The nreplicas term keeps the result well-defined for callers that
 * iterate one past the end (wrap-around addressing of neighbours).
 */
static inline unsigned
REP_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);
	return (set->nreplicas + r) % set->nreplicas;
}
/* get index of the (r + 1)th replica health status (wraps around) */
static inline unsigned
REPN_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);
	return (set->nreplicas + r + 1) % set->nreplicas;
}
/* get (p)th part health status index (wraps around like the above) */
static inline unsigned
PART_HEALTHidx(struct replica_health_status *rep, unsigned p)
{
	ASSERTne(rep->nparts, 0);
	return (rep->nparts + p) % rep->nparts;
}
/* get (r)th replica health status */
static inline struct replica_health_status *
REP_HEALTH(struct poolset_health_status *set, unsigned r)
{
	return set->replica[REP_HEALTHidx(set, r)];
}
/* get (p)th part health status flags */
static inline unsigned
PART_HEALTH(struct replica_health_status *rep, unsigned p)
{
	return rep->part[PART_HEALTHidx(rep, p)].flags;
}
/* geometry helpers implemented in replica.c */
uint64_t replica_get_part_offset(struct pool_set *set,
		unsigned repn, unsigned partn);
void replica_align_badblock_offset_length(size_t *offset, size_t *length,
		struct pool_set *set_in, unsigned repn, unsigned partn);
size_t replica_get_part_data_len(struct pool_set *set_in, unsigned repn,
		unsigned partn);
uint64_t replica_get_part_data_offset(struct pool_set *set_in, unsigned repn,
		unsigned part);
/*
 * is_dry_run -- (internal) check whether only verification mode is enabled
 */
static inline bool
is_dry_run(unsigned flags)
{
	/*
	 * PMEMPOOL_SYNC_DRY_RUN and PMEMPOOL_TRANSFORM_DRY_RUN
	 * have to have the same value in order to use this common function.
	 */
	ASSERT_COMPILE_ERROR_ON(PMEMPOOL_SYNC_DRY_RUN !=
		PMEMPOOL_TRANSFORM_DRY_RUN);
	return flags & PMEMPOOL_SYNC_DRY_RUN;
}
/*
 * fix_bad_blocks -- (internal) fix bad blocks - it causes reading or creating
 *                   bad blocks recovery files
 *                   (depending on if they exist or not)
 */
static inline bool
fix_bad_blocks(unsigned flags)
{
	return flags & PMEMPOOL_SYNC_FIX_BAD_BLOCKS;
}
/* bad-block recovery file management */
int replica_remove_all_recovery_files(struct poolset_health_status *set_hs);
int replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
		int fix_bad_blocks);
/* health-status lifecycle and queries */
int replica_create_poolset_health_status(struct pool_set *set,
		struct poolset_health_status **set_hsp);
void replica_free_poolset_health_status(struct poolset_health_status *set_s);
int replica_check_poolset_health(struct pool_set *set,
		struct poolset_health_status **set_hs,
		int called_from_sync, unsigned flags);
int replica_is_part_broken(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
int replica_has_bad_blocks(unsigned repn, struct poolset_health_status *set_hs);
int replica_part_has_bad_blocks(struct part_health_status *phs);
int replica_part_has_corrupted_header(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
unsigned replica_find_unbroken_part(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_broken(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_consistent(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_healthy(unsigned repn,
		struct poolset_health_status *set_hs);
unsigned replica_find_healthy_replica(
		struct poolset_health_status *set_hs);
unsigned replica_find_replica_healthy_header(
		struct poolset_health_status *set_hs);
int replica_is_poolset_healthy(struct poolset_health_status *set_hs);
int replica_is_poolset_transformed(unsigned flags);
/* poolset/part validation and file handling */
ssize_t replica_get_pool_size(struct pool_set *set, unsigned repn);
int replica_check_part_sizes(struct pool_set *set, size_t min_size);
int replica_check_part_dirs(struct pool_set *set);
int replica_check_local_part_dir(struct pool_set *set, unsigned repn,
		unsigned partn);
int replica_open_replica_part_files(struct pool_set *set, unsigned repn);
int replica_open_poolset_part_files(struct pool_set *set);
/* top-level operations */
int replica_sync(struct pool_set *set_in, struct poolset_health_status *set_hs,
		unsigned flags);
int replica_transform(struct pool_set *set_in, struct pool_set *set_out,
		unsigned flags);
#ifdef __cplusplus
}
#endif
#endif
| 6,216 | 28.325472 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_blk.c -- check pmemblk
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* repair questions this module can ask the user */
enum question {
	Q_BLK_BSIZE,
};
/*
 * blk_get_max_bsize -- (internal) return maximum size of block for given file
 * size
 *
 * NOTE(review): assumes fsize > sizeof(struct pmemblk); a smaller
 * nonzero fsize would make the subtraction below wrap -- TODO confirm
 * callers guarantee this.
 */
static inline uint32_t
blk_get_max_bsize(uint64_t fsize)
{
	LOG(3, NULL);
	if (fsize == 0)
		return 0;
	/* default nfree */
	uint32_t nfree = BTT_DEFAULT_NFREE;
	/* number of blocks must be at least 2 * nfree */
	uint32_t internal_nlba = 2 * nfree;
	/* compute arena size from file size without pmemblk structure */
	uint64_t arena_size = fsize - sizeof(struct pmemblk);
	if (arena_size > BTT_MAX_ARENA)
		arena_size = BTT_MAX_ARENA;
	arena_size = btt_arena_datasize(arena_size, nfree);
	/* compute maximum internal LBA size */
	uint64_t internal_lbasize = (arena_size - BTT_ALIGNMENT) /
		internal_nlba - BTT_MAP_ENTRY_SIZE;
	ASSERT(internal_lbasize <= UINT32_MAX);
	if (internal_lbasize < BTT_MIN_LBA_SIZE)
		internal_lbasize = BTT_MIN_LBA_SIZE;
	/* round down to the internal LBA alignment */
	internal_lbasize = roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT)
		- BTT_INTERNAL_LBA_ALIGNMENT;
	return (uint32_t)internal_lbasize;
}
/*
 * blk_read -- (internal) read pmemblk header
 *
 * Returns 0 on success; on failure reports the error via CHECK_ERR and
 * returns its (nonzero) result.
 */
static int
blk_read(PMEMpoolcheck *ppc)
{
	/*
	 * Here we want to read the pmemblk header without the pool_hdr as we've
	 * already done it before.
	 *
	 * Take the pointer to fields right after pool_hdr, compute the size and
	 * offset of remaining fields.
	 */
	uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.blk;
	ptr += sizeof(ppc->pool->hdr.blk.hdr);
	size_t size = sizeof(ppc->pool->hdr.blk) -
		sizeof(ppc->pool->hdr.blk.hdr);
	uint64_t offset = sizeof(ppc->pool->hdr.blk.hdr);
	if (pool_read(ppc->pool, ptr, size, offset)) {
		return CHECK_ERR(ppc, "cannot read pmemblk structure");
	}
	/* endianness conversion */
	ppc->pool->hdr.blk.bsize = le32toh(ppc->pool->hdr.blk.bsize);
	return 0;
}
/*
 * blk_bsize_valid -- (internal) check if block size is valid for given file
 * size
 *
 * NOTE(review): despite the name, this returns nonzero when bsize is NOT
 * acceptable (it is >= the computed maximum); the caller in
 * blk_hdr_check treats a nonzero result as "invalid pmemblk.bsize".
 */
static int
blk_bsize_valid(uint32_t bsize, uint64_t fsize)
{
	uint32_t max_bsize = blk_get_max_bsize(fsize);
	return (bsize >= max_bsize);
}
/*
 * blk_hdr_check -- (internal) check pmemblk header
 *
 * Prefers the block size recorded in the first valid BTT Info arena; if
 * none exists (and the BTT area is not freshly zeroed), falls back to
 * validating the header's bsize against the file size.
 */
static int
blk_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	CHECK_INFO(ppc, "checking pmemblk header");
	if (blk_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	/* check for valid BTT Info arena as we can take bsize from it */
	if (!ppc->pool->bttc.valid)
		pool_blk_get_first_valid_arena(ppc->pool, &ppc->pool->bttc);
	if (ppc->pool->bttc.valid) {
		const uint32_t btt_bsize =
			ppc->pool->bttc.btt_info.external_lbasize;
		if (ppc->pool->hdr.blk.bsize != btt_bsize) {
			CHECK_ASK(ppc, Q_BLK_BSIZE,
				"invalid pmemblk.bsize.|Do you want to set "
				"pmemblk.bsize to %u from BTT Info?",
				btt_bsize);
		}
	} else if (!ppc->pool->bttc.zeroed) {
		/* no BTT to consult -- sanity-check against the file size */
		if (ppc->pool->hdr.blk.bsize < BTT_MIN_LBA_SIZE ||
				blk_bsize_valid(ppc->pool->hdr.blk.bsize,
				ppc->pool->set_file->size)) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			return CHECK_ERR(ppc, "invalid pmemblk.bsize");
		}
	}
	if (ppc->result == CHECK_RESULT_CONSISTENT ||
			ppc->result == CHECK_RESULT_REPAIRED)
		CHECK_INFO(ppc, "pmemblk header correct");
	return check_questions_sequence_validate(ppc);
}
/*
 * blk_hdr_fix -- (internal) fix pmemblk header
 *
 * Applies the answer to a previously asked question; currently only
 * Q_BLK_BSIZE (copy bsize from the first valid BTT Info arena).
 */
static int
blk_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
	LOG(3, NULL);
	uint32_t btt_bsize;
	switch (question) {
	case Q_BLK_BSIZE:
		/*
		 * check for valid BTT Info arena as we can take bsize from it
		 */
		if (!ppc->pool->bttc.valid)
			pool_blk_get_first_valid_arena(ppc->pool,
				&ppc->pool->bttc);
		btt_bsize = ppc->pool->bttc.btt_info.external_lbasize;
		CHECK_INFO(ppc, "setting pmemblk.b_size to 0x%x", btt_bsize);
		ppc->pool->hdr.blk.bsize = btt_bsize;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/* a single stage of the check: either a check callback or a fix callback */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	enum pool_type type;	/* pool types this step applies to */
};
/* check steps executed in order; the all-NULL entry terminates the list */
static const struct step steps[] = {
	{
		.check = blk_hdr_check,
		.type = POOL_TYPE_BLK
	},
	{
		.fix = blk_hdr_fix,
		.type = POOL_TYPE_BLK
	},
	{
		.check = NULL,
		.fix = NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Check steps run directly; fix steps re-read the pmemblk header first
 * and then run through the question/answer loop.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	ASSERTeq(ppc->pool->params.type, POOL_TYPE_BLK);
	const struct step *step = &steps[loc->step++];
	if (!(step->type & ppc->pool->params.type))
		return 0;
	if (!step->fix)
		return step->check(ppc, loc);
	if (blk_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
 * check_blk -- entry point for pmemblk checks
 */
void
check_blk(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	/* do all checks */
	while (CHECK_NOT_COMPLETE(loc, steps)) {
		if (step_exe(ppc, loc))
			break;
	}
}
| 5,277 | 21.176471 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_sds.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* check_shutdown_state.c -- shutdown state check
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <endian.h>
#include "out.h"
#include "util_pmem.h"
#include "libpmempool.h"
#include "libpmem.h"
#include "pmempool.h"
#include "pool.h"
#include "set.h"
#include "check_util.h"
/* repair questions this module can ask the user */
enum question {
	Q_RESET_SDS,
};
/* user-visible message fragments; '|' separates info text from question */
#define SDS_CHECK_STR "checking shutdown state"
#define SDS_OK_STR "shutdown state correct"
#define SDS_DIRTY_STR "shutdown state is dirty"
#define ADR_FAILURE_STR \
	"an ADR failure was detected - your pool might be corrupted"
#define ZERO_SDS_STR \
	"Do you want to zero shutdown state?"
#define RESET_SDS_STR \
	"Do you want to reset shutdown state at your own risk? " \
	"If you have more than one replica you will have to " \
	"synchronize your pool after this operation."
/* pick the message depending on whether SDS checking is ignored */
#define SDS_FAIL_MSG(hdrp) \
	IGNORE_SDS(hdrp) ? SDS_DIRTY_STR : ADR_FAILURE_STR
#define SDS_REPAIR_MSG(hdrp) \
	IGNORE_SDS(hdrp) \
		? SDS_DIRTY_STR ".|" ZERO_SDS_STR \
		: ADR_FAILURE_STR ".|" RESET_SDS_STR
/*
 * sds_check_replica -- (internal) check if replica is healthy
 *
 * Returns 0 when the recorded shutdown state matches the current one
 * (or the replica is remote), nonzero otherwise.
 */
static int
sds_check_replica(location *loc)
{
	LOG(3, NULL);
	struct pool_replica *rep = REP(loc->set, loc->replica);
	if (rep->remote)
		return 0;
	/* make a copy of sds as we shouldn't modify a pool */
	struct shutdown_state old_sds = loc->hdr.sds;
	struct shutdown_state curr_sds;
	/* when SDS is ignored, the recorded state must simply be all-zero */
	if (IGNORE_SDS(&loc->hdr))
		return util_is_zeroed(&old_sds, sizeof(old_sds)) ? 0 : -1;
	shutdown_state_init(&curr_sds, NULL);
	/* get current shutdown state */
	for (unsigned p = 0; p < rep->nparts; ++p) {
		if (shutdown_state_add_part(&curr_sds,
				PART(rep, p)->fd, NULL))
			return -1;
	}
	/* compare current and old shutdown state */
	return shutdown_state_check(&curr_sds, &old_sds, NULL);
}
/*
 * sds_check -- (internal) check shutdown_state
 *
 * Returns 0 when the state is healthy or a repair question was queued;
 * returns the CHECK_ERR result and marks the pool NOT_CONSISTENT when
 * the state is invalid and repair was not requested.
 */
static int
sds_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);

	CHECK_INFO(ppc, "%s" SDS_CHECK_STR, loc->prefix);

	/* shutdown state is valid */
	if (!sds_check_replica(loc)) {
		CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
		/* nothing left to do for this replica */
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}

	/* shutdown state is NOT valid and can NOT be repaired */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		check_end(ppc->data);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		return CHECK_ERR(ppc, "%s%s", loc->prefix,
				SDS_FAIL_MSG(&loc->hdr));
	}

	/* shutdown state is NOT valid but can be repaired */
	CHECK_ASK(ppc, Q_RESET_SDS, "%s%s", loc->prefix,
			SDS_REPAIR_MSG(&loc->hdr));

	return check_questions_sequence_validate(ppc);
}
/*
 * sds_fix -- (internal) fix shutdown state
 *
 * Zeroes the in-memory copy of pool_hdr.sds; the caller (step_exe) is
 * responsible for writing the modified header back. Always returns 0,
 * including for unknown question ids (which are only logged).
 */
static int
sds_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *context)
{
	LOG(3, NULL);

	switch (question) {
	case Q_RESET_SDS:
		CHECK_INFO(ppc, "%sresetting pool_hdr.sds", loc->prefix);
		memset(&loc->hdr.sds, 0, sizeof(loc->hdr.sds));
		/* this replica now counts as healthy */
		++loc->healthy_replicas;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/* a single check step: exactly one of check/fix is set per entry */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};

static const struct step steps[] = {
	{
		.check = sds_check,
	},
	{
		.fix = sds_fix,
	},
	{
		/* NULL/NULL terminator - detected by CHECK_NOT_COMPLETE */
		.check = NULL,
		.fix = NULL,
	},
};

/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * For a fix step: applies queued answers via check_answer_loop and then
 * writes the (possibly modified) header back to the pool in on-media
 * little-endian order. Returns 0 on success, -1 when the answer loop fails.
 */
static int
step_exe(PMEMpoolcheck *ppc, const struct step *steps, location *loc)
{
	const struct step *step = &steps[loc->step++];

	if (!step->fix)
		return step->check(ppc, loc);

	/* nothing to fix until the user has answered the queued question */
	if (!check_has_answer(ppc->data))
		return 0;

	if (check_answer_loop(ppc, loc, NULL, 0 /* fail on no */, step->fix))
		return -1;

	/* persist the fixed header: convert to LE, copy, flush, convert back */
	util_convert2le_hdr(&loc->hdr);

	memcpy(loc->hdrp, &loc->hdr, sizeof(loc->hdr));
	util_persist_auto(loc->is_dev_dax, loc->hdrp, sizeof(*loc->hdrp));

	util_convert2h_hdr_nocheck(&loc->hdr);
	loc->pool_hdr_modified = 1;

	return 0;
}
/*
 * init_prefix -- prepare prefix for messages
 *
 * For multi-replica poolsets the prefix names the replica; otherwise it
 * is empty. Also rewinds the step counter for the location.
 */
static void
init_prefix(location *loc)
{
	loc->prefix[0] = '\0';

	if (loc->set->nreplicas > 1) {
		if (util_snprintf(loc->prefix, PREFIX_MAX_SIZE,
				"replica %u: ", loc->replica) < 0)
			FATAL("!snprintf");
	}

	loc->step = 0;
}
/*
 * init_location_data -- (internal) prepare location information
 *
 * Loads a host-byte-order copy of the current replica's first part
 * header into loc->hdr and records the mapped header pointer.
 */
static void
init_location_data(PMEMpoolcheck *ppc, location *loc)
{
	ASSERTeq(loc->part, 0);

	loc->set = ppc->pool->set_file->poolset;

	/* keep the existing prefix while queued answers are being processed */
	if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS)
		init_prefix(loc);

	struct pool_replica *rep = REP(loc->set, loc->replica);
	loc->hdrp = HDR(rep, loc->part);
	memcpy(&loc->hdr, loc->hdrp, sizeof(loc->hdr));
	util_convert2h_hdr_nocheck(&loc->hdr);
	loc->is_dev_dax = PART(rep, 0)->is_dev_dax;
}

/*
 * sds_get_healthy_replicas_num -- (internal) get number of healthy replicas
 *
 * Counts healthy replicas into loc->healthy_replicas; resets loc->replica
 * to 0 afterwards so the caller can iterate again.
 */
static void
sds_get_healthy_replicas_num(PMEMpoolcheck *ppc, location *loc)
{
	const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
	loc->healthy_replicas = 0;
	loc->part = 0;

	for (; loc->replica < nreplicas; loc->replica++) {
		init_location_data(ppc, loc);

		if (!sds_check_replica(loc)) {
			++loc->healthy_replicas; /* healthy replica found */
		}
	}

	loc->replica = 0; /* reset replica index */
}
/*
 * check_sds -- entry point for shutdown state checks
 *
 * If all replicas are healthy only a summary is printed. If some (but not
 * all) are healthy the pool needs synchronization (sync_required). If none
 * are healthy the repair steps run per replica, trying to produce a single
 * healthy replica.
 */
void
check_sds(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
	location *loc = check_get_step_data(ppc->data);

	/* one-time classification of replicas; skipped on re-entry */
	if (!loc->init_done) {
		sds_get_healthy_replicas_num(ppc, loc);

		if (loc->healthy_replicas == nreplicas) {
			/* all replicas have healthy shutdown state */
			/* print summary */
			for (; loc->replica < nreplicas; loc->replica++) {
				init_prefix(loc);
				CHECK_INFO(ppc, "%s" SDS_CHECK_STR,
					loc->prefix);
				CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
			}
			return;
		} else if (loc->healthy_replicas > 0) {
			ppc->sync_required = true;
			return;
		}
		loc->init_done = true;
	}

	/* produce single healthy replica */
	loc->part = 0;
	for (; loc->replica < nreplicas; loc->replica++) {
		init_location_data(ppc, loc);

		while (CHECK_NOT_COMPLETE(loc, steps)) {
			ASSERT(loc->step < ARRAY_SIZE(steps));
			if (step_exe(ppc, steps, loc))
				return;
		}

		/* stop as soon as one replica has been repaired */
		if (loc->healthy_replicas)
			break;
	}

	if (loc->healthy_replicas == 0) {
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		CHECK_ERR(ppc, "cannot complete repair, reverting changes");
	} else if (loc->healthy_replicas < nreplicas) {
		ppc->sync_required = true;
	}
}
| 6,571 | 21.662069 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_log.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_log.c -- check pmemlog
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* repair question identifiers for the pmemlog header fields */
enum question {
	Q_LOG_START_OFFSET,
	Q_LOG_END_OFFSET,
	Q_LOG_WRITE_OFFSET,
};

/*
 * log_read -- (internal) read pmemlog header
 *
 * Reads the pmemlog structure (minus the common pool_hdr) into
 * ppc->pool->hdr.log and converts it to host byte order.
 * Returns 0 on success, the CHECK_ERR result on read failure.
 */
static int
log_read(PMEMpoolcheck *ppc)
{
	/*
	 * Here we want to read the pmemlog header without the pool_hdr as we've
	 * already done it before.
	 *
	 * Take the pointer to fields right after pool_hdr, compute the size and
	 * offset of remaining fields.
	 */
	uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.log;
	ptr += sizeof(ppc->pool->hdr.log.hdr);

	size_t size = sizeof(ppc->pool->hdr.log) -
		sizeof(ppc->pool->hdr.log.hdr);
	uint64_t offset = sizeof(ppc->pool->hdr.log.hdr);

	if (pool_read(ppc->pool, ptr, size, offset))
		return CHECK_ERR(ppc, "cannot read pmemlog structure");

	/* endianness conversion */
	log_convert2h(&ppc->pool->hdr.log);

	return 0;
}
/*
 * log_hdr_check -- (internal) check pmemlog header
 *
 * Validates start_offset, end_offset and write_offset against the
 * expected defaults and the pool file size; queues a repair question for
 * each invalid field. Returns 0 when consistent or questions were queued,
 * -1 when invalid and repair was not requested.
 */
static int
log_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);

	CHECK_INFO(ppc, "checking pmemlog header");

	if (log_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}

	/* determine constant values for pmemlog */
	const uint64_t d_start_offset =
		roundup(sizeof(ppc->pool->hdr.log), LOG_FORMAT_DATA_ALIGN);

	if (ppc->pool->hdr.log.start_offset != d_start_offset) {
		if (CHECK_ASK(ppc, Q_LOG_START_OFFSET,
				"invalid pmemlog.start_offset: 0x%jx.|Do you "
				"want to set pmemlog.start_offset to default "
				"0x%jx?",
				ppc->pool->hdr.log.start_offset,
				d_start_offset))
			goto error;
	}

	if (ppc->pool->hdr.log.end_offset != ppc->pool->set_file->size) {
		if (CHECK_ASK(ppc, Q_LOG_END_OFFSET,
				"invalid pmemlog.end_offset: 0x%jx.|Do you "
				"want to set pmemlog.end_offset to 0x%jx?",
				ppc->pool->hdr.log.end_offset,
				ppc->pool->set_file->size))
			goto error;
	}

	/* write_offset must stay within [start_offset, file size] */
	if (ppc->pool->hdr.log.write_offset < d_start_offset ||
		ppc->pool->hdr.log.write_offset > ppc->pool->set_file->size) {
		if (CHECK_ASK(ppc, Q_LOG_WRITE_OFFSET,
				"invalid pmemlog.write_offset: 0x%jx.|Do you "
				"want to set pmemlog.write_offset to "
				"pmemlog.end_offset?",
				ppc->pool->hdr.log.write_offset))
			goto error;
	}

	if (ppc->result == CHECK_RESULT_CONSISTENT ||
		ppc->result == CHECK_RESULT_REPAIRED)
		CHECK_INFO(ppc, "pmemlog header correct");

	return check_questions_sequence_validate(ppc);

error:
	ppc->result = CHECK_RESULT_NOT_CONSISTENT;
	check_end(ppc->data);
	return -1;
}
/*
 * log_hdr_fix -- (internal) fix pmemlog header
 *
 * Applies the user-approved repair for a single question id; modifies the
 * in-memory pmemlog header only. Always returns 0 (unknown ids are just
 * logged).
 */
static int
log_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
	LOG(3, NULL);

	uint64_t d_start_offset;

	switch (question) {
	case Q_LOG_START_OFFSET:
		/* determine constant values for pmemlog */
		d_start_offset = roundup(sizeof(ppc->pool->hdr.log),
			LOG_FORMAT_DATA_ALIGN);
		CHECK_INFO(ppc, "setting pmemlog.start_offset to 0x%jx",
			d_start_offset);
		ppc->pool->hdr.log.start_offset = d_start_offset;
		break;
	case Q_LOG_END_OFFSET:
		CHECK_INFO(ppc, "setting pmemlog.end_offset to 0x%jx",
			ppc->pool->set_file->size);
		ppc->pool->hdr.log.end_offset = ppc->pool->set_file->size;
		break;
	case Q_LOG_WRITE_OFFSET:
		CHECK_INFO(ppc, "setting pmemlog.write_offset to "
			"pmemlog.end_offset");
		ppc->pool->hdr.log.write_offset = ppc->pool->set_file->size;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}

	return 0;
}
/* a single check step; type restricts which pool kinds it applies to */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	enum pool_type type;
};

static const struct step steps[] = {
	{
		.check = log_hdr_check,
		.type = POOL_TYPE_LOG
	},
	{
		.fix = log_hdr_fix,
		.type = POOL_TYPE_LOG
	},
	{
		/* NULL/NULL terminator - detected by CHECK_NOT_COMPLETE */
		.check = NULL,
		.fix = NULL,
	},
};

/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Before a fix step runs, the pmemlog header is re-read so the fix
 * operates on fresh data. Returns 0 on success, -1 on error.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	ASSERTeq(ppc->pool->params.type, POOL_TYPE_LOG);

	const struct step *step = &steps[loc->step++];

	/* skip steps that do not apply to this pool type */
	if (!(step->type & ppc->pool->params.type))
		return 0;

	if (!step->fix)
		return step->check(ppc, loc);

	if (log_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}

	return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
 * check_log -- entry point for pmemlog checks
 *
 * Runs the check steps for a pmemlog pool until the step sequence is
 * complete or one of the steps signals a stop.
 */
void
check_log(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);

	location *loc = check_get_step_data(ppc->data);

	/* execute consecutive steps; a non-zero result aborts the sequence */
	for (;;) {
		if (!CHECK_NOT_COMPLETE(loc, steps))
			break;
		if (step_exe(ppc, loc) != 0)
			break;
	}
}
| 4,760 | 21.671429 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_util.h -- internal definitions check util
*/
#ifndef CHECK_UTIL_H
#define CHECK_UTIL_H
#include <time.h>
#include <limits.h>
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
#define CHECK_STEP_COMPLETE UINT_MAX
#define CHECK_INVALID_QUESTION UINT_MAX
#define REQUIRE_ADVANCED "the following error can be fixed using " \
"PMEMPOOL_CHECK_ADVANCED flag"
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
/* check control context */
struct check_data;
struct arena;
/* queue of check statuses */
struct check_status;
/* container storing state of all check steps */
#define PREFIX_MAX_SIZE 30
typedef struct {
	unsigned init_done;	/* one-time initialization already performed */
	unsigned step;		/* index into the current module's steps[] */
	unsigned replica;	/* replica currently being processed */
	unsigned part;		/* part currently being processed */
	int single_repl;
	int single_part;

	struct pool_set *set;
	int is_dev_dax;		/* part lives on a device DAX */

	struct pool_hdr *hdrp;	/* mapped (on-media) pool header */
	/* copy of the pool header in host byte order */
	struct pool_hdr hdr;
	int hdr_valid;
	/*
	 * If pool header has been modified this field indicates that
	 * the pool parameters structure requires refresh.
	 */
	int pool_hdr_modified;

	unsigned healthy_replicas;	/* replicas with valid shutdown state */

	struct pool_hdr *next_part_hdrp;
	struct pool_hdr *prev_part_hdrp;
	struct pool_hdr *next_repl_hdrp;
	struct pool_hdr *prev_repl_hdrp;

	int next_part_hdr_valid;
	int prev_part_hdr_valid;
	int next_repl_hdr_valid;
	int prev_repl_hdr_valid;

	/* valid poolset uuid */
	uuid_t *valid_puuid;
	/* valid part uuid */
	uuid_t *valid_uuid;

	/* valid part pool header */
	struct pool_hdr *valid_part_hdrp;
	int valid_part_done;
	unsigned valid_part_replica;

	char prefix[PREFIX_MAX_SIZE];	/* message prefix, e.g. "replica 1: " */

	/* BTT-related state (used by the btt_info/map/flog checks) */
	struct arena *arenap;
	uint64_t offset;
	uint32_t narena;
	uint8_t *bitmap;
	uint8_t *dup_bitmap;
	uint8_t *fbitmap;
	struct list *list_inval;
	struct list *list_flog_inval;
	struct list *list_unmap;
	struct {
		int btti_header;
		int btti_backup;
	} valid;
	struct {
		struct btt_info btti;
		uint64_t btti_offset;
	} pool_valid;
} location;
/* check steps */
void check_bad_blocks(PMEMpoolcheck *ppc);
void check_backup(PMEMpoolcheck *ppc);
void check_pool_hdr(PMEMpoolcheck *ppc);
void check_pool_hdr_uuids(PMEMpoolcheck *ppc);
void check_sds(PMEMpoolcheck *ppc);
void check_log(PMEMpoolcheck *ppc);
void check_blk(PMEMpoolcheck *ppc);
void check_btt_info(PMEMpoolcheck *ppc);
void check_btt_map_flog(PMEMpoolcheck *ppc);
void check_write(PMEMpoolcheck *ppc);
struct check_data *check_data_alloc(void);
void check_data_free(struct check_data *data);
uint32_t check_step_get(struct check_data *data);
void check_step_inc(struct check_data *data);
location *check_get_step_data(struct check_data *data);
void check_end(struct check_data *data);
int check_is_end_util(struct check_data *data);
int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
uint32_t arg, const char *fmt, ...) FORMAT_PRINTF(4, 5);
void check_status_release(PMEMpoolcheck *ppc, struct check_status *status);
void check_clear_status_cache(struct check_data *data);
struct check_status *check_pop_question(struct check_data *data);
struct check_status *check_pop_error(struct check_data *data);
struct check_status *check_pop_info(struct check_data *data);
bool check_has_error(struct check_data *data);
bool check_has_answer(struct check_data *data);
int check_push_answer(PMEMpoolcheck *ppc);
struct pmempool_check_status *check_status_get_util(
struct check_status *status);
int check_status_is(struct check_status *status,
enum pmempool_check_msg_type type);
/* create info status */
#define CHECK_INFO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO, 0, __VA_ARGS__)
/* create info status and append error message based on errno */
#define CHECK_INFO_ERRNO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO,\
(uint32_t)errno, __VA_ARGS__)
/* create error status */
#define CHECK_ERR(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_ERROR, 0, __VA_ARGS__)
/* create question status */
#define CHECK_ASK(ppc, question, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, question,\
__VA_ARGS__)
/* true while steps remain: not marked complete and current entry non-NULL */
#define CHECK_NOT_COMPLETE(loc, steps)\
	((loc)->step != CHECK_STEP_COMPLETE &&\
	((steps)[(loc)->step].check != NULL ||\
	(steps)[(loc)->step].fix != NULL))
int check_answer_loop(PMEMpoolcheck *ppc, location *data,
void *ctx, int fail_on_no,
int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx));
int check_questions_sequence_validate(PMEMpoolcheck *ppc);
const char *check_get_time_str(time_t time);
const char *check_get_uuid_str(uuid_t uuid);
const char *check_get_pool_type_str(enum pool_type type);
void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap);
#ifdef _WIN32
void cache_to_utf8(struct check_data *data, char *buf, size_t size);
#endif
#define CHECK_IS(ppc, flag)\
util_flag_isset((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_IS_NOT(ppc, flag)\
util_flag_isclr((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_WITHOUT_FIXING(ppc)\
CHECK_IS_NOT(ppc, REPAIR) || CHECK_IS(ppc, DRY_RUN)
#ifdef __cplusplus
}
#endif
#endif
| 5,143 | 25.111675 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_bad_blocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_bad_blocks.c -- pre-check bad_blocks
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#include "set_badblocks.h"
#include "badblocks.h"
/*
 * check_bad_blocks -- check poolset for bad_blocks
 *
 * Sets ppc->result: CONSISTENT when the check is skipped or passes,
 * CANNOT_REPAIR when bad blocks are found (or the OS lacks support),
 * ERROR when the check itself fails.
 */
void
check_bad_blocks(PMEMpoolcheck *ppc)
{
	LOG(3, "ppc %p", ppc);

	int ret;

	/* the check runs only when the pool opted in via the compat flag */
	if (!(ppc->pool->params.features.compat & POOL_FEAT_CHECK_BAD_BLOCKS)) {
		/* skipping checking poolset for bad blocks */
		ppc->result = CHECK_RESULT_CONSISTENT;
		return;
	}

	if (ppc->pool->set_file->poolset) {
		ret = badblocks_check_poolset(ppc->pool->set_file->poolset, 0);
	} else {
		ret = badblocks_check_file(ppc->pool->set_file->fname);
	}

	if (ret < 0) {
		/* ENOTSUP - bad block detection not available on this OS */
		if (errno == ENOTSUP) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			CHECK_ERR(ppc, BB_NOT_SUPP);
			return;
		}
		ppc->result = CHECK_RESULT_ERROR;
		CHECK_ERR(ppc, "checking poolset for bad blocks failed -- '%s'",
				ppc->path);
		return;
	}

	if (ret > 0) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_ERR(ppc,
			"poolset contains bad blocks, use 'pmempool info --bad-blocks=yes' to print or 'pmempool sync --bad-blocks' to clear them");
	}
}
| 1,329 | 20.803279 | 127 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/feature.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* feature.c -- implementation of pmempool_feature_(enable|disable|query)()
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include "libpmempool.h"
#include "util_pmem.h"
#include "pool_hdr.h"
#include "pool.h"
#define RW 0
#define RDONLY 1
#define FEATURE_INCOMPAT(X) \
(features_t)FEAT_INCOMPAT(X)
static const features_t f_singlehdr = FEAT_INCOMPAT(SINGLEHDR);
static const features_t f_cksum_2k = FEAT_INCOMPAT(CKSUM_2K);
static const features_t f_sds = FEAT_INCOMPAT(SDS);
static const features_t f_chkbb = FEAT_COMPAT(CHECK_BAD_BLOCKS);
#define FEAT_INVALID \
{UINT32_MAX, UINT32_MAX, UINT32_MAX};
static const features_t f_invalid = FEAT_INVALID;
#define FEATURE_MAXPRINT ((size_t)1024)
/*
 * buff_concat -- (internal) concat formatted string to string buffer
 *
 * Appends at *pos of a FEATURE_MAXPRINT-sized buffer and advances *pos.
 * Returns 0 on success, a negative value on formatting error, -1 when
 * the output would be truncated.
 */
static int
buff_concat(char *buff, size_t *pos, const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	/* reserve one byte for the terminating NUL */
	const size_t size = FEATURE_MAXPRINT - *pos - 1;
	int ret = vsnprintf(buff + *pos, size, fmt, ap);
	va_end(ap);

	if (ret < 0) {
		/* fixed: the message named vsprintf, but vsnprintf is used */
		ERR("vsnprintf");
		return ret;
	}

	if ((size_t)ret >= size) {
		ERR("buffer truncated %d >= %zu", ret, size);
		return -1;
	}

	*pos += (size_t)ret;
	return 0;
}
/*
 * buff_concat_features -- (internal) append a features triple to the buffer
 *
 * Formats the compat/incompat/ro_compat words of f and delegates to
 * buff_concat; returns its result.
 */
static int
buff_concat_features(char *buff, size_t *pos, features_t f)
{
	int rc = buff_concat(buff, pos,
			"{compat 0x%x, incompat 0x%x, ro_compat 0x%x}",
			f.compat, f.incompat, f.ro_compat);
	return rc;
}
/*
 * poolset_close -- (internal) close pool set
 *
 * Unmaps all part headers of every (local) replica and closes the set
 * without deleting the part files.
 */
static void
poolset_close(struct pool_set *set)
{
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		ASSERT(!rep->remote);
		for (unsigned p = 0; p < rep->nparts; ++p) {
			util_unmap_hdr(PART(rep, p));
		}
	}

	util_poolset_close(set, DO_NOT_DELETE_PARTS);
}

/*
 * features_check -- (internal) check if features are correct
 *
 * On the first call (*features == f_invalid) records the header's feature
 * set; on subsequent calls verifies it matches. Rejects headers carrying
 * unknown feature bits. Returns 0 on success, -1 on mismatch or unknown
 * features. NOTE: uses a static message buffer - not reentrant.
 */
static int
features_check(features_t *features, struct pool_hdr *hdrp)
{
	static char msg[FEATURE_MAXPRINT];

	struct pool_hdr hdr;
	memcpy(&hdr, hdrp, sizeof(hdr));
	util_convert2h_hdr_nocheck(&hdr);

	/* (*features != f_invalid) <=> *features has already been recorded */
	if (!util_feature_cmp(*features, f_invalid)) {
		/* features from current and previous headers have to match */
		if (!util_feature_cmp(*features, hdr.features)) {
			size_t pos = 0;
			if (buff_concat_features(msg, &pos, hdr.features))
				goto err;
			if (buff_concat(msg, &pos, "%s", " != "))
				goto err;
			if (buff_concat_features(msg, &pos, *features))
				goto err;
			ERR("features mismatch detected: %s", msg);
			return -1;
		} else {
			return 0;
		}
	}

	features_t unknown = util_get_unknown_features(
			hdr.features, (features_t)POOL_FEAT_VALID);

	/* all features are known */
	if (util_feature_is_zero(unknown)) {
		memcpy(features, &hdr.features, sizeof(*features));
		return 0;
	}

	/* unknown features detected - print error message */
	size_t pos = 0;
	if (buff_concat_features(msg, &pos, unknown))
		goto err;
	ERR("invalid features detected: %s", msg);
err:
	return -1;
}
/*
 * get_pool_open_flags -- (internal) generate pool open flags
 *
 * Always ignores bad blocks; additionally requests copy-on-write when
 * opening read-only a set that contains no device DAX parts.
 */
static inline unsigned
get_pool_open_flags(struct pool_set *set, int rdonly)
{
	unsigned flags = POOL_OPEN_IGNORE_BAD_BLOCKS;

	if (rdonly == RDONLY && !util_pool_has_device_dax(set))
		flags |= POOL_OPEN_COW;

	return flags;
}

/*
 * get_mmap_flags -- (internal) generate mmap flags
 *
 * Device DAX parts are always mapped shared; regular files are mapped
 * privately for read-only access and shared otherwise.
 */
static inline int
get_mmap_flags(struct pool_set_part *part, int rdonly)
{
	if (!part->is_dev_dax && rdonly)
		return MAP_PRIVATE;

	return MAP_SHARED;
}
/*
 * poolset_open -- (internal) open pool set
 *
 * Parses the poolset at path, opens it (copy-on-write when read-only and
 * possible), maps all part headers and validates that every header carries
 * the same, known feature set. Returns the open set on success, NULL on
 * failure (with all partial state rolled back).
 */
static struct pool_set *
poolset_open(const char *path, int rdonly)
{
	struct pool_set *set;
	features_t features = FEAT_INVALID;

	/* read poolset */
	int ret = util_poolset_create_set(&set, path, 0, 0, true);
	if (ret < 0) {
		ERR("cannot open pool set -- '%s'", path);
		goto err_poolset;
	}
	if (set->remote) {
		ERR("poolsets with remote replicas are not supported");
		errno = EINVAL;
		goto err_open;
	}

	/* open a memory pool */
	unsigned flags = get_pool_open_flags(set, rdonly);
	if (util_pool_open_nocheck(set, flags))
		goto err_open;

	/* map all headers and check features */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		ASSERT(!rep->remote);

		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct pool_set_part *part = PART(rep, p);
			int mmap_flags = get_mmap_flags(part, rdonly);
			if (util_map_hdr(part, mmap_flags, rdonly)) {
				/* mark unmapped so cleanup skips this part */
				part->hdr = NULL;
				goto err_map_hdr;
			}

			if (features_check(&features, HDR(rep, p))) {
				ERR(
					"invalid features - replica #%d part #%d",
					r, p);
				goto err_open;
			}
		}
	}
	return set;

err_map_hdr:
	/* unmap all headers */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		ASSERT(!rep->remote);
		for (unsigned p = 0; p < rep->nparts; ++p) {
			util_unmap_hdr(PART(rep, p));
		}
	}
err_open:
	/* close the memory pool and release pool set structure */
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_poolset:
	return NULL;
}
/*
 * get_hdr -- (internal) read header in host byte order
 *
 * NOTE: returns a pointer to a static buffer - not reentrant; every call
 * overwrites the previous result.
 */
static struct pool_hdr *
get_hdr(struct pool_set *set, unsigned rep, unsigned part)
{
	static struct pool_hdr hdr;

	/* copy header */
	struct pool_hdr *hdrp = HDR(REP(set, rep), part);
	memcpy(&hdr, hdrp, sizeof(hdr));

	/* convert to host byte order and return */
	util_convert2h_hdr_nocheck(&hdr);
	return &hdr;
}

/*
 * set_hdr -- (internal) convert header to little-endian, checksum and write
 *
 * Recomputes the checksum over the header (minus the skipped tail region),
 * writes it to the mapped part and persists the write.
 */
static void
set_hdr(struct pool_set *set, unsigned rep, unsigned part, struct pool_hdr *src)
{
	/* convert to little-endian and set new checksum */
	const size_t skip_off = POOL_HDR_CSUM_END_OFF(src);
	util_convert2le_hdr(src);
	util_checksum(src, sizeof(*src), &src->checksum, 1, skip_off);

	/* write header */
	struct pool_replica *replica = REP(set, rep);
	struct pool_hdr *dst = HDR(replica, part);
	memcpy(dst, src, sizeof(*src));
	util_persist_auto(PART(replica, part)->is_dev_dax, dst, sizeof(*src));
}
/* desired/observed state of a feature flag in a pool header */
typedef enum {
	DISABLED,
	ENABLED
} fstate_t;
#define FEATURE_IS_ENABLED_STR	"feature already enabled: %s"
#define FEATURE_IS_DISABLED_STR	"feature already disabled: %s"

/*
 * require_feature_is -- (internal) check if required feature is enabled
 * (or disabled)
 *
 * Only the first part of the first replica is consulted. Returns 1 when
 * the feature is in the required state, 0 otherwise (with a log message).
 */
static int
require_feature_is(struct pool_set *set, features_t feature, fstate_t req_state)
{
	struct pool_hdr *hdrp = get_hdr((set), 0, 0);
	fstate_t state = util_feature_is_set(hdrp->features, feature)
		? ENABLED : DISABLED;
	if (state == req_state)
		return 1;

	const char *msg = (state == ENABLED)
		? FEATURE_IS_ENABLED_STR : FEATURE_IS_DISABLED_STR;
	LOG(3, msg, util_feature2str(feature, NULL));
	return 0;
}
#define FEATURE_IS_NOT_ENABLED_PRIOR_STR "enable %s prior to %s %s"
#define FEATURE_IS_NOT_DISABLED_PRIOR_STR "disable %s prior to %s %s"

/*
 * require_other_feature_is -- (internal) check if other feature is enabled
 * (or disabled) in case the other feature has to be enabled (or disabled)
 * prior to the main one
 *
 * Returns 1 when the prerequisite holds, 0 otherwise (with an error
 * message naming both features and the attempted operation in 'cause').
 */
static int
require_other_feature_is(struct pool_set *set, features_t other,
	fstate_t req_state, features_t feature, const char *cause)
{
	struct pool_hdr *hdrp = get_hdr((set), 0, 0);
	fstate_t state = util_feature_is_set(hdrp->features, other)
		? ENABLED : DISABLED;
	if (state == req_state)
		return 1;

	const char *msg = (req_state == ENABLED)
		? FEATURE_IS_NOT_ENABLED_PRIOR_STR
		: FEATURE_IS_NOT_DISABLED_PRIOR_STR;
	ERR(msg, util_feature2str(other, NULL),
		cause, util_feature2str(feature, NULL));
	return 0;
}
/*
 * feature_set -- (internal) enable (or disable) feature
 *
 * Updates the feature bit in the header of every part of every replica
 * and writes each modified header back (checksummed and persisted).
 */
static void
feature_set(struct pool_set *set, features_t feature, int value)
{
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		for (unsigned p = 0; p < REP(set, r)->nparts; ++p) {
			struct pool_hdr *hdrp = get_hdr(set, r, p);
			if (value == ENABLED)
				util_feature_enable(&hdrp->features, feature);
			else
				util_feature_disable(&hdrp->features, feature);
			set_hdr(set, r, p, hdrp);
		}
	}
}

/*
 * query_feature -- (internal) query feature value
 *
 * Opens the poolset read-only and reports whether the feature bit is set
 * in the first header. Returns 1/0, or -1 when the poolset cannot be
 * opened.
 */
static int
query_feature(const char *path, features_t feature)
{
	struct pool_set *set = poolset_open(path, RDONLY);
	if (!set)
		goto err_open;

	struct pool_hdr *hdrp = get_hdr(set, 0, 0);
	const int query = util_feature_is_set(hdrp->features, feature);

	poolset_close(set);

	return query;

err_open:
	return -1;
}
/*
 * unsupported_feature -- (internal) report unsupported feature
 *
 * Logs the feature name, sets errno to EINVAL and returns -1.
 */
static inline int
unsupported_feature(features_t feature)
{
	ERR("unsupported feature: %s", util_feature2str(feature, NULL));
	errno = EINVAL;
	return -1;
}

/*
 * enable_singlehdr -- (internal) enable POOL_FEAT_SINGLEHDR
 *
 * Toggling SINGLEHDR is not supported - always fails with EINVAL.
 */
static int
enable_singlehdr(const char *path)
{
	return unsupported_feature(f_singlehdr);
}

/*
 * disable_singlehdr -- (internal) disable POOL_FEAT_SINGLEHDR
 *
 * Toggling SINGLEHDR is not supported - always fails with EINVAL.
 */
static int
disable_singlehdr(const char *path)
{
	return unsupported_feature(f_singlehdr);
}

/*
 * query_singlehdr -- (internal) query POOL_FEAT_SINGLEHDR
 */
static int
query_singlehdr(const char *path)
{
	return query_feature(path, f_singlehdr);
}
/*
 * enable_checksum_2k -- (internal) enable POOL_FEAT_CKSUM_2K
 *
 * No-op if already enabled. Returns 0 on success, -1 when the poolset
 * cannot be opened.
 */
static int
enable_checksum_2k(const char *path)
{
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;
	if (require_feature_is(set, f_cksum_2k, DISABLED))
		feature_set(set, f_cksum_2k, ENABLED);

	poolset_close(set);
	return 0;
}

/*
 * disable_checksum_2k -- (internal) disable POOL_FEAT_CKSUM_2K
 *
 * Requires POOL_FEAT_SDS to be disabled first. Returns 0 on success,
 * -1 on failure.
 */
static int
disable_checksum_2k(const char *path)
{
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;

	int ret = 0;
	if (!require_feature_is(set, f_cksum_2k, ENABLED))
		goto exit;

	/* check if POOL_FEAT_SDS is disabled */
	if (!require_other_feature_is(set, f_sds, DISABLED,
			f_cksum_2k, "disabling")) {
		ret = -1;
		goto exit;
	}

	feature_set(set, f_cksum_2k, DISABLED);
exit:
	poolset_close(set);
	return ret;
}

/*
 * query_checksum_2k -- (internal) query POOL_FEAT_CKSUM_2K
 */
static int
query_checksum_2k(const char *path)
{
	return query_feature(path, f_cksum_2k);
}
/*
 * enable_shutdown_state -- (internal) enable POOL_FEAT_SDS
 *
 * Requires POOL_FEAT_CKSUM_2K to be enabled first. Returns 0 on success,
 * -1 on failure.
 */
static int
enable_shutdown_state(const char *path)
{
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;

	int ret = 0;
	if (!require_feature_is(set, f_sds, DISABLED))
		goto exit;

	/* check if POOL_FEAT_CKSUM_2K is enabled */
	if (!require_other_feature_is(set, f_cksum_2k, ENABLED,
			f_sds, "enabling")) {
		ret = -1;
		goto exit;
	}

	feature_set(set, f_sds, ENABLED);

exit:
	poolset_close(set);
	return ret;
}

/*
 * reset_shutdown_state -- zero all shutdown structures
 *
 * Re-initializes pool_hdr.sds in every part of every replica.
 */
static void
reset_shutdown_state(struct pool_set *set)
{
	for (unsigned rep = 0; rep < set->nreplicas; ++rep) {
		for (unsigned part = 0; part < REP(set, rep)->nparts; ++part) {
			struct pool_hdr *hdrp = HDR(REP(set, rep), part);
			shutdown_state_init(&hdrp->sds, REP(set, rep));
		}
	}
}

/*
 * disable_shutdown_state -- (internal) disable POOL_FEAT_SDS
 *
 * Clears the feature flag and resets the stored shutdown state.
 * Returns 0 on success, -1 when the poolset cannot be opened.
 */
static int
disable_shutdown_state(const char *path)
{
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;

	if (require_feature_is(set, f_sds, ENABLED)) {
		feature_set(set, f_sds, DISABLED);
		reset_shutdown_state(set);
	}

	poolset_close(set);
	return 0;
}

/*
 * query_shutdown_state -- (internal) query POOL_FEAT_SDS
 */
static int
query_shutdown_state(const char *path)
{
	return query_feature(path, f_sds);
}
/*
 * enable_badblocks_checking -- (internal) enable POOL_FEAT_CHECK_BAD_BLOCKS
 *
 * Not available on Windows. Returns 0 on success, -1 on failure.
 */
static int
enable_badblocks_checking(const char *path)
{
#ifdef _WIN32
	ERR("bad blocks checking is not supported on Windows");
	return -1;
#else
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;

	if (require_feature_is(set, f_chkbb, DISABLED))
		feature_set(set, f_chkbb, ENABLED);

	poolset_close(set);

	return 0;
#endif
}

/*
 * disable_badblocks_checking -- (internal) disable POOL_FEAT_CHECK_BAD_BLOCKS
 *
 * Returns 0 on success (including when already disabled), -1 when the
 * poolset cannot be opened.
 */
static int
disable_badblocks_checking(const char *path)
{
	struct pool_set *set = poolset_open(path, RW);
	if (!set)
		return -1;

	int ret = 0;
	if (!require_feature_is(set, f_chkbb, ENABLED))
		goto exit;

	feature_set(set, f_chkbb, DISABLED);
exit:
	poolset_close(set);

	return ret;
}

/*
 * query_badblocks_checking -- (internal) query POOL_FEAT_CHECK_BAD_BLOCKS
 */
static int
query_badblocks_checking(const char *path)
{
	return query_feature(path, f_chkbb);
}
/* per-feature operation table; indexed by enum pmempool_feature values */
struct feature_funcs {
	int (*enable)(const char *);
	int (*disable)(const char *);
	int (*query)(const char *);
};

static struct feature_funcs features[] = {
	{
		.enable = enable_singlehdr,
		.disable = disable_singlehdr,
		.query = query_singlehdr
	},
	{
		.enable = enable_checksum_2k,
		.disable = disable_checksum_2k,
		.query = query_checksum_2k
	},
	{
		.enable = enable_shutdown_state,
		.disable = disable_shutdown_state,
		.query = query_shutdown_state
	},
	{
		.enable = enable_badblocks_checking,
		.disable = disable_badblocks_checking,
		.query = query_badblocks_checking
	},
};

#define FEATURE_FUNCS_MAX ARRAY_SIZE(features)
/*
 * are_flags_valid -- (internal) check if flags are valid
 *
 * Only flags == 0 is accepted; anything else logs an error, sets
 * errno = EINVAL and returns 0.
 */
static inline int
are_flags_valid(unsigned flags)
{
	if (flags == 0)
		return 1;

	ERR("invalid flags: 0x%x", flags);
	errno = EINVAL;
	return 0;
}

/*
 * is_feature_valid -- (internal) check if feature is valid
 *
 * The feature value must index into the features[] table; otherwise an
 * error is logged, errno = EINVAL is set and 0 is returned.
 */
static inline int
is_feature_valid(uint32_t feature)
{
	if (feature < FEATURE_FUNCS_MAX)
		return 1;

	ERR("invalid feature: 0x%x", feature);
	errno = EINVAL;
	return 0;
}
/*
 * pmempool_feature_enableU -- enable pool set feature
 *
 * Validates feature/flags and dispatches to the per-feature enable
 * handler. Returns its result, or -1 on invalid arguments.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);
	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].enable(path);
}

/*
 * pmempool_feature_disableU -- disable pool set feature
 *
 * Validates feature/flags and dispatches to the per-feature disable
 * handler. Returns its result, or -1 on invalid arguments.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);
	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].disable(path);
}

/*
 * pmempool_feature_queryU -- query pool set feature
 *
 * Validates feature/flags and dispatches to the per-feature query
 * handler. Returns 1/0, or -1 on invalid arguments or failure.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);

	/*
	 * XXX: Windows does not allow function call in a constant expressions
	 */
#ifndef _WIN32
	/* compile-time check: incompat bits map onto the public enum values */
#define CHECK_INCOMPAT_MAPPING(FEAT, ENUM) \
	COMPILE_ERROR_ON( \
		util_feature2pmempool_feature(FEATURE_INCOMPAT(FEAT)) != ENUM)

	CHECK_INCOMPAT_MAPPING(SINGLEHDR, PMEMPOOL_FEAT_SINGLEHDR);
	CHECK_INCOMPAT_MAPPING(CKSUM_2K, PMEMPOOL_FEAT_CKSUM_2K);
	CHECK_INCOMPAT_MAPPING(SDS, PMEMPOOL_FEAT_SHUTDOWN_STATE);

#undef CHECK_INCOMPAT_MAPPING
#endif

	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].query(path);
}
#ifndef _WIN32
/*
* pmempool_feature_enable -- enable pool set feature
*/
int
pmempool_feature_enable(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_enableU(path, feature, flags);
}
#else
/*
* pmempool_feature_enableW -- enable pool set feature as widechar
*/
int
pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_enableU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
#ifndef _WIN32
/*
 * pmempool_feature_disable -- disable pool set feature
 *
 * Thin public wrapper around the UTF-8 implementation.
 */
int
pmempool_feature_disable(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	return pmempool_feature_disableU(path, feature, flags);
}
#else
/*
 * pmempool_feature_disableW -- disable pool set feature as widechar
 *
 * Converts the widechar path to UTF-8, delegates to the U variant and
 * frees the converted path. Returns -1 on conversion failure.
 */
int
pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature,
	unsigned flags)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL) {
		/* fixed typo: "poolest" -> "poolset" */
		ERR("Invalid poolset/pool file path.");
		return -1;
	}
	int ret = pmempool_feature_disableU(upath, feature, flags);
	util_free_UTF8(upath);
	return ret;
}
#endif
#ifndef _WIN32
/*
 * pmempool_feature_query -- query pool set feature
 *
 * Thin public wrapper around the UTF-8 implementation.
 */
int
pmempool_feature_query(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	return pmempool_feature_queryU(path, feature, flags);
}
#else
/*
 * pmempool_feature_queryW -- query pool set feature as widechar
 *
 * Converts the widechar path to UTF-8, delegates to the U variant and
 * frees the converted path. Returns -1 on conversion failure.
 */
int
pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature,
	unsigned flags)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL) {
		/* fixed typo: "poolest" -> "poolset" */
		ERR("Invalid poolset/pool file path.");
		return -1;
	}
	int ret = pmempool_feature_queryU(upath, feature, flags);
	util_free_UTF8(upath);
	return ret;
}
#endif
| 17,344 | 20.955696 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_btt_map_flog.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* check_btt_map_flog.c -- check BTT Map and Flog
*/
#include <stdint.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/*
 * questions -- question ids asked by the BTT Map / Flog repair steps
 */
enum questions {
	Q_REPAIR_MAP,	/* repair invalid/duplicated BTT Map entries */
	Q_REPAIR_FLOG,	/* repair invalid BTT Flog entries */
};
/*
 * flog_read -- (internal) read the arena's BTT Flog from the pool file and
 * convert each flog pair to host byte order
 *
 * On success arenap->flog/flogsize are populated and 0 is returned;
 * on failure arenap->flog is left NULL and -1 is returned.
 */
static int
flog_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
	uint64_t off = arenap->offset + arenap->btt_info.flogoff;
	arenap->flogsize = btt_flog_size(arenap->btt_info.nfree);
	arenap->flog = malloc(arenap->flogsize);
	if (arenap->flog == NULL) {
		ERR("!malloc");
		return -1;
	}
	if (pool_read(ppc->pool, arenap->flog, arenap->flogsize, off)) {
		free(arenap->flog);
		arenap->flog = NULL;
		return -1;
	}
	/* each flog slot holds a pair of btt_flog structures */
	uint8_t *cur = arenap->flog;
	for (uint32_t n = 0; n < arenap->btt_info.nfree; n++) {
		struct btt_flog *pair = (struct btt_flog *)cur;
		btt_flog_convert2h(&pair[0]);
		btt_flog_convert2h(&pair[1]);
		cur += BTT_FLOG_PAIR_ALIGN;
	}
	return 0;
}
/*
 * map_read -- (internal) read the arena's BTT Map from the pool file and
 * convert each entry from little-endian to host byte order
 *
 * On success arenap->map/mapsize are populated and 0 is returned;
 * on failure arenap->map is left NULL and -1 is returned.
 */
static int
map_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
	uint64_t off = arenap->offset + arenap->btt_info.mapoff;
	arenap->mapsize = btt_map_size(arenap->btt_info.external_nlba);
	ASSERT(arenap->mapsize != 0);
	arenap->map = malloc(arenap->mapsize);
	if (arenap->map == NULL) {
		ERR("!malloc");
		return -1;
	}
	if (pool_read(ppc->pool, arenap->map, arenap->mapsize, off)) {
		free(arenap->map);
		arenap->map = NULL;
		return -1;
	}
	for (uint32_t n = 0; n < arenap->btt_info.external_nlba; n++)
		arenap->map[n] = le32toh(arenap->map[n]);
	return 0;
}
/*
 * list_item -- item for simple list
 */
struct list_item {
	PMDK_LIST_ENTRY(list_item) next;	/* linkage in the list */
	uint32_t val;				/* stored value (LBA / index) */
};
/*
 * list -- simple list for storing numbers
 *
 * Singly-linked LIFO list; count tracks the number of stored items.
 */
struct list {
	PMDK_LIST_HEAD(listhead, list_item) head;
	uint32_t count;
};
/*
 * list_alloc -- (internal) allocate an empty list
 *
 * Returns the new list or NULL on allocation failure.
 */
static struct list *
list_alloc(void)
{
	struct list *l = malloc(sizeof(*l));
	if (l == NULL) {
		ERR("!malloc");
		return NULL;
	}
	PMDK_LIST_INIT(&l->head);
	l->count = 0;
	return l;
}
/*
 * list_push -- (internal) insert a new element at the head of the list
 *
 * Returns the inserted item or NULL on allocation failure.
 */
static struct list_item *
list_push(struct list *list, uint32_t val)
{
	struct list_item *it = malloc(sizeof(*it));
	if (it == NULL) {
		ERR("!malloc");
		return NULL;
	}
	it->val = val;
	list->count++;
	PMDK_LIST_INSERT_HEAD(&list->head, it, next);
	return it;
}
/*
 * list_pop -- (internal) pop element from list head
 *
 * Stores the popped value in *valp (when valp is not NULL) and returns 1;
 * returns 0 when the list is empty.
 */
static int
list_pop(struct list *list, uint32_t *valp)
{
	if (PMDK_LIST_EMPTY(&list->head))
		return 0;
	struct list_item *it = PMDK_LIST_FIRST(&list->head);
	PMDK_LIST_REMOVE(it, next);
	if (valp != NULL)
		*valp = it->val;
	free(it);
	list->count--;
	return 1;
}
/*
 * list_free -- (internal) release all items and the list itself
 */
static void
list_free(struct list *list)
{
	/* drain remaining items, discarding their values */
	for (;;) {
		if (!list_pop(list, NULL))
			break;
	}
	free(list);
}
/*
 * cleanup -- (internal) release resources used by the map and flog check
 *
 * Used both as a regular step and from error paths; pointers are reset to
 * NULL so a repeated call is safe. Always returns 0 (step signature).
 */
static int
cleanup(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	/* list_free() dereferences its argument, so guard against NULL */
	if (loc->list_unmap) {
		list_free(loc->list_unmap);
		loc->list_unmap = NULL;
	}
	if (loc->list_flog_inval) {
		list_free(loc->list_flog_inval);
		loc->list_flog_inval = NULL;
	}
	if (loc->list_inval) {
		list_free(loc->list_inval);
		loc->list_inval = NULL;
	}
	/* free(NULL) is a no-op, so the bitmaps need no guards */
	free(loc->fbitmap);
	loc->fbitmap = NULL;
	free(loc->bitmap);
	loc->bitmap = NULL;
	free(loc->dup_bitmap);
	loc->dup_bitmap = NULL;
	return 0;
}
/*
 * init -- (internal) initialize map and flog check
 *
 * Reads the arena's BTT Flog and Map and allocates the bitmaps and lists
 * used by the later check/fix steps. On any failure all partially acquired
 * resources are released via cleanup(), ppc->result is set to
 * CHECK_RESULT_ERROR and -1 is returned.
 */
static int
init(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	struct arena *arenap = loc->arenap;
	/* read flog and map entries */
	if (flog_read(ppc, arenap)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Flog", arenap->id);
		goto error;
	}
	if (map_read(ppc, arenap)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Map", arenap->id);
		goto error;
	}
	/* create bitmaps for checking duplicated blocks */
	/* one bit per internal LBA, rounded up to whole bytes */
	uint32_t bitmapsize = howmany(arenap->btt_info.internal_nlba, 8);
	loc->bitmap = calloc(bitmapsize, 1);
	if (!loc->bitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for blocks "
			"bitmap", arenap->id);
		goto error;
	}
	/* marks postmap LBAs referenced by more than one map entry */
	loc->dup_bitmap = calloc(bitmapsize, 1);
	if (!loc->dup_bitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for "
			"duplicated blocks bitmap", arenap->id);
		goto error;
	}
	/* marks internal LBAs held as free blocks by valid flog entries */
	loc->fbitmap = calloc(bitmapsize, 1);
	if (!loc->fbitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for BTT Flog "
			"bitmap", arenap->id);
		goto error;
	}
	/* list of invalid map entries */
	loc->list_inval = list_alloc();
	if (!loc->list_inval) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for invalid BTT map "
			"entries list", arenap->id);
		goto error;
	}
	/* list of invalid flog entries */
	loc->list_flog_inval = list_alloc();
	if (!loc->list_flog_inval) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for invalid BTT Flog "
			"entries list", arenap->id);
		goto error;
	}
	/* list of unmapped blocks */
	loc->list_unmap = list_alloc();
	if (!loc->list_unmap) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for unmaped blocks "
			"list", arenap->id);
		goto error;
	}
	return 0;
error:
	ppc->result = CHECK_RESULT_ERROR;
	cleanup(ppc, loc);
	return -1;
}
/*
 * map_get_postmap_lba -- extract postmap LBA from map entry
 *
 * An entry still in its initial state (both flag bits clear) maps the
 * premap LBA onto itself; otherwise the LBA is stored in the entry.
 */
static inline uint32_t
map_get_postmap_lba(struct arena *arenap, uint32_t i)
{
	uint32_t ent = arenap->map[i];
	return map_entry_is_initial(ent) ? i : (ent & BTT_MAP_ENTRY_LBA_MASK);
}
/*
 * map_entry_check -- (internal) check single map entry
 *
 * Marks the referenced postmap LBA in loc->bitmap; out-of-range and
 * duplicated entries are queued on loc->list_inval for later repair.
 * Returns -1 only when queueing an entry fails (allocation error).
 */
static int
map_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i)
{
	struct arena *arenap = loc->arenap;
	uint32_t lba = map_get_postmap_lba(arenap, i);
	if (lba >= arenap->btt_info.internal_nlba) {
		/* postmap LBA outside the arena -- invalid entry */
		CHECK_INFO(ppc, "arena %u: invalid BTT Map entry at %u",
			arenap->id, i);
		return list_push(loc->list_inval, i) ? 0 : -1;
	}
	if (!util_isset(loc->bitmap, lba)) {
		/* first map entry referencing this postmap LBA */
		util_setbit(loc->bitmap, lba);
		return 0;
	}
	/* postmap LBA already referenced by an earlier entry -- duplicate */
	CHECK_INFO(ppc, "arena %u: BTT Map entry %u duplicated "
		"at %u", arenap->id, lba, i);
	util_setbit(loc->dup_bitmap, lba);
	return list_push(loc->list_inval, i) ? 0 : -1;
}
/*
 * flog_entry_check -- (internal) check single flog entry
 *
 * Validates one flog pair, marks the blocks it holds in loc->bitmap /
 * loc->fbitmap and queues invalid or duplicated entries on
 * loc->list_flog_inval. *ptr is always advanced to the next flog pair.
 * Returns -1 only when queueing an entry fails (allocation error).
 */
static int
flog_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i,
	uint8_t **ptr)
{
	struct arena *arenap = loc->arenap;
	/* flog entry consists of two btt_flog structures */
	struct btt_flog *flog = (struct btt_flog *)*ptr;
	int next;
	struct btt_flog *flog_cur = btt_flog_get_valid(flog, &next);
	/* insert invalid and duplicated indexes to list */
	if (!flog_cur) {
		/* neither half of the pair has a valid sequence number */
		CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
		goto next;
	}
	uint32_t entry = flog_cur->old_map & BTT_MAP_ENTRY_LBA_MASK;
	uint32_t new_entry = flog_cur->new_map & BTT_MAP_ENTRY_LBA_MASK;
	/*
	 * Check if lba is in extranal_nlba range, and check if both old_map and
	 * new_map are in internal_nlba range.
	 */
	if (flog_cur->lba >= arenap->btt_info.external_nlba ||
			entry >= arenap->btt_info.internal_nlba ||
			new_entry >= arenap->btt_info.internal_nlba) {
		CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
		goto next;
	}
	if (util_isset(loc->fbitmap, entry)) {
		/*
		 * here we have two flog entries which holds the same free block
		 */
		CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry at %u\n",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
	} else if (util_isset(loc->bitmap, entry)) {
		/* here we have probably an unfinished write */
		if (util_isset(loc->bitmap, new_entry)) {
			/* Both old_map and new_map are already used in map. */
			CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry "
				"at %u", arenap->id, i);
			util_setbit(loc->dup_bitmap, new_entry);
			if (!list_push(loc->list_flog_inval, i))
				return -1;
		} else {
			/*
			 * Unfinished write. Next time pool is opened, the map
			 * will be updated to new_map.
			 */
			util_setbit(loc->bitmap, new_entry);
			util_setbit(loc->fbitmap, entry);
		}
	} else {
		int flog_valid = 1;
		/*
		 * Either flog entry is in its initial state:
		 * - current_btt_flog entry is first one in pair and
		 * - current_btt_flog.old_map == current_btt_flog.new_map and
		 * - current_btt_flog.seq == 0b01 and
		 * - second flog entry in pair is zeroed
		 * or
		 * current_btt_flog.old_map != current_btt_flog.new_map
		 */
		if (entry == new_entry)
			flog_valid = (next == 1) && (flog_cur->seq == 1) &&
				util_is_zeroed((const void *)&flog[1],
				sizeof(flog[1]));
		if (flog_valid) {
			/* totally fine case */
			util_setbit(loc->bitmap, entry);
			util_setbit(loc->fbitmap, entry);
		} else {
			CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at "
				"%u", arenap->id, i);
			if (!list_push(loc->list_flog_inval, i))
				return -1;
		}
	}
next:
	*ptr += BTT_FLOG_PAIR_ALIGN;
	return 0;
}
/*
 * arena_map_flog_check -- (internal) check map and flog
 *
 * Scans all map and flog entries of the arena, collects invalid and
 * unmapped blocks and decides whether a repair is possible; asks the
 * repair questions when it is. Returns the result of
 * check_questions_sequence_validate() or -1 on failure (resources are
 * released via cleanup()).
 */
static int
arena_map_flog_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	struct arena *arenap = loc->arenap;
	/* check map entries */
	uint32_t i;
	for (i = 0; i < arenap->btt_info.external_nlba; i++) {
		if (map_entry_check(ppc, loc, i))
			goto error_push;
	}
	/* check flog entries */
	uint8_t *ptr = arenap->flog;
	for (i = 0; i < arenap->btt_info.nfree; i++) {
		if (flog_entry_check(ppc, loc, i, &ptr))
			goto error_push;
	}
	/* check unmapped blocks and insert to list */
	for (i = 0; i < arenap->btt_info.internal_nlba; i++) {
		if (!util_isset(loc->bitmap, i)) {
			CHECK_INFO(ppc, "arena %u: unmapped block %u",
				arenap->id, i);
			if (!list_push(loc->list_unmap, i))
				goto error_push;
		}
	}
	if (loc->list_unmap->count)
		CHECK_INFO(ppc, "arena %u: number of unmapped blocks: %u",
			arenap->id, loc->list_unmap->count);
	if (loc->list_inval->count)
		CHECK_INFO(ppc, "arena %u: number of invalid BTT Map entries: "
			"%u", arenap->id, loc->list_inval->count);
	if (loc->list_flog_inval->count)
		CHECK_INFO(ppc, "arena %u: number of invalid BTT Flog entries: "
			"%u", arenap->id, loc->list_flog_inval->count);
	if (CHECK_IS_NOT(ppc, REPAIR) && loc->list_unmap->count > 0) {
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto cleanup;
	}
	/*
	 * We are able to repair if and only if number of unmapped blocks is
	 * equal to sum of invalid map and flog entries.
	 */
	if (loc->list_unmap->count != (loc->list_inval->count +
			loc->list_flog_inval->count)) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_ERR(ppc, "arena %u: cannot repair BTT Map and Flog",
			arenap->id);
		goto cleanup;
	}
	if (CHECK_IS_NOT(ppc, ADVANCED) && loc->list_inval->count +
			loc->list_flog_inval->count > 0) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_INFO(ppc, REQUIRE_ADVANCED);
		CHECK_ERR(ppc, "BTT Map and / or BTT Flog contain invalid "
			"entries");
		check_end(ppc->data);
		goto cleanup;
	}
	if (loc->list_inval->count > 0) {
		CHECK_ASK(ppc, Q_REPAIR_MAP, "Do you want to repair invalid "
			"BTT Map entries?");
	}
	if (loc->list_flog_inval->count > 0) {
		CHECK_ASK(ppc, Q_REPAIR_FLOG, "Do you want to repair invalid "
			"BTT Flog entries?");
	}
	return check_questions_sequence_validate(ppc);
error_push:
	/* fixed typo in the error message: "momory" -> "memory" */
	CHECK_ERR(ppc, "arena %u: cannot allocate memory for list item",
		arenap->id);
	ppc->result = CHECK_RESULT_ERROR;
cleanup:
	cleanup(ppc, loc);
	return -1;
}
/*
 * arena_map_flog_fix -- (internal) fix map and flog
 *
 * Repairs the entries collected by arena_map_flog_check(): invalid map or
 * flog entries are re-pointed at unmapped blocks (marked with the error
 * flag). Returns -1 when the unmapped-blocks list runs out unexpectedly.
 */
static int
arena_map_flog_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	struct arena *arenap = loc->arenap;
	uint32_t inval;
	uint32_t unmap;
	switch (question) {
	case Q_REPAIR_MAP:
		/*
		 * Because the first of two duplicated map entries looks valid
		 * until the second one is found, walk the whole map again and
		 * mark every entry pointing at a known-duplicated postmap LBA
		 * with the error flag.
		 */
		for (uint32_t i = 0; i < arenap->btt_info.external_nlba; i++) {
			uint32_t lba = map_get_postmap_lba(arenap, i);
			if (lba >= arenap->btt_info.internal_nlba)
				continue;
			if (!util_isset(loc->dup_bitmap, lba))
				continue;
			arenap->map[i] = BTT_MAP_ENTRY_ERROR | lba;
			util_clrbit(loc->dup_bitmap, lba);
			CHECK_INFO(ppc,
				"arena %u: storing 0x%x at %u BTT Map entry",
				arenap->id, arenap->map[i], i);
		}
		/*
		 * repair invalid or duplicated map entries by using unmapped
		 * blocks
		 */
		while (list_pop(loc->list_inval, &inval)) {
			if (!list_pop(loc->list_unmap, &unmap)) {
				ppc->result = CHECK_RESULT_ERROR;
				return -1;
			}
			arenap->map[inval] = unmap | BTT_MAP_ENTRY_ERROR;
			CHECK_INFO(ppc, "arena %u: storing 0x%x at %u BTT Map "
				"entry", arenap->id, arenap->map[inval], inval);
		}
		break;
	case Q_REPAIR_FLOG:
		/* repair invalid flog entries using unmapped blocks */
		while (list_pop(loc->list_flog_inval, &inval)) {
			if (!list_pop(loc->list_unmap, &unmap)) {
				ppc->result = CHECK_RESULT_ERROR;
				return -1;
			}
			/* rebuild the pair: zero the second half, reinit the first */
			struct btt_flog *flog = (struct btt_flog *)
				(arenap->flog + inval * BTT_FLOG_PAIR_ALIGN);
			memset(&flog[1], 0, sizeof(flog[1]));
			uint32_t entry = unmap | BTT_MAP_ENTRY_ERROR;
			flog[0].lba = inval;
			flog[0].new_map = entry;
			flog[0].old_map = entry;
			flog[0].seq = 1;
			CHECK_INFO(ppc, "arena %u: repairing BTT Flog at %u "
				"with free block entry 0x%x", loc->arenap->id,
				inval, entry);
		}
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * step -- a single stage of the check: either a check callback or a fix
 * callback driven by the answers collected so far
 */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
/* ordered sequence of steps executed per arena; NULL/NULL terminates */
static const struct step steps[] = {
	{
		.check	= init,
	},
	{
		.check	= arena_map_flog_check,
	},
	{
		.fix	= arena_map_flog_fix,
	},
	{
		.check	= cleanup,
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Runs either the step's check callback or its fix callback via the
 * answer loop; on a failed fix the per-arena resources are released.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (step->fix == NULL)
		return step->check(ppc, loc);
	if (check_answer_loop(ppc, loc, NULL, 1, step->fix) == 0)
		return 0;
	cleanup(ppc, loc);
	return -1;
}
/*
 * check_btt_map_flog -- perform check and fixing of map and flog
 *
 * Iterates over all arenas of the pool and runs the full step sequence
 * for each; resumable -- the current arena and step are kept in the
 * location so the routine can be re-entered after answering questions.
 */
void
check_btt_map_flog(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	/* nothing to verify when the BTT layout was never written */
	if (ppc->pool->blk_no_layout)
		return;
	/* initialize check */
	if (!loc->arenap && loc->narena == 0 &&
			ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
		CHECK_INFO(ppc, "checking BTT Map and Flog");
		loc->arenap = PMDK_TAILQ_FIRST(&ppc->pool->arenas);
		loc->narena = 0;
	}
	while (loc->arenap != NULL) {
		/* add info about checking next arena */
		if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS &&
				loc->step == 0) {
			CHECK_INFO(ppc, "arena %u: checking BTT Map and Flog",
				loc->narena);
		}
		/* do all checks */
		while (CHECK_NOT_COMPLETE(loc, steps)) {
			if (step_exe(ppc, loc))
				return;
		}
		/* jump to next arena */
		loc->arenap = PMDK_TAILQ_NEXT(loc->arenap, next);
		loc->narena++;
		loc->step = 0;
	}
}
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_backup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_backup.c -- pre-check backup
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "file.h"
#include "os.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/*
 * question -- question ids asked by the backup pre-check
 */
enum question {
	Q_OVERWRITE_EXISTING_FILE,	/* single-file backup target exists */
	Q_OVERWRITE_EXISTING_PARTS	/* poolset backup parts exist */
};
/*
 * location_release -- (internal) release the parsed poolset (if any) and
 * reset the pointer so repeated calls are safe
 */
static void
location_release(location *loc)
{
	if (loc->set == NULL)
		return;
	util_poolset_free(loc->set);
	loc->set = NULL;
}
/*
 * backup_nonpoolset_requirements -- (internal) check backup requirements
 *
 * Validates the single-file backup destination: it must either not exist
 * or match the size of the source pool file. Asks for permission to
 * overwrite an existing destination.
 */
static int
backup_nonpoolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	int exists = util_file_exists(ppc->backup_path);
	if (exists < 0) {
		return CHECK_ERR(ppc,
				"unable to access the backup destination: %s",
				ppc->backup_path);
	}
	if (!exists) {
		/* destination absent -- the create step will make it */
		errno = 0;
		return 0;
	}
	if ((size_t)util_file_get_size(ppc->backup_path) !=
			ppc->pool->set_file->size) {
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc,
				"destination of the backup does not match the size of the source pool file: %s",
				ppc->backup_path);
	}
	if (CHECK_WITHOUT_FIXING(ppc)) {
		/* dry-run mode -- nothing will be written */
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}
	CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_FILE,
		"destination of the backup already exists.|Do you want to overwrite it?");
	return check_questions_sequence_validate(ppc);
}
/*
 * backup_nonpoolset_overwrite -- (internal) overwrite pool
 *
 * Fix callback: copies the source pool over the existing destination file
 * once the user confirmed the overwrite.
 */
static int
backup_nonpoolset_overwrite(PMEMpoolcheck *ppc, location *loc,
	uint32_t question, void *context)
{
	LOG(3, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_OVERWRITE_EXISTING_FILE:
		if (pool_copy(ppc->pool, ppc->backup_path, 1 /* overwrite */)) {
			location_release(loc);
			ppc->result = CHECK_RESULT_ERROR;
			return CHECK_ERR(ppc, "cannot perform backup");
		}
		/* backup done -- skip the remaining steps */
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * backup_nonpoolset_create -- (internal) create the single-file backup
 *
 * Copies the source pool to a not-yet-existing destination file and marks
 * the backup as complete.
 */
static int
backup_nonpoolset_create(PMEMpoolcheck *ppc, location *loc)
{
	CHECK_INFO(ppc, "creating backup file: %s", ppc->backup_path);
	int copy_failed = pool_copy(ppc->pool, ppc->backup_path, 0);
	location_release(loc);
	if (copy_failed) {
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * backup_poolset_requirements -- (internal) check backup requirements
 *
 * Parses the destination poolset file and verifies it is compatible with
 * the source poolset: single replica on both sides, same number of parts
 * and matching part sizes. Any existing destination part triggers an
 * overwrite question.
 */
static int
backup_poolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	if (ppc->pool->set_file->poolset->nreplicas > 1) {
		CHECK_INFO(ppc,
			"backup of a poolset with multiple replicas is not supported");
		goto err;
	}
	if (pool_set_parse(&loc->set, ppc->backup_path)) {
		CHECK_INFO_ERRNO(ppc, "invalid poolset backup file: %s",
			ppc->backup_path);
		goto err;
	}
	if (loc->set->nreplicas > 1) {
		CHECK_INFO(ppc,
			"backup to a poolset with multiple replicas is not supported");
		goto err_poolset;
	}
	ASSERTeq(loc->set->nreplicas, 1);
	struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
	struct pool_replica *drep = loc->set->replica[0];
	if (srep->nparts != drep->nparts) {
		CHECK_INFO(ppc,
			"number of part files in the backup poolset must match number of part files in the source poolset");
		goto err_poolset;
	}
	/* check each pair of source/destination parts */
	int overwrite_required = 0;
	for (unsigned p = 0; p < srep->nparts; p++) {
		int exists = util_file_exists(drep->part[p].path);
		if (exists < 0) {
			CHECK_INFO(ppc,
				"unable to access the part of the destination poolset: %s",
				ppc->backup_path);
			goto err_poolset;
		}
		if (srep->part[p].filesize != drep->part[p].filesize) {
			CHECK_INFO(ppc,
				"size of the part %u of the backup poolset does not match source poolset",
				p);
			goto err_poolset;
		}
		if (!exists) {
			/* part absent -- will be created by the create step */
			errno = 0;
			continue;
		}
		/* at least one destination part already exists on disk */
		overwrite_required = true;
		if ((size_t)util_file_get_size(drep->part[p].path) !=
				srep->part[p].filesize) {
			CHECK_INFO(ppc,
				"destination of the backup part does not match size of the source part file: %s",
				drep->part[p].path);
			goto err_poolset;
		}
	}
	if (CHECK_WITHOUT_FIXING(ppc)) {
		/* dry-run mode -- nothing will be written */
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}
	if (overwrite_required) {
		CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_PARTS,
			"part files of the destination poolset of the backup already exist.|"
			"Do you want to overwrite them?");
	}
	return check_questions_sequence_validate(ppc);
err_poolset:
	location_release(loc);
err:
	ppc->result = CHECK_RESULT_ERROR;
	return CHECK_ERR(ppc, "unable to backup poolset");
}
/*
 * backup_poolset -- (internal) copy every part of the source poolset to
 * the corresponding part of the destination poolset
 *
 * When overwrite is 0 each created part is announced first. Returns 0 on
 * success, -1 (with ppc->result set) on any part-copy failure.
 */
static int
backup_poolset(PMEMpoolcheck *ppc, location *loc, int overwrite)
{
	struct pool_replica *src = ppc->pool->set_file->poolset->replica[0];
	struct pool_replica *dst = loc->set->replica[0];
	for (unsigned p = 0; p < src->nparts; p++) {
		if (!overwrite) {
			CHECK_INFO(ppc, "creating backup file: %s",
				dst->part[p].path);
		}
		if (pool_set_part_copy(&dst->part[p], &src->part[p],
				overwrite)) {
			location_release(loc);
			ppc->result = CHECK_RESULT_ERROR;
			CHECK_INFO(ppc, "unable to create backup file");
			return CHECK_ERR(ppc, "unable to backup poolset");
		}
	}
	return 0;
}
/*
 * backup_poolset_overwrite -- (internal) backup poolset with overwrite
 *
 * Fix callback: copies all parts over the existing destination parts once
 * the user confirmed the overwrite.
 */
static int
backup_poolset_overwrite(PMEMpoolcheck *ppc, location *loc,
	uint32_t question, void *context)
{
	LOG(3, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_OVERWRITE_EXISTING_PARTS:
		if (backup_poolset(ppc, loc, 1 /* overwrite */)) {
			location_release(loc);
			ppc->result = CHECK_RESULT_ERROR;
			return CHECK_ERR(ppc, "cannot perform backup");
		}
		/* backup done -- skip the remaining steps */
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * backup_poolset_create -- (internal) create the poolset backup
 *
 * Copies all parts to not-yet-existing destination parts and marks the
 * backup as complete. location_release() is idempotent, so releasing here
 * is safe even though backup_poolset() releases on failure too.
 */
static int
backup_poolset_create(PMEMpoolcheck *ppc, location *loc)
{
	int copy_failed = backup_poolset(ppc, loc, 0);
	location_release(loc);
	if (copy_failed) {
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * step -- a single stage of the backup pre-check; the poolset flag tells
 * whether the stage applies to poolset (1) or single-file (0) pools
 */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	int poolset;
};
/* ordered step sequence; NULL/NULL terminates */
static const struct step steps[] = {
	{
		.check	= backup_nonpoolset_requirements,
		.poolset	= false,
	},
	{
		.fix	= backup_nonpoolset_overwrite,
		.poolset	= false,
	},
	{
		.check	= backup_nonpoolset_create,
		.poolset	= false
	},
	{
		.check	= backup_poolset_requirements,
		.poolset	= true,
	},
	{
		.fix	= backup_poolset_overwrite,
		.poolset	= true,
	},
	{
		.check	= backup_poolset_create,
		.poolset	= true
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Non-poolset steps are skipped for poolset pools. The reverse skip is
 * unnecessary: for single-file pools the non-poolset create/overwrite
 * steps set loc->step = CHECK_STEP_COMPLETE, so the poolset steps are
 * never reached.
 */
static int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (step->poolset == 0 && ppc->pool->params.is_poolset == 1)
		return 0;
	if (!step->fix)
		return step->check(ppc, loc);
	if (!check_has_answer(ppc->data))
		return 0;
	if (check_answer_loop(ppc, loc, NULL, 1, step->fix))
		return -1;
	/* fix applied successfully -- the pool is consistent so far */
	ppc->result = CHECK_RESULT_CONSISTENT;
	return 0;
}
/*
 * check_backup -- perform backup if requested and needed
 *
 * No-op when no backup path was requested; otherwise runs the backup step
 * sequence until completion or the first failing step.
 */
void
check_backup(PMEMpoolcheck *ppc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	if (ppc->backup_path == NULL)
		return;
	location *loc = check_get_step_data(ppc->data);
	/* do all checks */
	while (CHECK_NOT_COMPLETE(loc, steps)) {
		if (step_exe(ppc, loc))
			break;
	}
}
| 7,968 | 20.654891 | 103 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_btt_info.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_btt_info.c -- check BTT Info
*/
#include <stdlib.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "util.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/*
 * question -- question ids asked by the BTT Info check/repair steps
 */
enum question {
	Q_RESTORE_FROM_BACKUP,	/* restore BTT Info header from its backup */
	Q_REGENERATE,		/* regenerate BTT Info from another valid one */
	Q_REGENERATE_CHECKSUM,	/* recompute the BTT Info checksum */
	Q_RESTORE_FROM_HEADER	/* restore BTT Info backup from the header */
};
/*
 * location_release -- (internal) release check_btt_info_loc allocations
 *
 * free(NULL) is a no-op and the pointer is reset, so repeated calls are
 * safe.
 */
static void
location_release(location *loc)
{
	free(loc->arenap);
	loc->arenap = NULL;
}
/*
 * btt_info_checksum -- (internal) check BTT Info checksum
 *
 * Allocates the arena descriptor, reads the BTT Info header at the
 * current offset and verifies its checksum. A fully zeroed header in a
 * BLK pool means the layout was never written -- that is still valid.
 * On error the arena is released and -1 is returned.
 */
static int
btt_info_checksum(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	loc->arenap = calloc(1, sizeof(struct arena));
	if (!loc->arenap) {
		ERR("!calloc");
		ppc->result = CHECK_RESULT_INTERNAL_ERROR;
		CHECK_ERR(ppc, "cannot allocate memory for arena");
		goto error_cleanup;
	}
	/* read the BTT Info header at well known offset */
	if (pool_read(ppc->pool, &loc->arenap->btt_info,
			sizeof(loc->arenap->btt_info), loc->offset)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Info header",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_ERROR;
		goto error_cleanup;
	}
	loc->arenap->id = ppc->pool->narenas;
	/* BLK is consistent even without BTT Layout */
	if (ppc->pool->params.type == POOL_TYPE_BLK) {
		int is_zeroed = util_is_zeroed((const void *)
			&loc->arenap->btt_info, sizeof(loc->arenap->btt_info));
		if (is_zeroed) {
			/* no layout yet -- nothing more to check */
			CHECK_INFO(ppc, "BTT Layout not written");
			loc->step = CHECK_STEP_COMPLETE;
			ppc->pool->blk_no_layout = 1;
			location_release(loc);
			check_end(ppc->data);
			return 0;
		}
	}
	/* check consistency of BTT Info */
	if (pool_btt_info_valid(&loc->arenap->btt_info)) {
		CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
			loc->arenap->id);
		loc->valid.btti_header = 1;
	} else if (CHECK_IS_NOT(ppc, REPAIR)) {
		/* bad checksum and repair not allowed -- report and stop */
		CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto error_cleanup;
	}
	return 0;
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_backup -- (internal) check BTT Info backup
 *
 * Reads the BTT Info backup stored at the end of the arena and, when it
 * is valid while the header is not, asks whether to restore the header
 * from it.
 */
static int
btt_info_backup(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	/* check BTT Info backup consistency */
	const size_t btt_info_size = sizeof(ppc->pool->bttc.btt_info);
	/* the backup lives in the last btt_info_size bytes of the arena */
	uint64_t btt_info_off = pool_next_arena_offset(ppc->pool, loc->offset) -
		btt_info_size;
	if (pool_read(ppc->pool, &ppc->pool->bttc.btt_info, btt_info_size,
			btt_info_off)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Info backup",
			loc->arenap->id);
		goto error;
	}
	/* check whether this BTT Info backup is valid */
	if (pool_btt_info_valid(&ppc->pool->bttc.btt_info)) {
		loc->valid.btti_backup = 1;
		/* restore BTT Info from backup */
		if (!loc->valid.btti_header && CHECK_IS(ppc, REPAIR))
			CHECK_ASK(ppc, Q_RESTORE_FROM_BACKUP, "arena %u: BTT "
				"Info header checksum incorrect.|Restore BTT "
				"Info from backup?", loc->arenap->id);
	}
	/*
	 * if BTT Info backup require repairs it will be fixed in further steps
	 */
	return check_questions_sequence_validate(ppc);
error:
	ppc->result = CHECK_RESULT_ERROR;
	location_release(loc);
	return -1;
}
/*
 * btt_info_from_backup_fix -- (internal) fix BTT Info using its backup
 *
 * Fix callback: overwrites the in-memory BTT Info header with the valid
 * backup read by btt_info_backup() and marks the header valid.
 */
static int
btt_info_from_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_RESTORE_FROM_BACKUP:
		CHECK_INFO(ppc,
			"arena %u: restoring BTT Info header from backup",
			loc->arenap->id);
		memcpy(&loc->arenap->btt_info, &ppc->pool->bttc.btt_info,
			sizeof(loc->arenap->btt_info));
		loc->valid.btti_header = 1;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * btt_info_gen -- (internal) ask whether try to regenerate BTT Info
 *
 * Reached only when both the header and its backup are invalid; without
 * any valid BTT Info elsewhere in the pool regeneration is impossible.
 */
static int
btt_info_gen(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	if (loc->valid.btti_header)
		return 0;
	ASSERT(CHECK_IS(ppc, REPAIR));
	if (!loc->pool_valid.btti_offset) {
		/* no template to regenerate from -- give up */
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		return CHECK_ERR(ppc, "can not find any valid BTT Info");
	}
	CHECK_ASK(ppc, Q_REGENERATE,
		"arena %u: BTT Info header checksum incorrect.|Do you want to "
		"regenerate BTT Info?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
}
/*
 * btt_info_gen_fix -- (internal) fix by regenerating BTT Info
 *
 * Fix callback: rebuilds the arena's BTT Info from the first valid BTT
 * Info found in the pool (loc->pool_valid.btti) plus the arena's size
 * derived from its offset.
 */
static int
btt_info_gen_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_REGENERATE:
		CHECK_INFO(ppc, "arena %u: regenerating BTT Info header",
			loc->arenap->id);
		/*
		 * We do not have valid BTT Info backup so we get first valid
		 * BTT Info and try to calculate BTT Info for current arena
		 */
		uint64_t arena_size = ppc->pool->set_file->size - loc->offset;
		if (arena_size > BTT_MAX_ARENA)
			arena_size = BTT_MAX_ARENA;
		uint64_t space_left = ppc->pool->set_file->size - loc->offset -
			arena_size;
		struct btt_info *bttd = &loc->arenap->btt_info;
		struct btt_info *btts = &loc->pool_valid.btti;
		btt_info_convert2h(bttd);
		/*
		 * all valid BTT Info structures have the same signature, UUID,
		 * parent UUID, flags, major, minor, external LBA size, internal
		 * LBA size, nfree, info size and data offset
		 */
		memcpy(bttd->sig, btts->sig, BTTINFO_SIG_LEN);
		memcpy(bttd->uuid, btts->uuid, BTTINFO_UUID_LEN);
		memcpy(bttd->parent_uuid, btts->parent_uuid, BTTINFO_UUID_LEN);
		memset(bttd->unused, 0, BTTINFO_UNUSED_LEN);
		bttd->flags = btts->flags;
		bttd->major = btts->major;
		bttd->minor = btts->minor;
		/* other parameters can be calculated */
		if (btt_info_set(bttd, btts->external_lbasize, btts->nfree,
				arena_size, space_left)) {
			CHECK_ERR(ppc, "can not restore BTT Info");
			return -1;
		}
		/* derived fields must agree with the valid template */
		ASSERTeq(bttd->external_lbasize, btts->external_lbasize);
		ASSERTeq(bttd->internal_lbasize, btts->internal_lbasize);
		ASSERTeq(bttd->nfree, btts->nfree);
		ASSERTeq(bttd->infosize, btts->infosize);
		ASSERTeq(bttd->dataoff, btts->dataoff);
		return 0;
	default:
		ERR("not implemented question id: %u", question);
		return -1;
	}
}
/*
 * btt_info_checksum_retry -- (internal) check BTT Info checksum
 *
 * Re-validates the (possibly regenerated) BTT Info header; if it is still
 * invalid, asks whether to recompute its checksum (requires the ADVANCED
 * flag).
 */
static int
btt_info_checksum_retry(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	if (loc->valid.btti_header)
		return 0;
	/* validation works on the on-media (little-endian) representation */
	btt_info_convert2le(&loc->arenap->btt_info);
	/* check consistency of BTT Info */
	if (pool_btt_info_valid(&loc->arenap->btt_info)) {
		CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
			loc->arenap->id);
		loc->valid.btti_header = 1;
		return 0;
	}
	if (CHECK_IS_NOT(ppc, ADVANCED)) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_INFO(ppc, REQUIRE_ADVANCED);
		CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
			loc->arenap->id);
		check_end(ppc->data);
		goto error_cleanup;
	}
	CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM,
		"arena %u: BTT Info header checksum incorrect.|Do you want to "
		"regenerate BTT Info checksum?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_checksum_fix -- (internal) fix by regenerating BTT Info checksum
 *
 * Fix callback: recomputes and stores the checksum of the BTT Info header
 * and marks the header valid.
 */
static int
btt_info_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_REGENERATE_CHECKSUM:
		util_checksum(&loc->arenap->btt_info, sizeof(struct btt_info),
			&loc->arenap->btt_info.checksum, 1, 0);
		loc->valid.btti_header = 1;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * btt_info_backup_checksum -- (internal) check BTT Info backup checksum
 *
 * Reached with a valid header; when the backup is invalid, asks whether
 * to restore it from the header (the actual write happens later, in the
 * check_write step).
 */
static int
btt_info_backup_checksum(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	ASSERT(loc->valid.btti_header);
	if (loc->valid.btti_backup)
		return 0;
	/* BTT Info backup is not valid so it must be fixed */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		CHECK_ERR(ppc,
			"arena %u: BTT Info backup checksum incorrect",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto error_cleanup;
	}
	CHECK_ASK(ppc, Q_RESTORE_FROM_HEADER,
		"arena %u: BTT Info backup checksum incorrect.|Do you want to "
		"restore it from BTT Info header?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_backup_fix -- (internal) prepare restore BTT Info backup from header
 *
 * Fix callback: only records the decision -- the backup itself is written
 * in the check_write step.
 */
static int
btt_info_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_RESTORE_FROM_HEADER:
		/* BTT Info backup would be restored in check_write step */
		CHECK_INFO(ppc,
			"arena %u: restoring BTT Info backup from header",
			loc->arenap->id);
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/*
 * step -- a single stage of the BTT Info check: either a check callback
 * (may generate questions) or a fix callback (consumes answers).
 * Exactly one of the two pointers is set per entry.
 */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
/* ordered sequence of check/fix steps; terminated by an all-NULL entry */
static const struct step steps[] = {
	{
		.check	= btt_info_checksum,
	},
	{
		.check	= btt_info_backup,
	},
	{
		.fix	= btt_info_from_backup_fix,
	},
	{
		.check	= btt_info_gen,
	},
	{
		.fix	= btt_info_gen_fix,
	},
	{
		.check	= btt_info_checksum_retry,
	},
	{
		.fix	= btt_info_checksum_fix,
	},
	{
		.check	= btt_info_backup_checksum,
	},
	{
		.fix	= btt_info_backup_fix,
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Advances loc->step and runs the step: a plain check step is invoked
 * directly; a fix step is driven through check_answer_loop. Returns 0 on
 * success, non-zero on failure (releasing the location when an error
 * status was recorded).
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (!step->fix)
		return step->check(ppc, loc);
	if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
		return 0;
	if (check_has_error(ppc->data))
		location_release(loc);
	return -1;
}
/*
 * check_btt_info -- entry point for btt info check
 *
 * Walks the chain of BTT Info headers (following nextoff links) and runs
 * the full `steps` sequence for each arena. The function is re-entrant
 * across question/answer rounds: step state is kept in the location
 * object obtained from check_get_step_data().
 */
void
check_btt_info(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	uint64_t nextoff = 0;
	/* initialize check -- offset == 0 means this is the first entry */
	if (!loc->offset) {
		CHECK_INFO(ppc, "checking BTT Info headers");
		loc->offset = sizeof(struct pool_hdr);
		/* for blk pools the BTT area starts after the pmemblk header */
		if (ppc->pool->params.type == POOL_TYPE_BLK)
			loc->offset += ALIGN_UP(sizeof(struct pmemblk) -
				sizeof(struct pool_hdr),
				BLK_FORMAT_DATA_ALIGN);
		loc->pool_valid.btti_offset = pool_get_first_valid_btt(
			ppc->pool, &loc->pool_valid.btti, loc->offset, NULL);
		/* Without valid BTT Info we can not proceed */
		if (!loc->pool_valid.btti_offset) {
			if (ppc->pool->params.type == POOL_TYPE_BTT) {
				CHECK_ERR(ppc,
					"can not find any valid BTT Info");
				ppc->result = CHECK_RESULT_NOT_CONSISTENT;
				check_end(ppc->data);
				return;
			}
		} else
			btt_info_convert2h(&loc->pool_valid.btti);
	}
	do {
		/* jump to next offset unless we are resuming with answers */
		if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
			loc->offset += nextoff;
			loc->step = 0;
			loc->valid.btti_header = 0;
			loc->valid.btti_backup = 0;
		}
		/* do all checks */
		while (CHECK_NOT_COMPLETE(loc, steps)) {
			if (step_exe(ppc, loc) || ppc->pool->blk_no_layout == 1)
				return;
		}
		/* save offset and insert BTT to cache for next steps */
		loc->arenap->offset = loc->offset;
		loc->arenap->valid = true;
		check_insert_arena(ppc, loc->arenap);
		nextoff = le64toh(loc->arenap->btt_info.nextoff);
	} while (nextoff > 0);
}
| 11,735 | 22.011765 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/check_util.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_util.c -- check utility functions
*/
#include <stdio.h>
#include <stdint.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#define CHECK_END UINT_MAX
/* separate info part of message from question part of message */
#define MSG_SEPARATOR '|'
/* error part of message must have '.' at the end */
#define MSG_PLACE_OF_SEPARATION '.'
#define MAX_MSG_STR_SIZE 8192
#define CHECK_ANSWER_YES "yes"
#define CHECK_ANSWER_NO "no"
#define STR_MAX 256
#define TIME_STR_FMT "%a %b %d %Y %H:%M:%S"
#define UUID_STR_MAX 37
/* possible user answers attached to a question status */
enum check_answer {
	PMEMPOOL_CHECK_ANSWER_EMPTY,
	PMEMPOOL_CHECK_ANSWER_YES,
	PMEMPOOL_CHECK_ANSWER_NO,
	PMEMPOOL_CHECK_ANSWER_DEFAULT,
};
/* queue of check statuses */
struct check_status {
	PMDK_TAILQ_ENTRY(check_status) next;
	struct pmempool_check_status status;	/* public, user-visible part */
	unsigned question;			/* question id, if any */
	enum check_answer answer;
	char *msg;				/* owned message buffer */
};
PMDK_TAILQ_HEAD(check_status_head, check_status);
/* check control context */
struct check_data {
	unsigned step;			/* current step; CHECK_END when done */
	location step_data;		/* per-step scratch state */
	struct check_status *error;	/* at most one pending error */
	struct check_status_head infos;
	struct check_status_head questions;
	struct check_status_head answers;
	struct check_status *check_status_cache; /* status handed to the user */
};
/*
 * check_data_alloc -- allocate and initialize check_data structure
 *
 * Returns a zeroed context with empty status queues, or NULL on
 * allocation failure. The caller releases it with check_data_free().
 */
struct check_data *
check_data_alloc(void)
{
	LOG(3, NULL);
	struct check_data *data = calloc(1, sizeof(*data));
	if (data == NULL) {
		ERR("!calloc");
		return NULL;
	}
	PMDK_TAILQ_INIT(&data->infos);
	PMDK_TAILQ_INIT(&data->questions);
	PMDK_TAILQ_INIT(&data->answers);
	return data;
}
/*
* check_data_free -- clean and deallocate check_data
*/
void
check_data_free(struct check_data *data)
{
LOG(3, NULL);
if (data->error != NULL) {
free(data->error);
data->error = NULL;
}
if (data->check_status_cache != NULL) {
free(data->check_status_cache);
data->check_status_cache = NULL;
}
while (!PMDK_TAILQ_EMPTY(&data->infos)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->infos);
PMDK_TAILQ_REMOVE(&data->infos, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->questions)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->questions);
PMDK_TAILQ_REMOVE(&data->questions, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->answers)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, statp, next);
free(statp);
}
free(data);
}
/*
 * check_step_get - return current check step number
 */
uint32_t
check_step_get(struct check_data *data)
{
	return data->step;
}
/*
 * check_step_inc -- move to next step number
 *
 * No-op once the check has ended; otherwise bumps the step counter and
 * clears the per-step scratch state.
 */
void
check_step_inc(struct check_data *data)
{
	if (!check_is_end_util(data)) {
		data->step += 1;
		memset(&data->step_data, 0, sizeof(data->step_data));
	}
}
/*
 * check_get_step_data -- return pointer to check step data
 *
 * The returned location is owned by the check_data context; callers must
 * not free it.
 */
location *
check_get_step_data(struct check_data *data)
{
	return &data->step_data;
}
/*
 * check_end -- mark check as ended (step set to the CHECK_END sentinel)
 */
void
check_end(struct check_data *data)
{
	LOG(3, NULL);
	data->step = CHECK_END;
}
/*
 * check_is_end_util -- return if check has ended
 */
int
check_is_end_util(struct check_data *data)
{
	return data->step == CHECK_END;
}
/*
 * status_alloc -- (internal) allocate and initialize check_status
 *
 * Allocates the status object together with its message buffer; aborts
 * via FATAL on out-of-memory (check statuses are essential to proceed).
 * The public str.msg pointer initially aliases the owned msg buffer.
 */
static inline struct check_status *
status_alloc(void)
{
	struct check_status *status = malloc(sizeof(*status));
	if (!status)
		FATAL("!malloc");
	status->msg = malloc(MAX_MSG_STR_SIZE); /* sizeof(char) is 1 */
	if (!status->msg) {
		free(status);
		FATAL("!malloc");
	}
	status->status.str.msg = status->msg;
	status->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
	status->question = CHECK_INVALID_QUESTION;
	return status;
}
/*
 * status_release -- (internal) release check_status
 *
 * Frees the owned message buffer and, on Windows, a possible UTF-8
 * duplicate produced by cache_to_utf8 before freeing the status itself.
 */
static void
status_release(struct check_status *status)
{
#ifdef _WIN32
	/* dealloc duplicate string after conversion */
	if (status->status.str.msg != status->msg)
		free((void *)status->status.str.msg);
#endif
	free(status->msg);
	free(status);
}
/*
 * status_msg_info_only -- (internal) separate info part of the message
 *
 * If message is in form of "info.|question" it modifies it as follows
 * "info\0|question". Returns 0 when a separator was found, -1 otherwise.
 *
 * The parameter is non-const: the function truncates the string in
 * place (the previous const-qualified signature silently cast away
 * const through strchr and wrote to the buffer anyway).
 */
static inline int
status_msg_info_only(char *msg)
{
	char *sep = strchr(msg, MSG_SEPARATOR);
	if (sep) {
		ASSERTne(sep, msg);
		--sep;
		ASSERTeq(*sep, MSG_PLACE_OF_SEPARATION);
		*sep = '\0';
		return 0;
	}
	return -1;
}
/*
 * status_msg_info_and_question -- (internal) join info and question
 *
 * If message is in form "info.|question" it will replace MSG_SEPARATOR '|'
 * with space to get "info. question". Returns 0 when a separator was
 * found, -1 otherwise.
 *
 * Non-const parameter: the string is modified in place (the previous
 * const-qualified signature cast away const and wrote to the buffer).
 */
static inline int
status_msg_info_and_question(char *msg)
{
	char *sep = strchr(msg, MSG_SEPARATOR);
	if (sep) {
		*sep = ' ';
		return 0;
	}
	return -1;
}
/*
 * status_push -- (internal) push single status object
 *
 * Routes a freshly-created status into the right place:
 * - errors are stored as the single pending error (returns -1),
 * - infos are queued only in VERBOSE mode, otherwise dropped,
 * - questions become errors when repair is disabled, auto-answered "yes"
 *   under ALWAYS_YES, or queued for the user otherwise.
 */
static int
status_push(PMEMpoolcheck *ppc, struct check_status *st, uint32_t question)
{
	if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) {
		ASSERTeq(ppc->data->error, NULL);
		ppc->data->error = st;
		return -1;
	} else if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_INFO) {
		if (CHECK_IS(ppc, VERBOSE))
			PMDK_TAILQ_INSERT_TAIL(&ppc->data->infos, st, next);
		else
			check_status_release(ppc, st);
		return 0;
	}
	/* st->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		/* error status -- cannot ask when repair is not allowed */
		if (status_msg_info_only(st->msg)) {
			ERR("no error message for the user");
			st->msg[0] = '\0';
		}
		st->status.type = PMEMPOOL_CHECK_MSG_TYPE_ERROR;
		return status_push(ppc, st, question);
	}
	if (CHECK_IS(ppc, ALWAYS_YES)) {
		if (!status_msg_info_only(st->msg)) {
			/* information status -- keep the info half */
			st->status.type = PMEMPOOL_CHECK_MSG_TYPE_INFO;
			status_push(ppc, st, question);
			st = status_alloc();
		}
		/* answer status -- auto-answer "yes" */
		ppc->result = CHECK_RESULT_PROCESS_ANSWERS;
		st->question = question;
		st->answer = PMEMPOOL_CHECK_ANSWER_YES;
		st->status.type = PMEMPOOL_CHECK_MSG_TYPE_QUESTION;
		PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers, st, next);
	} else {
		/* question message -- queue it for the user */
		status_msg_info_and_question(st->msg);
		st->question = question;
		ppc->result = CHECK_RESULT_ASK_QUESTIONS;
		st->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
		PMDK_TAILQ_INSERT_TAIL(&ppc->data->questions, st, next);
	}
	return 0;
}
/*
 * check_status_create -- create single status, push it to proper queue
 *
 * MSG_SEPARATOR character in fmt is treated as message separator. If creating
 * question but check arguments do not allow to make any changes (asking any
 * question is pointless) it takes part of message before MSG_SEPARATOR
 * character and use it to create error message. Character just before
 * separator must be a MSG_PLACE_OF_SEPARATION character. Return non 0 value
 * if error status would be created.
 *
 * The arg is an additional argument for specified type of status: a
 * question id for questions, otherwise an errno value appended to the
 * message via strerror.
 */
int
check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
	uint32_t arg, const char *fmt, ...)
{
	if (CHECK_IS_NOT(ppc, VERBOSE) && type == PMEMPOOL_CHECK_MSG_TYPE_INFO)
		return 0;
	struct check_status *st = status_alloc();
	ASSERT(CHECK_IS(ppc, FORMAT_STR));
	va_list ap;
	va_start(ap, fmt);
	int p = vsnprintf(st->msg, MAX_MSG_STR_SIZE, fmt, ap);
	va_end(ap);
	/* append possible strerror at the end of the message */
	if (type != PMEMPOOL_CHECK_MSG_TYPE_QUESTION && arg && p > 0) {
		char buff[UTIL_MAX_ERR_MSG];
		util_strerror((int)arg, buff, UTIL_MAX_ERR_MSG);
		int ret = util_snprintf(st->msg + p,
				MAX_MSG_STR_SIZE - (size_t)p, ": %s", buff);
		if (ret < 0) {
			ERR("!snprintf");
			status_release(st);
			return -1;
		}
	}
	st->status.type = type;
	return status_push(ppc, st, arg);
}
/*
 * check_status_release -- release single status object
 *
 * Clears the pending-error pointer when releasing the error status.
 */
void
check_status_release(PMEMpoolcheck *ppc, struct check_status *status)
{
	if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR)
		ppc->data->error = NULL;
	status_release(status);
}
/*
 * pop_status -- (internal) pop single message from check_status queue
 *
 * Detaches the head of the queue and parks it in the single-slot cache
 * (which must be empty). Returns NULL when the queue is empty.
 */
static struct check_status *
pop_status(struct check_data *data, struct check_status_head *queue)
{
	if (PMDK_TAILQ_EMPTY(queue))
		return NULL;
	ASSERTeq(data->check_status_cache, NULL);
	struct check_status *st = PMDK_TAILQ_FIRST(queue);
	PMDK_TAILQ_REMOVE(queue, st, next);
	data->check_status_cache = st;
	return st;
}
/*
 * check_pop_question -- pop single question from questions queue
 */
struct check_status *
check_pop_question(struct check_data *data)
{
	return pop_status(data, &data->questions);
}
/*
 * check_pop_info -- pop single info from information queue
 */
struct check_status *
check_pop_info(struct check_data *data)
{
	return pop_status(data, &data->infos);
}
/*
* check_pop_error -- pop error from state
*/
struct check_status *
check_pop_error(struct check_data *data)
{
if (data->error) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = data->error;
data->error = NULL;
return data->check_status_cache;
}
return NULL;
}
#ifdef _WIN32
/*
 * cache_to_utf8 -- convert the cached status (and its answer) to UTF-8
 *
 * Only question statuses are converted: the wide-char message is
 * replaced with a freshly-allocated UTF-8 duplicate (freed later in
 * status_release) and the answer is converted into the caller's buffer.
 */
void
cache_to_utf8(struct check_data *data, char *buf, size_t size)
{
	if (data->check_status_cache == NULL)
		return;
	struct check_status *status = data->check_status_cache;
	/* if it was a question, convert it and the answer to utf8 */
	if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION) {
		struct pmempool_check_statusW *wstatus =
			(struct pmempool_check_statusW *)&status->status;
		wchar_t *wstring = (wchar_t *)wstatus->str.msg;
		status->status.str.msg = util_toUTF8(wstring);
		if (status->status.str.msg == NULL)
			FATAL("!malloc");
		util_free_UTF16(wstring);
		if (util_toUTF8_buff(wstatus->str.answer, buf, size) != 0)
			FATAL("Invalid answer conversion %s",
				out_get_errormsg());
		status->status.str.answer = buf;
	}
}
#endif
/*
 * check_clear_status_cache -- release check_status from cache
 *
 * Infos and errors are disposable once shown; questions must survive in
 * the cache until the user's answer is processed.
 */
void
check_clear_status_cache(struct check_data *data)
{
	if (data->check_status_cache) {
		switch (data->check_status_cache->status.type) {
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			/*
			 * Info and error statuses are disposable. After showing
			 * them to the user we have to release them.
			 */
			status_release(data->check_status_cache);
			data->check_status_cache = NULL;
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			/*
			 * Question status after being showed to the user carry
			 * users answer. It must be kept till answer would be
			 * processed so it can not be released from cache. It
			 * has to be pushed to the answers queue, processed and
			 * released after that.
			 */
			break;
		default:
			ASSERT(0);
		}
	}
}
/*
 * status_answer_push -- (internal) push single answer to answers queue
 *
 * Only question statuses may carry answers (asserted).
 */
static void
status_answer_push(struct check_data *data, struct check_status *st)
{
	ASSERTeq(st->status.type, PMEMPOOL_CHECK_MSG_TYPE_QUESTION);
	PMDK_TAILQ_INSERT_TAIL(&data->answers, st, next);
}
/*
 * check_push_answer -- process answer and push it to answers queue
 *
 * Parses the user-provided answer string on the cached question status.
 * Returns 0 when a valid "yes"/"no" answer was queued; -1 when the
 * answer was invalid (the question is re-queued so it can be re-asked).
 */
int
check_push_answer(PMEMpoolcheck *ppc)
{
	if (ppc->data->check_status_cache == NULL)
		return 0;
	/* check if answer is "yes" or "no" */
	struct check_status *status = ppc->data->check_status_cache;
	if (status->status.str.answer != NULL) {
		if (strcmp(status->status.str.answer, CHECK_ANSWER_YES) == 0)
			status->answer = PMEMPOOL_CHECK_ANSWER_YES;
		else if (strcmp(status->status.str.answer, CHECK_ANSWER_NO)
				== 0)
			status->answer = PMEMPOOL_CHECK_ANSWER_NO;
	}
	if (status->answer == PMEMPOOL_CHECK_ANSWER_EMPTY) {
		/* invalid answer provided -- re-queue the question */
		status_answer_push(ppc->data, ppc->data->check_status_cache);
		ppc->data->check_status_cache = NULL;
		CHECK_INFO(ppc, "Answer must be either %s or %s",
			CHECK_ANSWER_YES, CHECK_ANSWER_NO);
		return -1;
	}
	/* push answer */
	PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers,
		ppc->data->check_status_cache, next);
	ppc->data->check_status_cache = NULL;
	return 0;
}
/*
 * check_has_error - check if error exists
 */
bool
check_has_error(struct check_data *data)
{
	return data->error != NULL;
}
/*
 * check_has_answer - check if any answer exists
 */
bool
check_has_answer(struct check_data *data)
{
	return !PMDK_TAILQ_EMPTY(&data->answers);
}
/*
 * pop_answer -- (internal) pop single answer from answers queue
 *
 * Returns the detached head of the answers queue, or NULL when empty.
 * Unlike pop_status, the answer is not parked in the cache; ownership
 * passes to the caller.
 */
static struct check_status *
pop_answer(struct check_data *data)
{
	if (PMDK_TAILQ_EMPTY(&data->answers))
		return NULL;
	struct check_status *ans = PMDK_TAILQ_FIRST(&data->answers);
	PMDK_TAILQ_REMOVE(&data->answers, ans, next);
	return ans;
}
/*
 * check_status_get_util -- extract pmempool_check_status from check_status
 */
struct pmempool_check_status *
check_status_get_util(struct check_status *status)
{
	return &status->status;
}
/*
 * check_answer_loop -- loop through all available answers and process them
 *
 * For each queued answer: "no" either aborts the repair (fail_on_no) or
 * is accepted as a skipped fix; "yes" invokes the fix callback. The
 * result field is updated accordingly. Returns 0 on success, -1 on
 * failure (the offending answer is released).
 */
int
check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx, int fail_on_no,
	int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx))
{
	struct check_status *answer;
	while ((answer = pop_answer(ppc->data)) != NULL) {
		/* if answer is "no" we cannot fix an issue */
		if (answer->answer != PMEMPOOL_CHECK_ANSWER_YES) {
			if (fail_on_no ||
				answer->answer != PMEMPOOL_CHECK_ANSWER_NO) {
				CHECK_ERR(ppc,
					"cannot complete repair, reverting changes");
				ppc->result = CHECK_RESULT_NOT_CONSISTENT;
				goto error;
			}
			ppc->result = CHECK_RESULT_REPAIRED;
			check_status_release(ppc, answer);
			continue;
		}
		/* perform fix */
		if (callback(ppc, data, answer->question, ctx)) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			goto error;
		}
		if (ppc->result == CHECK_RESULT_ERROR)
			goto error;
		/* fix succeeded */
		ppc->result = CHECK_RESULT_REPAIRED;
		check_status_release(ppc, answer);
	}
	return 0;
error:
	check_status_release(ppc, answer);
	return -1;
}
/*
 * check_questions_sequence_validate -- generate return value from result
 *
 * Sequence of questions can result in one of the following results:
 * CONSISTENT, REPAIRED, ASK_QUESTIONS or PROCESS_ANSWERS. If result ==
 * ASK_QUESTIONS it returns -1 to indicate existence of unanswered
 * questions.
 */
int
check_questions_sequence_validate(PMEMpoolcheck *ppc)
{
	ASSERT(ppc->result == CHECK_RESULT_CONSISTENT ||
		ppc->result == CHECK_RESULT_ASK_QUESTIONS ||
		ppc->result == CHECK_RESULT_PROCESS_ANSWERS ||
		ppc->result == CHECK_RESULT_REPAIRED);
	if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) {
		ASSERT(!PMDK_TAILQ_EMPTY(&ppc->data->questions));
		return -1;
	}
	return 0;
}
/*
 * check_get_time_str -- returns time in human-readable format
 *
 * NOTE: returns a pointer to a static buffer, so the result is
 * overwritten by the next call and the function is not thread-safe.
 */
const char *
check_get_time_str(time_t time)
{
	static char str_buff[STR_MAX] = {0, };
	struct tm *tm = util_localtime(&time);
	if (tm)
		strftime(str_buff, STR_MAX, TIME_STR_FMT, tm);
	else {
		int ret = util_snprintf(str_buff, STR_MAX, "unknown");
		if (ret < 0) {
			ERR("!snprintf");
			return "";
		}
	}
	return str_buff;
}
/*
 * check_get_uuid_str -- returns uuid in human readable format
 *
 * NOTE: returns a pointer to a static buffer, so the result is
 * overwritten by the next call and the function is not thread-safe.
 */
const char *
check_get_uuid_str(uuid_t uuid)
{
	static char uuid_str[UUID_STR_MAX] = {0, };
	int ret = util_uuid_to_string(uuid, uuid_str);
	if (ret != 0) {
		/* fixed typo: "covert" -> "convert" */
		ERR("failed to convert uuid to string");
		return "";
	}
	return uuid_str;
}
/*
 * pmempool_check_insert_arena -- insert arena to list
 *
 * Appends the arena to the pool's arena list and bumps the counter;
 * ownership of arenap passes to the pool (freed in pool_data_free).
 */
void
check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap)
{
	PMDK_TAILQ_INSERT_TAIL(&ppc->pool->arenas, arenap, next);
	ppc->pool->narenas++;
}
| 15,575 | 22.247761 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/pool.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* pool.h -- internal definitions for pool processing functions
*/
#ifndef POOL_H
#define POOL_H
#include <stdbool.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "queue.h"
#include "set.h"
#include "log.h"
#include "blk.h"
#include "btt_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
/* bit-flag pool kinds; POOL_TYPE_ANY matches every kind */
enum pool_type {
	POOL_TYPE_UNKNOWN	= (1 << 0),
	POOL_TYPE_LOG		= (1 << 1),
	POOL_TYPE_BLK		= (1 << 2),
	POOL_TYPE_OBJ		= (1 << 3),
	POOL_TYPE_BTT		= (1 << 4),
	POOL_TYPE_ANY		= POOL_TYPE_UNKNOWN | POOL_TYPE_LOG |
		POOL_TYPE_BLK | POOL_TYPE_OBJ | POOL_TYPE_BTT,
};
/* parameters parsed from a pool header / file */
struct pool_params {
	enum pool_type type;
	char signature[POOL_HDR_SIG_LEN];
	features_t features;
	size_t size;
	mode_t mode;
	int is_poolset;
	int is_part;
	int is_dev_dax;
	int is_pmem;
	union {
		struct {
			uint64_t bsize;		/* blk: block size */
		} blk;
		struct {
			char layout[PMEMOBJ_MAX_LAYOUT]; /* obj: layout name */
		} obj;
	};
};
/* an opened pool set (or raw BTT device) and its file metadata */
struct pool_set_file {
	int fd;			/* used only in BTT device mode */
	char *fname;
	void *addr;		/* mapped base address */
	size_t size;
	struct pool_set *poolset;
	time_t mtime;
	mode_t mode;
};
/* a single BTT arena and its parsed on-media structures */
struct arena {
	PMDK_TAILQ_ENTRY(arena) next;
	struct btt_info btt_info;
	uint32_t id;
	bool valid;
	bool zeroed;
	uint64_t offset;	/* offset of the arena in the pool */
	uint8_t *flog;
	size_t flogsize;
	uint32_t *map;
	size_t mapsize;
};
/* all per-pool state used by the checker */
struct pool_data {
	struct pool_params params;
	struct pool_set_file *set_file;
	int blk_no_layout;
	union {
		struct pool_hdr pool;
		struct pmemlog log;
		struct pmemblk blk;
	} hdr;
	enum {
		UUID_NOP = 0,		/* uuid origin not yet decided */
		UUID_FROM_BTT,
		UUID_NOT_FROM_BTT,
	} uuid_op;
	struct arena bttc;		/* scratch arena for BTT parsing */
	PMDK_TAILQ_HEAD(arenashead, arena) arenas;
	uint32_t narenas;
};
struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc);
void pool_data_free(struct pool_data *pool);
void pool_params_from_header(struct pool_params *params,
const struct pool_hdr *hdr);
int pool_set_parse(struct pool_set **setp, const char *path);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
int pool_read(struct pool_data *pool, void *buff, size_t nbytes,
uint64_t off);
int pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off);
int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite);
int pool_set_part_copy(struct pool_set_part *dpart,
struct pool_set_part *spart, int overwrite);
int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count);
unsigned pool_set_files_count(struct pool_set_file *file);
int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv);
void pool_set_file_unmap_headers(struct pool_set_file *file);
void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp);
enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp);
enum pool_type pool_set_type(struct pool_set *set);
const char *pool_get_pool_type_str(enum pool_type type);
int pool_btt_info_valid(struct btt_info *infop);
int pool_blk_get_first_valid_arena(struct pool_data *pool,
struct arena *arenap);
int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize);
uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t header_offset);
uint64_t pool_get_first_valid_btt(struct pool_data *pool,
struct btt_info *infop, uint64_t offset, bool *zeroed);
size_t pool_get_min_size(enum pool_type);
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at);
int
pmempool_fault_injection_enabled(void);
#else
/* fault injection disabled: calling the injection hook is a bug */
static inline void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	abort();
}
/* fault injection disabled: report it as unavailable */
static inline int
pmempool_fault_injection_enabled(void)
{
	return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,712 | 21.640244 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmempool/pool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pool.c -- pool processing functions
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <endian.h>
#ifndef _WIN32
#include <sys/ioctl.h>
#ifdef __FreeBSD__
#include <sys/disk.h>
#define BLKGETSIZE64 DIOCGMEDIASIZE
#else
#include <linux/fs.h>
#endif
#endif
#include "libpmem.h"
#include "libpmemlog.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "out.h"
#include "pmempool.h"
#include "pool.h"
#include "lane.h"
#include "obj.h"
#include "btt.h"
#include "file.h"
#include "os.h"
#include "set.h"
#include "check_util.h"
#include "util_pmem.h"
#include "mmap.h"
/* arbitrary size of a maximum file part being read / write at once */
#define RW_BUFFERING_SIZE (128 * 1024 * 1024)
/*
 * pool_btt_lseek -- (internal) perform lseek in BTT file mode
 *
 * Returns the resulting offset or -1 on failure (logged).
 */
static inline os_off_t
pool_btt_lseek(struct pool_data *pool, os_off_t offset, int whence)
{
	os_off_t result;
	if ((result = os_lseek(pool->set_file->fd, offset, whence)) == -1)
		ERR("!lseek");
	return result;
}
/*
 * pool_btt_read -- (internal) perform read in BTT file mode
 *
 * Loops until 'count' bytes are read or util_read reports EOF/error.
 * Returns the number of bytes read, or -1 when the very first read
 * fails.
 */
static inline ssize_t
pool_btt_read(struct pool_data *pool, void *dst, size_t count)
{
	size_t total = 0;
	ssize_t nread;
	while (count > total &&
		(nread = util_read(pool->set_file->fd, dst, count - total))) {
		if (nread == -1) {
			ERR("!read");
			return total ? (ssize_t)total : -1;
		}
		/* advance via char * instead of a pointer<->ssize_t cast */
		dst = (char *)dst + nread;
		total += (size_t)nread;
	}
	return (ssize_t)total;
}
/*
 * pool_btt_write -- (internal) perform write in BTT file mode
 *
 * Loops until 'count' bytes are written or util_write reports an error.
 * Returns the number of bytes written, or -1 when the very first write
 * fails.
 */
static inline ssize_t
pool_btt_write(struct pool_data *pool, const void *src, size_t count)
{
	ssize_t nwrite = 0;
	size_t total = 0;
	while (count > total &&
		(nwrite = util_write(pool->set_file->fd, src,
				count - total))) {
		if (nwrite == -1) {
			ERR("!write");
			return total ? (ssize_t)total : -1;
		}
		/* advance via const char *; the old cast also dropped const */
		src = (const char *)src + nwrite;
		total += (size_t)nwrite;
	}
	return (ssize_t)total;
}
/*
 * pool_set_read_header -- (internal) read a header of a pool set
 *
 * Parses the poolset file, opens the first part of the first replica and
 * reads its pool header into *hdr. Returns 0 on success, -1 on failure.
 */
static int
pool_set_read_header(const char *fname, struct pool_hdr *hdr)
{
	struct pool_set *set;
	int ret = 0;
	if (util_poolset_read(&set, fname)) {
		return -1;
	}
	/* open the first part set file to read the pool header values */
	const struct pool_set_part *part = PART(REP(set, 0), 0);
	int fdp = util_file_open(part->path, NULL, 0, O_RDONLY);
	if (fdp < 0) {
		ERR("cannot open poolset part file");
		ret = -1;
		goto err_pool_set;
	}
	/* read the pool header from first pool set file */
	if (pread(fdp, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
		ERR("cannot read pool header from poolset");
		ret = -1;
		goto err_close_part;
	}
err_close_part:
	os_close(fdp);
err_pool_set:
	util_poolset_free(set);
	return ret;
}
/*
 * pool_set_map -- (internal) map poolset
 *
 * Reads the first part's pool header to determine the pool type and
 * attributes, then opens and maps the whole poolset with those
 * attributes (ignoring SDS and bad blocks). Returns 0 on success.
 */
static int
pool_set_map(const char *fname, struct pool_set **poolset, unsigned flags)
{
	ASSERTeq(util_is_poolset_file(fname), 1);
	struct pool_hdr hdr;
	if (pool_set_read_header(fname, &hdr))
		return -1;
	util_convert2h_hdr_nocheck(&hdr);
	/* parse pool type from first pool set file */
	enum pool_type type = pool_hdr_get_type(&hdr);
	if (type == POOL_TYPE_UNKNOWN) {
		ERR("cannot determine pool type from poolset");
		return -1;
	}
	/*
	 * Open the poolset, the values passed to util_pool_open are read
	 * from the first poolset file, these values are then compared with
	 * the values from all headers of poolset files.
	 */
	struct pool_attr attr;
	util_pool_hdr2attr(&attr, &hdr);
	if (util_pool_open(poolset, fname, 0 /* minpartsize */, &attr,
			NULL, NULL, flags | POOL_OPEN_IGNORE_SDS |
			POOL_OPEN_IGNORE_BAD_BLOCKS)) {
		ERR("opening poolset failed");
		return -1;
	}
	return 0;
}
/*
 * pool_params_from_header -- parse pool params from pool header
 *
 * Fills signature, features, is_part and type from the (host-endian)
 * header. Expects params->is_poolset to be set by the caller.
 */
void
pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr)
{
	memcpy(params->signature, hdr->signature, sizeof(params->signature));
	memcpy(&params->features, &hdr->features, sizeof(params->features));
	/*
	 * Check if file is a part of pool set by comparing the UUID with the
	 * next part UUID. If it is the same it means the pool consist of a
	 * single file.
	 */
	int uuid_eq_next = uuidcmp(hdr->uuid, hdr->next_part_uuid);
	int uuid_eq_prev = uuidcmp(hdr->uuid, hdr->prev_part_uuid);
	params->is_part = !params->is_poolset && (uuid_eq_next || uuid_eq_prev);
	params->type = pool_hdr_get_type(hdr);
}
/*
 * pool_check_type_to_pool_type -- (internal) convert check pool type to
 *	internal pool type value
 *
 * Returns POOL_TYPE_UNKNOWN (and logs) for values with no internal
 * equivalent, including PMEMPOOL_POOL_TYPE_DETECT and _BTT, which the
 * caller handles separately.
 */
static enum pool_type
pool_check_type_to_pool_type(enum pmempool_pool_type check_pool_type)
{
	switch (check_pool_type) {
	case PMEMPOOL_POOL_TYPE_LOG:
		return POOL_TYPE_LOG;
	case PMEMPOOL_POOL_TYPE_BLK:
		return POOL_TYPE_BLK;
	case PMEMPOOL_POOL_TYPE_OBJ:
		return POOL_TYPE_OBJ;
	default:
		ERR("can not convert pmempool_pool_type %u to pool_type",
			check_pool_type);
		return POOL_TYPE_UNKNOWN;
	}
}
/*
 * pool_parse_params -- parse pool type, file size and block size
 *
 * Determines whether the path is a poolset, a raw BTT device or a single
 * pool file, maps it read-only just long enough to read the pool header,
 * and fills *params (type, size, mode, pmem/dax flags, blk bsize or obj
 * layout). When 'check' is set, poolsets are opened with full header
 * validation (pool_set_map); otherwise without it. Returns 0 on success,
 * -1 on error, 1 when the declared pool type does not match the header.
 */
static int
pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params,
	int check)
{
	LOG(3, NULL);
	int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT;
	params->type = POOL_TYPE_UNKNOWN;
	params->is_poolset = util_is_poolset_file(ppc->path) == 1;
	int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY);
	if (fd < 0)
		return -1;
	int ret = 0;
	os_stat_t stat_buf;
	ret = os_fstat(fd, &stat_buf);
	if (ret)
		goto out_close;
	ASSERT(stat_buf.st_size >= 0);
	params->mode = stat_buf.st_mode;
	struct pool_set *set;
	void *addr;
	if (params->is_poolset) {
		/*
		 * Need to close the poolset because it will be opened with
		 * flock in the following instructions.
		 */
		os_close(fd);
		fd = -1;
		if (check) {
			if (pool_set_map(ppc->path, &set, 0))
				return -1;
		} else {
			ret = util_poolset_create_set(&set, ppc->path,
				0, 0, true);
			if (ret < 0) {
				LOG(2, "cannot open pool set -- '%s'",
					ppc->path);
				return -1;
			}
			if (set->remote) {
				ERR("poolsets with remote replicas are not "
					"supported");
				return -1;
			}
			if (util_pool_open_nocheck(set,
					POOL_OPEN_IGNORE_BAD_BLOCKS))
				return -1;
		}
		params->size = set->poolsize;
		addr = set->replica[0]->part[0].addr;
		/*
		 * XXX mprotect for device dax with length not aligned to its
		 * page granularity causes SIGBUS on the next page fault.
		 * The length argument of this call should be changed to
		 * set->poolsize once the kernel issue is solved.
		 */
		if (mprotect(addr, set->replica[0]->repsize,
			PROT_READ) < 0) {
			ERR("!mprotect");
			goto out_unmap;
		}
		params->is_dev_dax = set->replica[0]->part[0].is_dev_dax;
		params->is_pmem = set->replica[0]->is_pmem;
	} else if (is_btt) {
		params->size = (size_t)stat_buf.st_size;
#ifndef _WIN32
		/* block devices report size via ioctl, not fstat */
		if (params->mode & S_IFBLK)
			if (ioctl(fd, BLKGETSIZE64, &params->size)) {
				ERR("!ioctl");
				goto out_close;
			}
#endif
		addr = NULL;
	} else {
		enum file_type type = util_file_get_type(ppc->path);
		if (type < 0) {
			ret = -1;
			goto out_close;
		}
		ssize_t s = util_file_get_size(ppc->path);
		if (s < 0) {
			ret = -1;
			goto out_close;
		}
		params->size = (size_t)s;
		int map_sync;
		addr = util_map(fd, 0, params->size, MAP_SHARED, 1, 0,
			&map_sync);
		if (addr == NULL) {
			ret = -1;
			goto out_close;
		}
		params->is_dev_dax = type == TYPE_DEVDAX;
		params->is_pmem = params->is_dev_dax || map_sync ||
			pmem_is_pmem(addr, params->size);
	}
	/* stop processing for BTT device -- it has no pool header */
	if (is_btt) {
		params->type = POOL_TYPE_BTT;
		params->is_part = false;
		goto out_close;
	}
	struct pool_hdr hdr;
	memcpy(&hdr, addr, sizeof(hdr));
	util_convert2h_hdr_nocheck(&hdr);
	pool_params_from_header(params, &hdr);
	if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) {
		enum pool_type declared_type =
			pool_check_type_to_pool_type(ppc->args.pool_type);
		if ((params->type & ~declared_type) != 0) {
			ERR("declared pool type does not match");
			errno = EINVAL;
			ret = 1;
			goto out_unmap;
		}
	}
	if (params->type == POOL_TYPE_BLK) {
		struct pmemblk pbp;
		memcpy(&pbp, addr, sizeof(pbp));
		params->blk.bsize = le32toh(pbp.bsize);
	} else if (params->type == POOL_TYPE_OBJ) {
		struct pmemobjpool *pop = addr;
		memcpy(params->obj.layout, pop->layout,
			PMEMOBJ_MAX_LAYOUT);
	}
out_unmap:
	if (params->is_poolset) {
		ASSERTeq(fd, -1);
		ASSERTne(addr, NULL);
		util_poolset_close(set, DO_NOT_DELETE_PARTS);
	} else if (!is_btt) {
		ASSERTne(fd, -1);
		ASSERTne(addr, NULL);
		munmap(addr, params->size);
	}
out_close:
	if (fd != -1)
		os_close(fd);
	return ret;
}
/*
 * pool_set_file_open -- (internal) opens pool set file or regular file
 *
 * For non-BTT pools the path is opened as a poolset (COW when rdonly);
 * for BTT devices only a plain file descriptor is kept. Records size,
 * mtime and mode. Returns NULL on failure.
 */
static struct pool_set_file *
pool_set_file_open(const char *fname, struct pool_params *params, int rdonly)
{
	LOG(3, NULL);
	struct pool_set_file *file = calloc(1, sizeof(*file));
	if (!file)
		return NULL;
	file->fname = strdup(fname);
	if (!file->fname)
		goto err;
	const char *path = file->fname;
	if (params->type != POOL_TYPE_BTT) {
		int ret = util_poolset_create_set(&file->poolset, path,
			0, 0, true);
		if (ret < 0) {
			LOG(2, "cannot open pool set -- '%s'", path);
			goto err_free_fname;
		}
		unsigned flags = (rdonly ? POOL_OPEN_COW : 0) |
					POOL_OPEN_IGNORE_BAD_BLOCKS;
		if (util_pool_open_nocheck(file->poolset, flags))
			goto err_free_fname;
		file->size = file->poolset->poolsize;
		/* get modification time from the first part of first replica */
		path = file->poolset->replica[0]->part[0].path;
		file->addr = file->poolset->replica[0]->part[0].addr;
	} else {
		int oflag = rdonly ? O_RDONLY : O_RDWR;
		file->fd = util_file_open(fname, NULL, 0, oflag);
		file->size = params->size;
	}
	os_stat_t buf;
	if (os_stat(path, &buf)) {
		ERR("%s", path);
		goto err_close_poolset;
	}
	file->mtime = buf.st_mtime;
	file->mode = buf.st_mode;
	return file;
err_close_poolset:
	if (params->type != POOL_TYPE_BTT)
		util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
	else if (file->fd != -1)
		os_close(file->fd);
err_free_fname:
	free(file->fname);
err:
	free(file);
	return NULL;
}
/*
 * pool_set_parse -- parse poolset file
 *
 * Opens the poolset descriptor read-only and hands it to
 * util_poolset_parse. Returns 0 on success, 1 on failure.
 */
int
pool_set_parse(struct pool_set **setp, const char *path)
{
	LOG(3, "setp %p path %s", setp, path);
	int fd = os_open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	int ret = util_poolset_parse(setp, path, fd) ? 1 : 0;
	os_close(fd);
	return ret;
}
/*
 * pool_data_alloc -- allocate pool data and open set_file
 *
 * Parses the pool parameters for ppc->path, opens the pool (read-only
 * unless repair is requested, COW for dry runs) and maps the part
 * headers for non-BTT pools. Returns NULL on failure.
 */
struct pool_data *
pool_data_alloc(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	struct pool_data *pool = calloc(1, sizeof(*pool));
	if (!pool) {
		ERR("!calloc");
		return NULL;
	}
	PMDK_TAILQ_INIT(&pool->arenas);
	pool->uuid_op = UUID_NOP;
	if (pool_params_parse(ppc, &pool->params, 0))
		goto error;
	int rdonly = CHECK_IS_NOT(ppc, REPAIR);
	int prv = CHECK_IS(ppc, DRY_RUN);
	if (prv && pool->params.is_dev_dax) {
		errno = ENOTSUP;
		ERR("!cannot perform a dry run on dax device");
		goto error;
	}
	pool->set_file = pool_set_file_open(ppc->path, &pool->params, prv);
	if (pool->set_file == NULL)
		goto error;
	/*
	 * XXX mprotect for device dax with length not aligned to its
	 * page granularity causes SIGBUS on the next page fault.
	 * The length argument of this call should be changed to
	 * pool->set_file->poolsize once the kernel issue is solved.
	 */
	if (rdonly && mprotect(pool->set_file->addr,
		pool->set_file->poolset->replica[0]->repsize,
		PROT_READ) < 0)
		goto error;
	if (pool->params.type != POOL_TYPE_BTT) {
		if (pool_set_file_map_headers(pool->set_file, rdonly, prv))
			goto error;
	}
	return pool;
error:
	pool_data_free(pool);
	return NULL;
}
/*
 * pool_set_file_close -- (internal) closes pool set file or regular file
 *
 * Handles all three open modes: poolset, mapped regular file, and plain
 * file descriptor (BTT device).
 */
static void
pool_set_file_close(struct pool_set_file *file)
{
	LOG(3, NULL);
	if (file->poolset)
		util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
	else if (file->addr) {
		munmap(file->addr, file->size);
		os_close(file->fd);
	} else if (file->fd)
		os_close(file->fd);
	free(file->fname);
	free(file);
}
/*
 * pool_data_free -- close set_file and release pool data
 *
 * Safe to call on a partially-initialized pool: a missing set_file is
 * skipped and the arena list may be empty.
 */
void
pool_data_free(struct pool_data *pool)
{
	LOG(3, NULL);
	if (pool->set_file) {
		/* BTT pools never had their part headers mapped */
		if (pool->params.type != POOL_TYPE_BTT)
			pool_set_file_unmap_headers(pool->set_file);
		pool_set_file_close(pool->set_file);
	}
	while (!PMDK_TAILQ_EMPTY(&pool->arenas)) {
		struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
		/* free(NULL) is a no-op, so no guards are needed here */
		free(arenap->map);
		free(arenap->flog);
		PMDK_TAILQ_REMOVE(&pool->arenas, arenap, next);
		free(arenap);
	}
	free(pool);
}
/*
 * pool_set_file_map -- return mapped address at given offset
 *
 * Returns NULL when the file was never successfully mapped.
 */
void *
pool_set_file_map(struct pool_set_file *file, uint64_t offset)
{
	if (file->addr == MAP_FAILED)
		return NULL;
	char *base = file->addr;
	return base + offset;
}
/*
 * pool_read -- read from pool set file or regular file
 *
 * 'buff' has to be a buffer at least 'nbytes' long
 * 'off' is an offset from the beginning of the pool
 *
 * Returns 0 on success, -1 on out-of-range offset or I/O failure.
 */
int
pool_read(struct pool_data *pool, void *buff, size_t nbytes, uint64_t off)
{
	/* reject reads that would run past the end of the pool */
	if (off + nbytes > pool->set_file->size)
		return -1;
	if (pool->params.type != POOL_TYPE_BTT)
		/* non-BTT content is read straight from the mapped pool */
		memcpy(buff, (char *)pool->set_file->addr + off, nbytes);
	else {
		/* BTT content goes through seek + read on the descriptor */
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;
		if ((size_t)pool_btt_read(pool, buff, nbytes) != nbytes)
			return -1;
	}
	return 0;
}
/*
 * pool_write -- write to pool set file or regular file
 *
 * 'buff' has to be a buffer at least 'nbytes' long
 * 'off' is an offset from the beginning of the pool
 *
 * Returns 0 on success, -1 on out-of-range offset or I/O failure.
 */
int
pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
	uint64_t off)
{
	/* reject writes that would run past the end of the pool */
	if (off + nbytes > pool->set_file->size)
		return -1;
	if (pool->params.type != POOL_TYPE_BTT) {
		memcpy((char *)pool->set_file->addr + off, buff, nbytes);
		/* flush or msync depending on whether the range is pmem */
		util_persist_auto(pool->params.is_pmem,
			(char *)pool->set_file->addr + off, nbytes);
	} else {
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;
		if ((size_t)pool_btt_write(pool, buff, nbytes) != nbytes)
			return -1;
	}
	return 0;
}
/*
 * pool_copy -- make a copy of the pool
 *
 * Copies the whole pool into dst_path, creating it (or overwriting it when
 * 'overwrite' is set). Returns 0 on success, -1 (errno set) on failure.
 */
int
pool_copy(struct pool_data *pool, const char *dst_path, int overwrite)
{
	struct pool_set_file *file = pool->set_file;
	int dfd;
	int exists = util_file_exists(dst_path);
	if (exists < 0)
		return -1;
	if (exists) {
		if (!overwrite) {
			errno = EEXIST;
			return -1;
		}
		dfd = util_file_open(dst_path, NULL, 0, O_RDWR);
	} else {
		errno = 0;
		dfd = util_file_create(dst_path, file->size, 0);
	}
	if (dfd < 0)
		return -1;
	int result = 0;
	os_stat_t stat_buf;
	if (os_stat(file->fname, &stat_buf)) {
		result = -1;
		goto out_close;
	}
	/* carry the source file permissions over to the copy */
	if (fchmod(dfd, stat_buf.st_mode)) {
		result = -1;
		goto out_close;
	}
	void *daddr = mmap(NULL, file->size, PROT_READ | PROT_WRITE,
		MAP_SHARED, dfd, 0);
	if (daddr == MAP_FAILED) {
		result = -1;
		goto out_close;
	}
	if (pool->params.type != POOL_TYPE_BTT) {
		/* non-BTT pool: one flat copy of the mapped range */
		void *saddr = pool_set_file_map(file, 0);
		memcpy(daddr, saddr, file->size);
		goto out_unmap;
	}
	/* BTT pool: stream the content through a bounce buffer */
	void *buf = malloc(RW_BUFFERING_SIZE);
	if (buf == NULL) {
		ERR("!malloc");
		result = -1;
		goto out_unmap;
	}
	if (pool_btt_lseek(pool, 0, SEEK_SET) == -1) {
		result = -1;
		goto out_free;
	}
	ssize_t buf_read = 0;
	char *dst = daddr;
	while ((buf_read = pool_btt_read(pool, buf, RW_BUFFERING_SIZE))) {
		if (buf_read == -1) {
			/* fix: a failed read used to be silently ignored */
			result = -1;
			break;
		}
		memcpy(dst, buf, (size_t)buf_read);
		/* fix: advance via char * instead of casting through ssize_t */
		dst += buf_read;
	}
out_free:
	free(buf);
out_unmap:
	munmap(daddr, file->size);
out_close:
	(void) os_close(dfd);
	return result;
}
/*
 * pool_set_part_copy -- make a copy of the poolset part
 *
 * Maps the source part read-only, maps (or creates) the destination part,
 * copies the source content and persists it. Returns 0 on success,
 * -1 (errno set) on failure.
 */
int
pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart,
	int overwrite)
{
	LOG(3, "dpart %p spart %p", dpart, spart);
	int result = 0;
	os_stat_t stat_buf;
	/* source mode is reused for a freshly created destination part */
	if (os_fstat(spart->fd, &stat_buf)) {
		ERR("!util_stat");
		return -1;
	}
	size_t smapped = 0;
	void *saddr = pmem_map_file(spart->path, 0, 0, S_IREAD, &smapped, NULL);
	if (!saddr)
		return -1;
	size_t dmapped = 0;
	int is_pmem;
	void *daddr;
	int exists = util_file_exists(dpart->path);
	if (exists < 0) {
		result = -1;
		goto out_sunmap;
	}
	if (exists) {
		if (!overwrite) {
			errno = EEXIST;
			result = -1;
			goto out_sunmap;
		}
		/* len 0 maps the whole existing file */
		daddr = pmem_map_file(dpart->path, 0, 0, S_IWRITE, &dmapped,
			&is_pmem);
	} else {
		errno = 0;
		daddr = pmem_map_file(dpart->path, dpart->filesize,
			PMEM_FILE_CREATE | PMEM_FILE_EXCL,
			stat_buf.st_mode, &dmapped, &is_pmem);
	}
	if (!daddr) {
		result = -1;
		goto out_sunmap;
	}
#ifdef DEBUG
	/* provide extra logging in case of wrong dmapped/smapped value */
	if (dmapped < smapped) {
		LOG(1, "dmapped < smapped: dmapped = %lu, smapped = %lu",
			dmapped, smapped);
		ASSERT(0);
	}
#endif
	/* choose the persistence primitive matching the destination media */
	if (is_pmem) {
		pmem_memcpy_persist(daddr, saddr, smapped);
	} else {
		memcpy(daddr, saddr, smapped);
		pmem_msync(daddr, smapped);
	}
	pmem_unmap(daddr, dmapped);
out_sunmap:
	pmem_unmap(saddr, smapped);
	return result;
}
/*
 * pool_memset -- memset pool part described by off and count
 *
 * Fills 'count' bytes with value 'c'. Returns 0 on success, -1 on failure.
 */
int
pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count)
{
	int result = 0;
	if (pool->params.type != POOL_TYPE_BTT)
		/*
		 * Fix: honor the 'c' argument -- the old code always wrote 0
		 * here while the BTT path below fills with 'c'.
		 * NOTE(review): 'off' is used as an absolute address here,
		 * not as an offset from set_file->addr -- confirm callers.
		 */
		memset((char *)off, c, count);
	else {
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;
		/* fill through a bounded bounce buffer */
		size_t zero_size = min(count, RW_BUFFERING_SIZE);
		void *buf = malloc(zero_size);
		if (!buf) {
			ERR("!malloc");
			return -1;
		}
		memset(buf, c, zero_size);
		ssize_t nwrite = 0;
		do {
			/* last chunk may be shorter than the buffer */
			zero_size = min(zero_size, count);
			nwrite = pool_btt_write(pool, buf, zero_size);
			if (nwrite < 0) {
				result = -1;
				break;
			}
			count -= (size_t)nwrite;
		} while (count > 0);
		free(buf);
	}
	return result;
}
/*
 * pool_set_files_count -- get total number of parts of all replicas
 */
unsigned
pool_set_files_count(struct pool_set_file *file)
{
	unsigned total = 0;
	unsigned nreplicas = file->poolset->nreplicas;
	/* sum the part counts across every replica */
	for (unsigned r = 0; r < nreplicas; r++)
		total += file->poolset->replica[r]->nparts;
	return total;
}
/*
 * pool_set_file_map_headers -- map headers of each pool set part file
 *
 * On any failure every header mapped so far is unmapped again.
 * Returns 0 on success, -1 on failure.
 */
int
pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv)
{
	if (file->poolset == NULL)
		return -1;
	/* a private mapping keeps dry runs from touching the files */
	int mflags = prv ? MAP_PRIVATE : MAP_SHARED;
	for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
		struct pool_replica *rep = file->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			struct pool_set_part *part = &rep->part[p];
			if (util_map_hdr(part, mflags, rdonly) != 0) {
				part->hdr = NULL;
				pool_set_file_unmap_headers(file);
				return -1;
			}
		}
	}
	return 0;
}
/*
* pool_set_file_unmap_headers -- unmap headers of each pool set part file
*/
void
pool_set_file_unmap_headers(struct pool_set_file *file)
{
if (!file->poolset)
return;
for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
util_unmap_hdr(part);
}
}
}
/*
 * pool_get_signature -- (internal) return signature of specified pool type
 *
 * Returns NULL for types without a header signature (e.g. BTT).
 */
static const char *
pool_get_signature(enum pool_type type)
{
	if (type == POOL_TYPE_LOG)
		return LOG_HDR_SIG;
	if (type == POOL_TYPE_BLK)
		return BLK_HDR_SIG;
	if (type == POOL_TYPE_OBJ)
		return OBJ_HDR_SIG;
	return NULL;
}
/*
 * pool_hdr_default -- return default pool header values
 *
 * Zeroes *hdrp, stamps the type signature and fills in the default
 * format version and feature flags for the given pool type.
 */
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
	memset(hdrp, 0, sizeof(*hdrp));
	const char *sig = pool_get_signature(type);
	ASSERTne(sig, NULL);
	memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);
	if (type == POOL_TYPE_LOG) {
		hdrp->major = LOG_FORMAT_MAJOR;
		hdrp->features = log_format_feat_default;
	} else if (type == POOL_TYPE_BLK) {
		hdrp->major = BLK_FORMAT_MAJOR;
		hdrp->features = blk_format_feat_default;
	} else if (type == POOL_TYPE_OBJ) {
		hdrp->major = OBJ_FORMAT_MAJOR;
		hdrp->features = obj_format_feat_default;
	}
	/* other types keep the zeroed major/features */
}
/*
 * pool_hdr_get_type -- return pool type based on pool header data
 *
 * Matches the header signature against each known pool signature.
 */
enum pool_type
pool_hdr_get_type(const struct pool_hdr *hdrp)
{
	if (memcmp(hdrp->signature, LOG_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
		return POOL_TYPE_LOG;
	if (memcmp(hdrp->signature, BLK_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
		return POOL_TYPE_BLK;
	if (memcmp(hdrp->signature, OBJ_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
		return POOL_TYPE_OBJ;
	return POOL_TYPE_UNKNOWN;
}
/*
 * pool_get_pool_type_str -- return human-readable pool type string
 */
const char *
pool_get_pool_type_str(enum pool_type type)
{
	if (type == POOL_TYPE_BTT)
		return "btt";
	if (type == POOL_TYPE_LOG)
		return "pmemlog";
	if (type == POOL_TYPE_BLK)
		return "pmemblk";
	if (type == POOL_TYPE_OBJ)
		return "pmemobj";
	return "unknown";
}
/*
 * pool_set_type -- get pool type of a poolset
 *
 * Reads the pool header from the first part of the first replica and
 * classifies it; returns POOL_TYPE_UNKNOWN when the header cannot be read.
 */
enum pool_type
pool_set_type(struct pool_set *set)
{
	struct pool_hdr hdr;
	/* open the first part file to read the pool header values */
	const struct pool_set_part *part = PART(REP(set, 0), 0);
	if (util_file_pread(part->path, &hdr, sizeof(hdr), 0) !=
		sizeof(hdr)) {
		ERR("cannot read pool header from poolset");
		return POOL_TYPE_UNKNOWN;
	}
	/* header is stored little-endian on media */
	util_convert2h_hdr_nocheck(&hdr);
	enum pool_type type = pool_hdr_get_type(&hdr);
	return type;
}
/*
 * pool_btt_info_valid -- check consistency of BTT Info header
 *
 * Returns nonzero when the signature matches and the checksum verifies.
 */
int
pool_btt_info_valid(struct btt_info *infop)
{
	int sig_ok = memcmp(infop->sig, BTTINFO_SIG, BTTINFO_SIG_LEN) == 0;
	if (!sig_ok)
		return 0;
	return util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0);
}
/*
 * pool_blk_get_first_valid_arena -- get first valid BTT Info in arena
 *
 * Fills arenap with the first valid BTT Info found past the pool header
 * area; arenap->zeroed also reports whether every candidate was zeroed.
 * Returns 1 when a valid arena was found, 0 otherwise.
 */
int
pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap)
{
	arenap->zeroed = true;
	/* start the scan past the two pool header blocks */
	uint64_t offset = pool_get_first_valid_btt(pool, &arenap->btt_info,
		2 * BTT_ALIGNMENT, &arenap->zeroed);
	if (offset != 0) {
		arenap->offset = offset;
		arenap->valid = true;
		return 1;
	}
	return 0;
}
/*
 * pool_next_arena_offset -- get offset of next arena
 *
 * Calculated offset is theoretical. Function does not check if such arena can
 * exist.
 */
uint64_t
pool_next_arena_offset(struct pool_data *pool, uint64_t offset)
{
	/* pool end rounded down to the BTT alignment */
	uint64_t lastoff = pool->set_file->size & ~(BTT_ALIGNMENT - 1);
	uint64_t candidate = offset + BTT_MAX_ARENA;
	return candidate < lastoff ? candidate : lastoff;
}
/*
 * pool_get_first_valid_btt -- return offset to first valid BTT Info
 *
 * - Return offset to valid BTT Info header in pool file.
 * - Start looking from given offset.
 * - Convert BTT Info header to host endianness.
 * - Return the BTT Info header by pointer.
 * - If zeroed pointer provided would check if all checked BTT Info are zeroed
 *   which is useful for BLK pools
 *
 * Returns 0 when no valid BTT Info was found.
 */
uint64_t
pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop,
	uint64_t offset, bool *zeroed)
{
	/* if we have valid arena get BTT Info header from it */
	if (pool->narenas != 0) {
		struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
		memcpy(infop, &arenap->btt_info, sizeof(*infop));
		return arenap->offset;
	}
	const size_t info_size = sizeof(*infop);
	/* theoretical offsets to BTT Info header and backup */
	uint64_t offsets[2] = {offset, 0};
	while (offsets[0] < pool->set_file->size) {
		/* calculate backup offset */
		offsets[1] = pool_next_arena_offset(pool, offsets[0]) -
			info_size;
		/* check both offsets: header and backup */
		for (int i = 0; i < 2; ++i) {
			/* unreadable candidate -- try the next one */
			if (pool_read(pool, infop, info_size, offsets[i]))
				continue;
			/* check if all possible BTT Info are zeroed */
			if (zeroed)
				*zeroed &= util_is_zeroed((const void *)infop,
					info_size);
			/* check if read BTT Info is valid */
			if (pool_btt_info_valid(infop)) {
				btt_info_convert2h(infop);
				return offsets[i];
			}
		}
		/* jump to next arena */
		offsets[0] += BTT_MAX_ARENA;
	}
	return 0;
}
/*
 * pool_get_min_size -- return the minimum pool size of a pool of a given type
 *
 * SIZE_MAX is returned for unknown types so callers always fail the check.
 */
size_t
pool_get_min_size(enum pool_type type)
{
	if (type == POOL_TYPE_LOG)
		return PMEMLOG_MIN_POOL;
	if (type == POOL_TYPE_BLK)
		return PMEMBLK_MIN_POOL;
	if (type == POOL_TYPE_OBJ)
		return PMEMOBJ_MIN_POOL;
	ERR("unknown type of a pool");
	return SIZE_MAX;
}
#if FAULT_INJECTION
/* test-only hooks forwarding fault-injection control to the core library */
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	core_inject_fault_at(type, nth, at);
}
int
pmempool_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif
| 24,738 | 21.009786 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem/pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem.c -- pmem entry points for libpmem
*
*
* PERSISTENT MEMORY INSTRUCTIONS ON X86
*
* The primary feature of this library is to provide a way to flush
* changes to persistent memory as outlined below (note that many
* of the decisions below are made at initialization time, and not
* repeated every time a flush is requested).
*
* To flush a range to pmem when CLWB is available:
*
* CLWB for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when CLFLUSHOPT is available and CLWB is not
* (same as above but issue CLFLUSHOPT instead of CLWB):
*
* CLFLUSHOPT for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when neither CLFLUSHOPT or CLWB are available
* (same as above but fences surrounding CLFLUSH are not required):
*
* CLFLUSH for each cache line in the given range.
*
* To memcpy a range of memory to pmem when MOVNT is available:
*
* Copy any non-64-byte portion of the destination using MOV.
*
* Use the flush flow above without the fence for the copied portion.
*
* Copy using MOVNTDQ, up to any non-64-byte aligned end portion.
* (The MOVNT instructions bypass the cache, so no flush is required.)
*
* Copy any unaligned end portion using MOV.
*
* Use the flush flow above for the copied portion (including fence).
*
* To memcpy a range of memory to pmem when MOVNT is not available:
*
* Just pass the call to the normal memcpy() followed by pmem_persist().
*
* To memset a non-trivial sized range of memory to pmem:
*
* Same as the memcpy cases above but store the given value instead
* of reading values from the source.
*
* These features are supported for ARM AARCH64 using equivalent ARM
* assembly instruction. Please refer to (arm_cacheops.h) for more details.
*
* INTERFACES FOR FLUSHING TO PERSISTENT MEMORY
*
* Given the flows above, three interfaces are provided for flushing a range
* so that the caller has the ability to separate the steps when necessary,
* but otherwise leaves the detection of available instructions to the libpmem:
*
* pmem_persist(addr, len)
*
* This is the common case, which just calls the two other functions:
*
* pmem_flush(addr, len);
* pmem_drain();
*
* pmem_flush(addr, len)
*
* CLWB or CLFLUSHOPT or CLFLUSH for each cache line
*
* pmem_drain()
*
* SFENCE unless using CLFLUSH
*
*
* INTERFACES FOR COPYING/SETTING RANGES OF MEMORY
*
* Given the flows above, the following interfaces are provided for the
* memmove/memcpy/memset operations to persistent memory:
*
* pmem_memmove_nodrain()
*
* Checks for overlapped ranges to determine whether to copy from
* the beginning of the range or from the end. If MOVNT instructions
* are available, uses the memory copy flow described above, otherwise
* calls the libc memmove() followed by pmem_flush(). Since no conditional
* compilation and/or architecture specific CFLAGS are in use at the
* moment, SSE2 ( thus movnt ) is just assumed to be available.
*
* pmem_memcpy_nodrain()
*
* Just calls pmem_memmove_nodrain().
*
* pmem_memset_nodrain()
*
* If MOVNT instructions are available, uses the memset flow described
* above, otherwise calls the libc memset() followed by pmem_flush().
*
* pmem_memmove_persist()
* pmem_memcpy_persist()
* pmem_memset_persist()
*
* Calls the appropriate _nodrain() function followed by pmem_drain().
*
*
* DECISIONS MADE AT INITIALIZATION TIME
*
* As much as possible, all decisions described above are made at library
* initialization time. This is achieved using function pointers that are
* setup by pmem_init() when the library loads.
*
* Func_fence is used by pmem_drain() to call one of:
* fence_empty()
* memory_barrier()
*
* Func_flush is used by pmem_flush() to call one of:
* flush_dcache()
* flush_dcache_invalidate_opt()
* flush_dcache_invalidate()
*
* Func_memmove_nodrain is used by memmove_nodrain() to call one of:
* memmove_nodrain_libc()
* memmove_nodrain_movnt()
*
* Func_memset_nodrain is used by memset_nodrain() to call one of:
* memset_nodrain_libc()
* memset_nodrain_movnt()
*
* DEBUG LOGGING
*
* Many of the functions here get called hundreds of times from loops
* iterating over ranges, making the usual LOG() calls at level 3
* impractical. The call tracing log for those functions is set at 15.
*/
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmem2_arch.h"
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "valgrind_internal.h"
#include "os_deep.h"
#include "auto_flush.h"
/* dispatch table filled in once by pmem_init() at library load time */
struct pmem_funcs {
	memmove_nodrain_func memmove_nodrain;	/* copy without final drain */
	memset_nodrain_func memset_nodrain;	/* set without final drain */
	flush_func deep_flush;	/* always flushes the CPU cache */
	flush_func flush;	/* may be a no-op when flushing is disabled */
	fence_func fence;	/* store fence used by pmem_drain() */
};
static struct pmem_funcs Funcs;
/* pmem detection routine; chosen in pmem_is_pmem_init()/pmem_os_init() */
static is_pmem_func Is_pmem = NULL;
/*
 * pmem_has_hw_drain -- return whether or not HW drain was found
 *
 * Always false for x86: HW drain is done by HW with no SW involvement.
 */
int
pmem_has_hw_drain(void)
{
	LOG(3, NULL);
	return 0;
}
/*
 * pmem_drain -- wait for any PM stores to drain from HW buffers
 *
 * Issues the fence selected at init time (a no-op when the flush
 * primitive already carries an implicit fence).
 */
void
pmem_drain(void)
{
	LOG(15, NULL);
	Funcs.fence();
}
/*
 * pmem_has_auto_flush -- check if platform supports eADR
 *
 * Returns the result of the platform probe in pmem2_auto_flush().
 */
int
pmem_has_auto_flush(void)	/* fix: '(void)' -- avoid obsolescent empty parameter list */
{
	LOG(3, NULL);
	return pmem2_auto_flush();
}
/*
 * pmem_deep_flush -- flush processor cache for the given range
 * regardless of eADR support on platform
 */
void
pmem_deep_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	/* deep_flush always flushes, even when Funcs.flush is a no-op */
	Funcs.deep_flush(addr, len);
}
/*
 * pmem_flush -- flush processor cache for the given range
 *
 * May be a no-op when eADR was detected or flushing was disabled via
 * PMEM_NO_FLUSH (see pmem_init()).
 */
void
pmem_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	Funcs.flush(addr, len);
}
/*
 * pmem_persist -- make any cached changes to a range of pmem persistent
 *
 * Equivalent to pmem_flush() followed by pmem_drain(). Only safe where
 * pmem_is_pmem() returns true; use pmem_msync() otherwise.
 */
void
pmem_persist(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	pmem_flush(addr, len);
	pmem_drain();
}
/*
 * pmem_msync -- flush to persistence via msync
 *
 * Using msync() means this routine is less optimal for pmem (but it
 * still works) but it also works for any memory mapped file, unlike
 * pmem_persist() which is only safe where pmem_is_pmem() returns true.
 *
 * Returns msync()'s result: 0 on success, -1 (errno set) on failure.
 */
int
pmem_msync(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	/*
	 * msync requires addr to be a multiple of pagesize but there are no
	 * requirements for len. Align addr down and change len so that
	 * [addr, addr + len) still contains initial range.
	 */
	/* increase len by the amount we gain when we round addr down */
	len += (uintptr_t)addr & (Pagesize - 1);
	/* round addr down to page boundary */
	uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1);
	/*
	 * msync accepts addresses aligned to page boundary, so we may sync
	 * more and part of it may have been marked as undefined/inaccessible
	 * Msyncing such memory is not a bug, so as a workaround temporarily
	 * disable error reporting.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	int ret;
	if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0)
		ERR("!msync");
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	/* full flush */
	VALGRIND_DO_PERSIST(uptr, len);
	return ret;
}
/*
 * is_pmem_always -- (internal) always true (for meaningful parameters) version
 * of pmem_is_pmem()
 */
static int
is_pmem_always(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	/* a zero-length range is never reported as pmem */
	return len == 0 ? 0 : 1;
}
/*
 * is_pmem_never -- (internal) never true version of pmem_is_pmem()
 *
 * Used when flushing is unsupported or forced off via PMEM_IS_PMEM_FORCE=0.
 */
static int
is_pmem_never(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	return 0;
}
/*
 * pmem_is_pmem_init -- (internal) initialize Func_is_pmem pointer
 *
 * This should be done only once - on the first call to pmem_is_pmem().
 * If PMEM_IS_PMEM_FORCE is set, it would override the default behavior
 * of pmem_is_pmem().
 */
static void
pmem_is_pmem_init(void)
{
	LOG(3, NULL);
	/* 0 = not started, 1 = in progress, 2 = done; CAS-guarded */
	static volatile unsigned init;
	while (init != 2) {
		/* lose the race -> spin until the winner finishes */
		if (!util_bool_compare_and_swap32(&init, 0, 1))
			continue;
		/*
		 * For debugging/testing, allow pmem_is_pmem() to be forced
		 * to always true or never true using environment variable
		 * PMEM_IS_PMEM_FORCE values of zero or one.
		 *
		 * This isn't #ifdef DEBUG because it has a trivial performance
		 * impact and it may turn out to be useful as a "chicken bit"
		 * for systems where pmem_is_pmem() isn't correctly detecting
		 * true persistent memory.
		 */
		char *ptr = os_getenv("PMEM_IS_PMEM_FORCE");
		if (ptr) {
			int val = atoi(ptr);
			if (val == 0)
				Is_pmem = is_pmem_never;
			else if (val == 1)
				Is_pmem = is_pmem_always;
			VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Is_pmem);
			LOG(4, "PMEM_IS_PMEM_FORCE=%d", val);
		}
		/* no flush primitive means nothing can be treated as pmem */
		if (Funcs.deep_flush == NULL)
			Is_pmem = is_pmem_never;
		if (!util_bool_compare_and_swap32(&init, 1, 2))
			FATAL("util_bool_compare_and_swap32");
	}
}
/*
 * pmem_is_pmem -- return true if entire range is persistent memory
 */
int
pmem_is_pmem(const void *addr, size_t len)
{
	LOG(10, "addr %p len %zu", addr, len);
	static int once;
	/* This is not thread-safe, but pmem_is_pmem_init() is. */
	if (once == 0) {
		pmem_is_pmem_init();
		util_fetch_and_add32(&once, 1);
	}
	VALGRIND_ANNOTATE_HAPPENS_AFTER(&Is_pmem);
	return Is_pmem(addr, len);
}
#define PMEM_FILE_ALL_FLAGS\
	(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
#define PMEM_DAX_VALID_FLAGS\
	(PMEM_FILE_CREATE|PMEM_FILE_SPARSE)
/*
 * pmem_map_fileU -- create or open the file and map it to memory
 *
 * Returns the mapped address (optionally reporting the mapped length via
 * *mapped_lenp and pmem-ness via *is_pmemp) or NULL with errno set.
 */
#ifndef _WIN32
static inline
#endif
void *
pmem_map_fileU(const char *path, size_t len, int flags,
	mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
	LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p "
		"is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp);
	int oerrno;
	int fd;
	/*
	 * NOTE(review): upstream PMDK opens O_RDONLY here and relies on the
	 * create path to add write access; this fork forces O_RDWR -- confirm
	 * whether read-only callers are expected.
	 */
	int open_flags = O_RDWR;
	int delete_on_err = 0;
	int file_type = util_file_get_type(path);
#ifdef _WIN32
	open_flags |= O_BINARY;
#endif
	if (file_type == OTHER_ERROR)
		return NULL;
	if (flags & ~(PMEM_FILE_ALL_FLAGS)) {
		ERR("invalid flag specified %x", flags);
		errno = EINVAL;
		return NULL;
	}
	if (file_type == TYPE_DEVDAX) {
		/* device dax is fixed-size; only a subset of flags applies */
		if (flags & ~(PMEM_DAX_VALID_FLAGS)) {
			ERR("flag unsupported for Device DAX %x", flags);
			errno = EINVAL;
			return NULL;
		} else {
			/* we are ignoring all of the flags */
			flags = 0;
			ssize_t actual_len = util_file_get_size(path);
			if (actual_len < 0) {
				ERR("unable to read Device DAX size");
				errno = EINVAL;
				return NULL;
			}
			if (len != 0 && len != (size_t)actual_len) {
				ERR("Device DAX length must be either 0 or "
					"the exact size of the device: %zu",
					actual_len);
				errno = EINVAL;
				return NULL;
			}
			len = 0;
		}
	}
	if (flags & PMEM_FILE_CREATE) {
		if ((os_off_t)len < 0) {
			ERR("invalid file length %zu", len);
			errno = EINVAL;
			return NULL;
		}
		open_flags |= O_CREAT;
	}
	if (flags & PMEM_FILE_EXCL)
		open_flags |= O_EXCL;
	/* 'len' and PMEM_FILE_CREATE must be specified together */
	if ((len != 0) && !(flags & PMEM_FILE_CREATE)) {
		ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if ((len == 0) && (flags & PMEM_FILE_CREATE)) {
		ERR("zero 'len' not allowed with PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) {
		ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if (flags & PMEM_FILE_TMPFILE) {
		if ((fd = util_tmpfile(path,
				OS_DIR_SEP_STR"pmem.XXXXXX",
				open_flags & O_EXCL)) < 0) {
			LOG(2, "failed to create temporary file at \"%s\"",
				path);
			return NULL;
		}
	} else {
		if ((fd = os_open(path, open_flags, mode)) < 0) {
			ERR("!open %s", path);
			return NULL;
		}
		/* a file we created exclusively is removed on failure */
		if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL))
			delete_on_err = 1;
	}
	if (flags & PMEM_FILE_CREATE) {
		/*
		 * Always set length of file to 'len'.
		 * (May either extend or truncate existing file.)
		 */
		if (os_ftruncate(fd, (os_off_t)len) != 0) {
			ERR("!ftruncate");
			goto err;
		}
		if ((flags & PMEM_FILE_SPARSE) == 0) {
			if ((errno = os_posix_fallocate(fd, 0,
					(os_off_t)len)) != 0) {
				ERR("!posix_fallocate");
				goto err;
			}
		}
	} else {
		/* existing file: map whatever size it currently has */
		ssize_t actual_size = util_fd_get_size(fd);
		if (actual_size < 0) {
			ERR("stat %s: negative size", path);
			errno = EINVAL;
			goto err;
		}
		len = (size_t)actual_size;
	}
	void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX);
	if (addr == NULL)
		goto err;
	if (mapped_lenp != NULL)
		*mapped_lenp = len;
	if (is_pmemp != NULL)
		*is_pmemp = pmem_is_pmem(addr, len);
	LOG(3, "returning %p", addr);
	VALGRIND_REGISTER_PMEM_MAPPING(addr, len);
	VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0);
	(void) os_close(fd);
	return addr;
err:
	/* preserve the causing errno across cleanup */
	oerrno = errno;
	(void) os_close(fd);
	if (delete_on_err)
		(void) os_unlink(path);
	errno = oerrno;
	return NULL;
}
#ifndef _WIN32
/*
 * pmem_map_file -- create or open the file and map it to memory
 */
void *
pmem_map_file(const char *path, size_t len, int flags,
	mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
	return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp);
}
#else
/*
 * pmem_map_fileW -- create or open the file and map it to memory
 *
 * Wide-character wrapper: converts the path to UTF-8 and delegates.
 */
void *
pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
	size_t *mapped_lenp, int *is_pmemp) {
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;
	void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp,
		is_pmemp);
	util_free_UTF8(upath);
	return ret;
}
#endif
/*
 * pmem_unmap -- unmap the specified region
 *
 * Returns the result of util_unmap(): 0 on success, -1 on failure.
 */
int
pmem_unmap(void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
#ifndef _WIN32
	util_range_unregister(addr, len);
#endif
	VALGRIND_REMOVE_PMEM_MAPPING(addr, len);
	return util_unmap(addr, len);
}
/*
 * pmem_memmove -- memmove to pmem
 *
 * Copies with the configured nodrain primitive, then drains unless the
 * caller asked for NODRAIN or NOFLUSH. Returns pmemdest.
 */
void *
pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
			pmemdest, src, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	/* NODRAIN is handled here, not by the copy primitive */
	Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy -- memcpy to pmem
 *
 * Same flow as pmem_memmove() (the underlying primitive handles overlap).
 * Returns pmemdest.
 */
void *
pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
			pmemdest, src, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	/* NODRAIN is handled here, not by the copy primitive */
	Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset -- memset to pmem
 *
 * Fills with the configured nodrain primitive, then drains unless the
 * caller asked for NODRAIN or NOFLUSH. Returns pmemdest.
 */
void *
pmem_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x",
			pmemdest, c, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	/* NODRAIN is handled here, not by the fill primitive */
	Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memmove_nodrain -- memmove to pmem without hw drain
 *
 * Caller is responsible for a subsequent pmem_drain(). Returns pmemdest.
 */
void *
pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy_nodrain -- memcpy to pmem without hw drain
 *
 * Caller is responsible for a subsequent pmem_drain(). Returns pmemdest.
 */
void *
pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memmove_persist -- memmove to pmem
 *
 * Copy plus drain; the result is persistent on return. Returns pmemdest.
 */
void *
pmem_memmove_persist(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy_persist -- memcpy to pmem
 *
 * Copy plus drain; the result is persistent on return. Returns pmemdest.
 */
void *
pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset_nodrain -- memset to pmem without hw drain
 *
 * Caller is responsible for a subsequent pmem_drain(). Returns pmemdest.
 */
void *
pmem_memset_nodrain(void *pmemdest, int c, size_t len)
{
	LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
	PMEM_API_START();
	Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset_persist -- memset to pmem
 *
 * Fill plus drain; the result is persistent on return. Returns pmemdest.
 */
void *
pmem_memset_persist(void *pmemdest, int c, size_t len)
{
	LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
	PMEM_API_START();
	Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * memmove_nodrain_libc -- (internal) memmove to pmem using libc
 *
 * Plain libc copy followed by a flush unless NOFLUSH was requested.
 */
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
	unsigned flags, flush_func flush)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
		flags);
	memmove(pmemdest, src, len);
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		flush(pmemdest, len);
	return pmemdest;
}
/*
 * memset_nodrain_libc -- (internal) memset to pmem using libc
 *
 * Plain libc fill followed by a flush unless NOFLUSH was requested.
 */
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
	flush_func flush)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
		flags);
	memset(pmemdest, c, len);
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		flush(pmemdest, len);
	return pmemdest;
}
/*
 * flush_empty -- (internal) do not flush the CPU cache
 *
 * Installed as Funcs.flush when eADR makes flushing unnecessary.
 */
static void
flush_empty(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	flush_empty_nolog(addr, len);
}
/*
 * fence_empty -- (internal) issue the fence instruction
 *
 * Installed when the flush primitive already carries a built-in fence;
 * only notifies valgrind.
 */
static void
fence_empty(void)
{
	LOG(15, NULL);
	VALGRIND_DO_FENCE;
}
/*
 * pmem_init -- load-time initialization for pmem.c
 *
 * Selects the flush/fence/copy primitives for the Funcs dispatch table
 * based on the detected architecture, eADR support and the PMEM_NO_FLUSH
 * / PMEM_NO_GENERIC_MEMCPY environment overrides.
 */
void
pmem_init(void)
{
	LOG(3, NULL);
	struct pmem2_arch_info info;
	info.memmove_nodrain = NULL;
	info.memset_nodrain = NULL;
	info.flush = NULL;
	info.fence = NULL;
	info.flush_has_builtin_fence = 0;
	pmem2_arch_init(&info);
	/* decide whether stores need an explicit CPU cache flush */
	int flush;
	char *e = os_getenv("PMEM_NO_FLUSH");
	if (e && (strcmp(e, "1") == 0)) {
		flush = 0;
		LOG(3, "Forced not flushing CPU_cache");
	} else if (e && (strcmp(e, "0") == 0)) {
		flush = 1;
		LOG(3, "Forced flushing CPU_cache");
	} else if (pmem2_auto_flush() == 1) {
		flush = 0;
		LOG(3, "Not flushing CPU_cache, eADR detected");
	} else {
		flush = 1;
		LOG(3, "Flushing CPU cache");
	}
	Funcs.deep_flush = info.flush;
	if (flush) {
		Funcs.flush = info.flush;
		Funcs.memmove_nodrain = info.memmove_nodrain;
		Funcs.memset_nodrain = info.memset_nodrain;
		if (info.flush_has_builtin_fence)
			Funcs.fence = fence_empty;
		else
			Funcs.fence = info.fence;
	} else {
		/* eADR: no flushing, use the eADR-optimized copy paths */
		Funcs.memmove_nodrain = info.memmove_nodrain_eadr;
		Funcs.memset_nodrain = info.memset_nodrain_eadr;
		Funcs.flush = flush_empty;
		Funcs.fence = info.fence;
	}
	/* fall back to libc or generic copy when arch gave us none */
	char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
	long long no_generic = 0;
	if (ptr)
		no_generic = atoll(ptr);
	if (info.memmove_nodrain == NULL) {
		if (no_generic) {
			Funcs.memmove_nodrain = memmove_nodrain_libc;
			LOG(3, "using libc memmove");
		} else {
			Funcs.memmove_nodrain = memmove_nodrain_generic;
			LOG(3, "using generic memmove");
		}
	} else {
		Funcs.memmove_nodrain = info.memmove_nodrain;
	}
	if (info.memset_nodrain == NULL) {
		if (no_generic) {
			Funcs.memset_nodrain = memset_nodrain_libc;
			LOG(3, "using libc memset");
		} else {
			Funcs.memset_nodrain = memset_nodrain_generic;
			LOG(3, "using generic memset");
		}
	} else {
		Funcs.memset_nodrain = info.memset_nodrain;
	}
	/* sanity check on the final dispatch table */
	if (Funcs.flush == flush_empty)
		LOG(3, "not flushing CPU cache");
	else if (Funcs.flush != Funcs.deep_flush)
		FATAL("invalid flush function address");
	pmem_os_init(&Is_pmem);
}
/*
 * pmem_deep_persist -- perform deep persist on a memory range
 *
 * It merely acts as wrapper around an msync call in most cases, the only
 * exception is the case of an mmap'ed DAX device on Linux.
 *
 * Returns the result of pmem_deep_drain().
 */
int
pmem_deep_persist(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	pmem_deep_flush(addr, len);
	return pmem_deep_drain(addr, len);
}
/*
 * pmem_deep_drain -- perform deep drain on a memory range
 *
 * Returns 0 on success, -1 on failure.
 */
int
pmem_deep_drain(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	return os_range_deep_common((uintptr_t)addr, len);
}
#if VG_PMEMCHECK_ENABLED
/*
 * pmem_emit_log -- logs library and function names to pmemcheck store log
 */
void
pmem_emit_log(const char *func, int order)
{
	/* tag the entry with this library's name */
	util_emit_log("libpmem", func, order);
}
#endif

#if FAULT_INJECTION
/*
 * pmem_inject_fault_at -- inject a fault of the given allocation type at
 * the nth occurrence of the named point; forwards to the core layer
 */
void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	core_inject_fault_at(type, nth, at);
}

/*
 * pmem_fault_injection_enabled -- nonzero when fault injection is enabled
 * in the core layer
 */
int
pmem_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif
| 21,858 | 21.817328 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem/pmem_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_windows.c -- pmem utilities with OS-specific implementation
*/
#include <memoryapi.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
#include "win_mmap.h"
#include "sys/mman.h"
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
typedef BOOL (WINAPI *PQVM)(
HANDLE, const void *,
enum WIN32_MEMORY_INFORMATION_CLASS, PVOID,
SIZE_T, PSIZE_T);
static PQVM Func_qvmi = NULL;
#endif
/*
 * is_direct_mapped -- (internal) for each page in the given region
 * checks with MM, if it's direct mapped.
 *
 * Returns 1 only when every page in [begin, end) is reported as
 * DirectMapped by QueryVirtualMemoryInformation; any query failure or
 * a missing API is treated as "not direct mapped" (returns 0).
 */
static int
is_direct_mapped(const void *begin, const void *end)
{
	LOG(3, "begin %p end %p", begin, end);

#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
	int retval = 1;
	WIN32_MEMORY_REGION_INFORMATION region_info;
	SIZE_T bytes_returned;

	/* Func_qvmi is resolved at runtime in pmem_os_init() */
	if (Func_qvmi == NULL) {
		LOG(4, "QueryVirtualMemoryInformation not supported, "
			"assuming non-DAX.");
		return 0;
	}

	/* widen the range to whole pages before the per-page query loop */
	const void *begin_aligned = (const void *)rounddown((intptr_t)begin,
		Pagesize);
	const void *end_aligned = (const void *)roundup((intptr_t)end,
		Pagesize);

	for (const void *page = begin_aligned;
			page < end_aligned;
			page = (const void *)((char *)page + Pagesize)) {
		if (Func_qvmi(GetCurrentProcess(), page,
				MemoryRegionInfo, &region_info,
				sizeof(region_info), &bytes_returned)) {
			retval = region_info.DirectMapped;
		} else {
			/* a failed query is conservatively non-DAX */
			LOG(4, "QueryVirtualMemoryInformation failed, assuming "
				"non-DAX. Last error: %08x", GetLastError());
			retval = 0;
		}

		/* first non-direct-mapped page decides the answer */
		if (retval == 0) {
			LOG(4, "page %p is not direct mapped", page);
			break;
		}
	}
	return retval;
#else
	/* if the MM API is not available the safest answer is NO */
	return 0;
#endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */
}
/*
 * is_pmem_detect -- implement pmem_is_pmem()
 *
 * This function returns true only if the entire range can be confirmed
 * as being direct access persistent memory. Finding any part of the
 * range is not direct access, or failing to look up the information
 * because it is unmapped or because any sort of error happens, just
 * results in returning false.
 *
 * Walks the sorted file-mapping tracker list under a shared SRW lock;
 * gaps between tracked mappings are verified page-by-page through
 * is_direct_mapped().
 */
int
is_pmem_detect(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);

	if (len == 0)
		return 0;

	/* clamp so that addr + len cannot wrap around the address space */
	if (len > UINTPTR_MAX - (uintptr_t)addr) {
		len = UINTPTR_MAX - (uintptr_t)addr;
		LOG(4, "limit len to %zu to not get beyond address space", len);
	}

	int retval = 1;
	const void *begin = addr;
	const void *end = (const void *)((char *)addr + len);

	LOG(4, "begin %p end %p", begin, end);

	AcquireSRWLockShared(&FileMappingQLock);

	PFILE_MAPPING_TRACKER mt;
	PMDK_SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) {
		/* the queue is sorted; entries past 'end' are irrelevant */
		if (mt->BaseAddress >= end) {
			LOG(4, "ignoring all mapped ranges beyond given range");
			break;
		}

		if (mt->EndAddress <= begin) {
			LOG(4, "skipping all mapped ranges before given range");
			continue;
		}

		if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) {
			LOG(4, "tracked range [%p, %p) is not direct mapped",
				mt->BaseAddress, mt->EndAddress);
			retval = 0;
			break;
		}

		/*
		 * If there is a gap between the given region that we process
		 * currently and the mapped region in our tracking list, we
		 * need to process the gap by taking the long route of asking
		 * MM for each page in that range.
		 */
		if (begin < mt->BaseAddress &&
				!is_direct_mapped(begin, mt->BaseAddress)) {
			LOG(4, "untracked range [%p, %p) is not direct mapped",
				begin, mt->BaseAddress);
			retval = 0;
			break;
		}

		/* push our begin to reflect what we have already processed */
		begin = mt->EndAddress;
	}

	/*
	 * If we still have a range to verify, check with MM if the entire
	 * region is direct mapped.
	 */
	if (begin < end && !is_direct_mapped(begin, end)) {
		LOG(4, "untracked end range [%p, %p) is not direct mapped",
			begin, end);
		retval = 0;
	}

	ReleaseSRWLockShared(&FileMappingQLock);

	LOG(4, "returning %d", retval);
	return retval;
}
/*
 * pmem_map_register -- memory map file and register mapping
 *
 * Windows variant: maps the whole file with util_map(). The 'path'
 * parameter is not used on this code path; presumably kept for
 * interface parity with the POSIX implementation -- TODO confirm.
 */
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
	/* there is no device dax on windows */
	ASSERTeq(is_dev_dax, 0);

	return util_map(fd, 0, len, MAP_SHARED, 0, 0, NULL);
}
/*
 * pmem_os_init -- os-dependent part of pmem initialization
 *
 * Installs is_pmem_detect() as the pmem-detection callback and, on
 * builds targeting Windows 10 RS1 or newer, resolves
 * QueryVirtualMemoryInformation from KernelBase.dll at runtime
 * (Func_qvmi stays NULL when the export is missing).
 */
void
pmem_os_init(is_pmem_func *func)
{
	LOG(3, NULL);

	*func = is_pmem_detect;
#if NTDDI_VERSION >= NTDDI_WIN10_RS1
	Func_qvmi = (PQVM)GetProcAddress(
		GetModuleHandle(TEXT("KernelBase.dll")),
		"QueryVirtualMemoryInformation");
#endif
}
| 6,186 | 27.643519 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/auto_flush_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
#ifndef PMEM2_AUTO_FLUSH_WINDOWS_H
#define PMEM2_AUTO_FLUSH_WINDOWS_H 1

#define ACPI_SIGNATURE 0x41435049 /* hex value of ACPI signature */
#define NFIT_REV_SIGNATURE 0x5449464e /* hex value of htonl(NFIT) signature */
#define NFIT_STR_SIGNATURE "NFIT"

#define NFIT_SIGNATURE_LEN 4
#define NFIT_OEM_ID_LEN 6
#define NFIT_OEM_TABLE_ID_LEN 8
#define NFIT_MAX_STRUCTURES 8

#define PCS_RESERVED 3
#define PCS_RESERVED_2 4
#define PCS_TYPE_NUMBER 7

/* check if bit on 'bit' position in number 'num' is set */
#define CHECK_BIT(num, bit) (((num) >> (bit)) & 1)

/*
 * sets alignment of members of structure
 */
#pragma pack(1)

/* platform capabilities structure; field sizes match the on-disk layout */
struct platform_capabilities
{
	uint16_t type;		/* structure type id */
	uint16_t length;	/* structure length in bytes */
	uint8_t highest_valid;	/* highest valid bit of 'capabilities' */
	uint8_t reserved[PCS_RESERVED];
	uint32_t capabilities;	/* capability bits, tested via CHECK_BIT() */
	uint8_t reserved2[PCS_RESERVED_2];
};

/* NFIT table header; signature is expected to read "NFIT" */
struct nfit_header
{
	uint8_t signature[NFIT_SIGNATURE_LEN];
	uint32_t length;	/* total table length in bytes */
	uint8_t revision;
	uint8_t checksum;
	uint8_t oem_id[NFIT_OEM_ID_LEN];
	uint8_t oem_table_id[NFIT_OEM_TABLE_ID_LEN];
	uint32_t oem_revision;
	uint8_t creator_id[4];
	uint32_t creator_revision;
	uint32_t reserved;
};
#pragma pack()

#endif
| 1,215 | 22.843137 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/deep_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush_linux.c -- deep_flush functionality
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "map.h"
#include "os.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
/*
 * pmem2_deep_flush_write -- perform write to deep_flush file
 * on given region_id
 *
 * Talks to the Linux libnvdimm sysfs attribute
 * /sys/bus/nd/devices/region<id>/deep_flush: a read of "0\n" means the
 * platform requires no deep flush; otherwise "1" is written to trigger
 * one. I/O failures on the sysfs file are logged and ignored (return
 * 0) -- only a failure to even build the path is reported as an error.
 */
int
pmem2_deep_flush_write(unsigned region_id)
{
	/* region_id is unsigned -- log it with %u, not %d */
	LOG(3, "region_id %u", region_id);

	char deep_flush_path[PATH_MAX];
	int deep_flush_fd;
	char rbuf[2];

	if (util_snprintf(deep_flush_path, PATH_MAX,
		"/sys/bus/nd/devices/region%u/deep_flush", region_id) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if ((deep_flush_fd = os_open(deep_flush_path, O_RDONLY)) < 0) {
		LOG(1, "!os_open(\"%s\", O_RDONLY)", deep_flush_path);
		return 0;
	}

	/* expect exactly two bytes: the flag character and a newline */
	if (read(deep_flush_fd, rbuf, sizeof(rbuf)) != 2) {
		LOG(1, "!read(%d)", deep_flush_fd);
		goto end;
	}

	if (rbuf[0] == '0' && rbuf[1] == '\n') {
		LOG(3, "Deep flushing not needed");
		goto end;
	}

	/* reopen for writing; sysfs attributes are not opened O_RDWR here */
	os_close(deep_flush_fd);

	if ((deep_flush_fd = os_open(deep_flush_path, O_WRONLY)) < 0) {
		LOG(1, "Cannot open deep_flush file %s to write",
			deep_flush_path);
		return 0;
	}

	if (write(deep_flush_fd, "1", 1) != 1) {
		LOG(1, "Cannot write to deep_flush file %d", deep_flush_fd);
		goto end;
	}

end:
	os_close(deep_flush_fd);
	return 0;
}
/*
 * pmem2_deep_flush_dax -- reads file type for map and check
 * if it is device dax or reg file, depend on file type
 * performs proper flush operation
 *
 * For a regular (fsdax) file the OS file buffers are flushed; for a
 * device DAX mapping the region id is looked up and its sysfs
 * deep_flush attribute is written. Any other file type is a caller
 * bug (assert).
 *
 * Fix vs. previous revision: the DEVDAX branch declared a second
 * 'int ret', shadowing the function-scope one (-Wshadow); the outer
 * variable is now used throughout. Behavior is unchanged.
 */
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
	int ret;
	enum pmem2_file_type type = map->source.value.ftype;

	if (type == PMEM2_FTYPE_REG) {
		/* fsdax: flushing file buffers is sufficient */
		ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
		if (ret) {
			LOG(1, "cannot flush buffers addr %p len %zu",
				ptr, size);
			return ret;
		}
	} else if (type == PMEM2_FTYPE_DEVDAX) {
		unsigned region_id;

		ret = pmem2_get_region_id(&map->source, &region_id);
		if (ret < 0) {
			LOG(1, "cannot find region id for dev %lu",
				map->source.value.st_rdev);
			return ret;
		}

		ret = pmem2_deep_flush_write(region_id);
		if (ret) {
			LOG(1, "cannot write to deep_flush file for region %d",
				region_id);
			return ret;
		}
	} else {
		/* unreachable for the supported source types */
		ASSERT(0);
	}

	return 0;
}
| 2,395 | 20.392857 | 67 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
 * config.h -- internal definitions for pmem2_config
 */
#ifndef PMEM2_CONFIG_H
#define PMEM2_CONFIG_H

#include "libpmem2.h"

#define PMEM2_GRANULARITY_INVALID ((enum pmem2_granularity) (-1))
#define PMEM2_ADDRESS_ANY 0 /* default value of the address request type */

/* user-supplied parameters describing how a source should be mapped */
struct pmem2_config {
	/* offset from the beginning of the file */
	size_t offset;
	size_t length; /* length of the mapping */
	void *addr; /* address of the mapping */
	int addr_request; /* address request type */
	/* persistence granularity requested by user */
	enum pmem2_granularity requested_max_granularity;
	enum pmem2_sharing_type sharing; /* the way the file will be mapped */
	unsigned protection_flag;
};

/* reset a config to its default values */
void pmem2_config_init(struct pmem2_config *cfg);

int pmem2_config_validate_length(const struct pmem2_config *cfg,
	size_t file_len, size_t alignment);

int pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
	const struct pmem2_source *src);

#endif /* PMEM2_CONFIG_H */
| 1,070 | 28.75 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
 * map.h -- internal definitions for libpmem2
 */
#ifndef PMEM2_MAP_H
#define PMEM2_MAP_H

#include <stddef.h>
#include <stdbool.h>
#include "libpmem2.h"
#include "os.h"
#include "source.h"

#ifdef _WIN32
#include <windows.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* signature of the granularity-specific deep-flush implementation */
typedef int (*pmem2_deep_flush_fn)(struct pmem2_map *map,
	void *ptr, size_t size);

/* one active mapping plus its granularity-resolved operations */
struct pmem2_map {
	void *addr; /* base address */
	size_t reserved_length; /* length of the mapping reservation */
	size_t content_length; /* length of the mapped content */
	/* effective persistence granularity */
	enum pmem2_granularity effective_granularity;

	/* operations selected to match effective_granularity */
	pmem2_persist_fn persist_fn;
	pmem2_flush_fn flush_fn;
	pmem2_drain_fn drain_fn;
	pmem2_deep_flush_fn deep_flush_fn;

	pmem2_memmove_fn memmove_fn;
	pmem2_memcpy_fn memcpy_fn;
	pmem2_memset_fn memset_fn;

	struct pmem2_source source; /* the source this mapping came from */
};

enum pmem2_granularity get_min_granularity(bool eADR, bool is_pmem,
	enum pmem2_sharing_type sharing);

/* registry of live mappings -- lookup, registration and lifecycle */
struct pmem2_map *pmem2_map_find(const void *addr, size_t len);
int pmem2_register_mapping(struct pmem2_map *map);
int pmem2_unregister_mapping(struct pmem2_map *map);
void pmem2_map_init(void);
void pmem2_map_fini(void);

int pmem2_validate_offset(const struct pmem2_config *cfg,
	size_t *offset, size_t alignment);

#ifdef __cplusplus
}
#endif

#endif /* map.h */
| 1,426 | 22.016129 | 67 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/deep_flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * deep_flush.h -- functions for deep flush functionality
 */
#ifndef PMEM2_DEEP_FLUSH_H
#define PMEM2_DEEP_FLUSH_H 1

#include "map.h"

#ifdef __cplusplus
extern "C" {
#endif

/* write "1" to the sysfs deep_flush attribute of the given region */
int pmem2_deep_flush_write(unsigned region_id);
/* deep flush dispatched on the source file type (fsdax vs devdax) */
int pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size);
/* per-granularity deep-flush implementations (page/cache-line/byte) */
int pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size);

#ifdef __cplusplus
}
#endif

#endif
| 644 | 22.035714 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/persist.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist.c -- pmem2_get_[persist|flush|drain]_fn
*/
#include <errno.h>
#include <stdlib.h>
#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "os.h"
#include "persist.h"
#include "deep_flush.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
static struct pmem2_arch_info Info;
/*
 * memmove_nodrain_libc -- (internal) memmove to pmem using libc
 *
 * Copies with plain memmove() and then flushes the destination via the
 * supplied callback, unless PMEM2_F_MEM_NOFLUSH was requested. No drain
 * is performed here.
 */
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
	unsigned flags, flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
			flags);

	memmove(pmemdest, src, len);

	unsigned noflush = flags & PMEM2_F_MEM_NOFLUSH;
	if (noflush == 0)
		flush(pmemdest, len);

	return pmemdest;
}
/*
 * memset_nodrain_libc -- (internal) memset to pmem using libc
 *
 * Fills with plain memset() and then flushes the destination via the
 * supplied callback, unless PMEM2_F_MEM_NOFLUSH was requested. No drain
 * is performed here.
 */
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
	flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
			flags);

	memset(pmemdest, c, len);

	unsigned noflush = flags & PMEM2_F_MEM_NOFLUSH;
	if (noflush == 0)
		flush(pmemdest, len);

	return pmemdest;
}
/*
 * pmem2_persist_init -- initialize persist module
 *
 * Clears the arch-info table, lets pmem2_arch_init() fill in the
 * platform-optimized routines, and falls back to either the libc or
 * the generic memmove/memset implementations for any slot the arch
 * layer left empty. PMEM_NO_GENERIC_MEMCPY (non-zero) selects the
 * libc fallback over the generic one.
 */
void
pmem2_persist_init(void)
{
	Info.memmove_nodrain = NULL;
	Info.memset_nodrain = NULL;
	Info.memmove_nodrain_eadr = NULL;
	Info.memset_nodrain_eadr = NULL;
	Info.flush = NULL;
	Info.fence = NULL;
	Info.flush_has_builtin_fence = 0;

	pmem2_arch_init(&Info);

	char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
	long long no_generic = ptr ? atoll(ptr) : 0;

	if (Info.memmove_nodrain == NULL) {
		if (no_generic) {
			Info.memmove_nodrain = memmove_nodrain_libc;
			Info.memmove_nodrain_eadr = memmove_nodrain_libc;
			LOG(3, "using libc memmove");
		} else {
			Info.memmove_nodrain = memmove_nodrain_generic;
			Info.memmove_nodrain_eadr = memmove_nodrain_generic;
			LOG(3, "using generic memmove");
		}
	}

	if (Info.memset_nodrain == NULL) {
		if (no_generic) {
			Info.memset_nodrain = memset_nodrain_libc;
			Info.memset_nodrain_eadr = memset_nodrain_libc;
			LOG(3, "using libc memset");
		} else {
			Info.memset_nodrain = memset_nodrain_generic;
			Info.memset_nodrain_eadr = memset_nodrain_generic;
			LOG(3, "using generic memset");
		}
	}
}
/*
 * pmem2_drain -- wait for any PM stores to drain from HW buffers
 */
static void
pmem2_drain(void)
{
	LOG(15, NULL);

	/* fence routine installed by pmem2_arch_init() */
	Info.fence();
}

/*
 * pmem2_log_flush -- log the flush attempt for the given range
 */
static inline void
pmem2_log_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
}

/*
 * pmem2_flush_nop -- NOP version of the flush routine, used in cases where
 * memory behind the mapping is already in persistence domain
 */
static void
pmem2_flush_nop(const void *addr, size_t len)
{
	pmem2_log_flush(addr, len);

	/* nothing more to do, other than telling pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, len);
}

/*
 * pmem2_flush_cpu_cache -- flush processor cache for the given range
 */
static void
pmem2_flush_cpu_cache(const void *addr, size_t len)
{
	pmem2_log_flush(addr, len);

	/* arch-specific flush (e.g. CLWB/CLFLUSHOPT) from Info */
	Info.flush(addr, len);
}

/*
 * pmem2_persist_noflush -- make all changes to a range of pmem persistent
 * (byte granularity: no cache flush needed, only a drain)
 */
static void
pmem2_persist_noflush(const void *addr, size_t len)
{
	pmem2_flush_nop(addr, len);
	pmem2_drain();
}

/*
 * pmem2_persist_cpu_cache -- make all changes to a range of pmem persistent
 * (cache-line granularity: flush the CPU cache, then drain)
 */
static void
pmem2_persist_cpu_cache(const void *addr, size_t len)
{
	pmem2_flush_cpu_cache(addr, len);
	pmem2_drain();
}
/*
 * pmem2_flush_file_buffers -- flush CPU and OS caches for the given range
 *
 * Walks every registered mapping overlapping [addr, addr + len) and
 * flushes each piece through pmem2_flush_file_buffers_os(). Unmapped
 * holes inside the range are skipped. Returns the last non-zero status
 * (all pieces are attempted regardless of earlier failures); errno is
 * preserved across the call.
 */
static int
pmem2_flush_file_buffers(const void *addr, size_t len, int autorestart)
{
	int olderrno = errno;

	pmem2_log_flush(addr, len);

	/*
	 * Flushing using OS-provided mechanisms requires that the address
	 * be a multiple of the page size.
	 * Align address down and change len so that [addr, addr + len) still
	 * contains the initial range.
	 */

	/* round address down to page boundary */
	uintptr_t new_addr = ALIGN_DOWN((uintptr_t)addr, Pagesize);

	/* increase len by the amount we gain when we round addr down */
	len += (uintptr_t)addr - new_addr;

	addr = (const void *)new_addr;

	int ret = 0;

	/*
	 * Find all the mappings overlapping with the [addr, addr + len) range
	 * and flush them, one by one.
	 */
	do {
		struct pmem2_map *map = pmem2_map_find(addr, len);
		if (!map)
			break;

		size_t flush;
		size_t remaining = map->reserved_length;
		if (map->addr < addr) {
			/*
			 * Addr is inside of the mapping, so we have to decrease
			 * the remaining length by an offset from the start
			 * of our mapping.
			 */
			remaining -= (uintptr_t)addr - (uintptr_t)map->addr;
		} else if (map->addr == addr) {
			/* perfect match, there's nothing to do in this case */
		} else {
			/*
			 * map->addr > addr, so we have to skip the hole
			 * between addr and map->addr.
			 */
			len -= (uintptr_t)map->addr - (uintptr_t)addr;
			addr = map->addr;
		}

		if (len > remaining)
			flush = remaining;
		else
			flush = len;

		/* keep going on failure, but remember the error */
		int ret1 = pmem2_flush_file_buffers_os(map, addr, flush,
			autorestart);
		if (ret1 != 0)
			ret = ret1;

		addr = ((const char *)addr) + flush;
		len -= flush;
	} while (len > 0);

	errno = olderrno;

	return ret;
}
/*
 * pmem2_persist_pages -- flush processor cache for the given range
 * (page granularity: delegates to the OS file-buffer flush; aborts on
 * failure because the error cannot be propagated to the caller)
 */
static void
pmem2_persist_pages(const void *addr, size_t len)
{
	/*
	 * Restarting on EINTR in general is a bad idea, but we don't have
	 * any way to communicate the failure outside.
	 */
	const int autorestart = 1;

	int ret = pmem2_flush_file_buffers(addr, len, autorestart);
	if (ret) {
		/*
		 * 1) There's no way to propagate this error. Silently ignoring
		 * it would lead to data corruption.
		 * 2) non-pmem code path shouldn't be used in production.
		 *
		 * The only sane thing to do is to crash the application. Sorry.
		 */
		abort();
	}
}

/*
 * pmem2_drain_nop -- variant of pmem2_drain for page granularity;
 * it is a NOP because the flush part has built-in drain
 */
static void
pmem2_drain_nop(void)
{
	LOG(15, NULL);
}

/*
 * pmem2_deep_flush_page -- do nothing - pmem2_persist_fn already did msync
 */
int
pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	return 0;
}
/*
 * pmem2_deep_flush_cache -- flush buffers for fsdax or write
 * to deep_flush for DevDax
 */
int
pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	enum pmem2_file_type type = map->source.value.ftype;

	/*
	 * XXX: this should be moved to pmem2_deep_flush_dax
	 * while refactoring abstraction
	 */
	if (type == PMEM2_FTYPE_DEVDAX)
		pmem2_persist_cpu_cache(ptr, size);

	int ret = pmem2_deep_flush_dax(map, ptr, size);
	if (ret < 0) {
		LOG(1, "cannot perform deep flush cache for map %p", map);
		return ret;
	}

	return 0;
}

/*
 * pmem2_deep_flush_byte -- flush cpu cache and perform deep flush for dax
 *
 * Rejects anonymous sources (nothing to deep-flush); otherwise behaves
 * like the cache-granularity variant.
 */
int
pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	if (map->source.type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support deep flush");
		return PMEM2_E_NOSUPP;
	}

	ASSERT(map->source.type == PMEM2_SOURCE_FD ||
		map->source.type == PMEM2_SOURCE_HANDLE);

	enum pmem2_file_type type = map->source.value.ftype;

	/*
	 * XXX: this should be moved to pmem2_deep_flush_dax
	 * while refactoring abstraction
	 */
	if (type == PMEM2_FTYPE_DEVDAX)
		pmem2_persist_cpu_cache(ptr, size);

	int ret = pmem2_deep_flush_dax(map, ptr, size);
	if (ret < 0) {
		LOG(1, "cannot perform deep flush byte for map %p", map);
		return ret;
	}

	return 0;
}
/*
 * pmem2_set_flush_fns -- set function pointers related to flushing
 *
 * Selects persist/flush/drain/deep-flush implementations matching the
 * mapping's effective granularity. An unknown granularity aborts.
 */
void
pmem2_set_flush_fns(struct pmem2_map *map)
{
	switch (map->effective_granularity) {
	case PMEM2_GRANULARITY_PAGE:
		map->persist_fn = pmem2_persist_pages;
		map->flush_fn = pmem2_persist_pages;
		map->drain_fn = pmem2_drain_nop;
		map->deep_flush_fn = pmem2_deep_flush_page;
		break;
	case PMEM2_GRANULARITY_CACHE_LINE:
		map->persist_fn = pmem2_persist_cpu_cache;
		map->flush_fn = pmem2_flush_cpu_cache;
		map->drain_fn = pmem2_drain;
		map->deep_flush_fn = pmem2_deep_flush_cache;
		break;
	case PMEM2_GRANULARITY_BYTE:
		map->persist_fn = pmem2_persist_noflush;
		map->flush_fn = pmem2_flush_nop;
		map->drain_fn = pmem2_drain;
		map->deep_flush_fn = pmem2_deep_flush_byte;
		break;
	default:
		abort();
	}
}

/*
 * pmem2_get_persist_fn - return a pointer to a function responsible for
 * persisting data in range owned by pmem2_map
 */
pmem2_persist_fn
pmem2_get_persist_fn(struct pmem2_map *map)
{
	return map->persist_fn;
}

/*
 * pmem2_get_flush_fn - return a pointer to a function responsible for
 * flushing data in range owned by pmem2_map
 */
pmem2_flush_fn
pmem2_get_flush_fn(struct pmem2_map *map)
{
	return map->flush_fn;
}

/*
 * pmem2_get_drain_fn - return a pointer to a function responsible for
 * draining flushes in range owned by pmem2_map
 */
pmem2_drain_fn
pmem2_get_drain_fn(struct pmem2_map *map)
{
	return map->drain_fn;
}
/*
 * pmem2_memmove_nonpmem -- mem[move|cpy] followed by an msync
 * (page granularity: NODRAIN is masked out since the msync-based
 * persist below is unconditional anyway)
 */
static void *
pmem2_memmove_nonpmem(void *pmemdest, const void *src, size_t len,
	unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain(pmemdest, src, len, flags & ~PMEM2_F_MEM_NODRAIN,
			Info.flush);

	pmem2_persist_pages(pmemdest, len);

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset_nonpmem -- memset followed by an msync
 * (page granularity counterpart of pmem2_memmove_nonpmem)
 */
static void *
pmem2_memset_nonpmem(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain(pmemdest, c, len, flags & ~PMEM2_F_MEM_NODRAIN,
			Info.flush);

	pmem2_persist_pages(pmemdest, len);

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}

/*
 * pmem2_memmove -- mem[move|cpy] to pmem
 * (cache-line granularity: drain afterwards unless the caller opted out
 * with NODRAIN or NOFLUSH)
 */
static void *
pmem2_memmove(void *pmemdest, const void *src, size_t len,
	unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain(pmemdest, src, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset -- memset to pmem
 * (cache-line granularity counterpart of pmem2_memmove)
 */
static void *
pmem2_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain(pmemdest, c, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}

/*
 * pmem2_memmove_eadr -- mem[move|cpy] to pmem, platform supports eADR
 * (uses the eADR variant of the nodrain routine)
 */
static void *
pmem2_memmove_eadr(void *pmemdest, const void *src, size_t len,
	unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memmove");
	Info.memmove_nodrain_eadr(pmemdest, src, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memmove");
	return pmemdest;
}

/*
 * pmem2_memset_eadr -- memset to pmem, platform supports eADR
 * (uses the eADR variant of the nodrain routine)
 */
static void *
pmem2_memset_eadr(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM2_API_START("pmem2_memset");
	Info.memset_nodrain_eadr(pmemdest, c, len, flags, Info.flush);
	if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
		pmem2_drain();

	PMEM2_API_END("pmem2_memset");
	return pmemdest;
}
/*
 * pmem2_set_mem_fns -- set function pointers related to mem[move|cpy|set]
 *
 * Selects the memory-operation implementations matching the mapping's
 * effective granularity. An unknown granularity aborts.
 */
void
pmem2_set_mem_fns(struct pmem2_map *map)
{
	switch (map->effective_granularity) {
	case PMEM2_GRANULARITY_PAGE:
		map->memmove_fn = pmem2_memmove_nonpmem;
		map->memcpy_fn = pmem2_memmove_nonpmem;
		map->memset_fn = pmem2_memset_nonpmem;
		break;
	case PMEM2_GRANULARITY_CACHE_LINE:
		map->memmove_fn = pmem2_memmove;
		map->memcpy_fn = pmem2_memmove;
		map->memset_fn = pmem2_memset;
		break;
	case PMEM2_GRANULARITY_BYTE:
		map->memmove_fn = pmem2_memmove_eadr;
		map->memcpy_fn = pmem2_memmove_eadr;
		map->memset_fn = pmem2_memset_eadr;
		break;
	default:
		abort();
	}
}

/*
 * pmem2_get_memmove_fn - return a pointer to a function
 */
pmem2_memmove_fn
pmem2_get_memmove_fn(struct pmem2_map *map)
{
	return map->memmove_fn;
}

/*
 * pmem2_get_memcpy_fn - return a pointer to a function
 */
pmem2_memcpy_fn
pmem2_get_memcpy_fn(struct pmem2_map *map)
{
	return map->memcpy_fn;
}

/*
 * pmem2_get_memset_fn - return a pointer to a function
 */
pmem2_memset_fn
pmem2_get_memset_fn(struct pmem2_map *map)
{
	return map->memset_fn;
}

#if VG_PMEMCHECK_ENABLED
/*
 * pmem2_emit_log -- logs library and function names to pmemcheck store log
 */
void
pmem2_emit_log(const char *func, int order)
{
	/* tag the entry with this library's name */
	util_emit_log("libpmem2", func, order);
}
#endif
| 13,665 | 21.58843 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/persist_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist_posix.c -- POSIX-specific part of persist implementation
*/
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
/*
 * pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
 * range
 *
 * Issues msync(MS_SYNC) on the range; when 'autorestart' is nonzero the
 * call is retried after EINTR. Returns 0 on success or PMEM2_E_ERRNO on
 * failure. The 'map' parameter is unused on this (POSIX) code path.
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
	int autorestart)
{
	/*
	 * msync accepts addresses aligned to the page boundary, so we may sync
	 * more and part of it may have been marked as undefined/inaccessible.
	 * Msyncing such memory is not a bug, so as a workaround temporarily
	 * disable error reporting.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;

	int ret;
	do {
		ret = msync((void *)addr, len, MS_SYNC);

		if (ret < 0) {
			ERR("!msync");
		} else {
			/* full flush */
			VALGRIND_DO_PERSIST((uintptr_t)addr, len);
		}
	} while (autorestart && ret < 0 && errno == EINTR);

	VALGRIND_DO_ENABLE_ERROR_REPORTING;

	if (ret)
		return PMEM2_E_ERRNO;

	return 0;
}
| 1,126 | 21.098039 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/pmem2_utils_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "source.h"
/*
 * pmem2_get_type_from_stat -- determine type of file based on output of stat
 * syscall
 *
 * Regular files and directories are classified directly from st_mode.
 * A character device is classified as PMEM2_FTYPE_DEVDAX when its
 * /sys/dev/char/<maj>:<min>/subsystem link resolves to a path whose
 * last component is "dax"; anything else is rejected with
 * PMEM2_E_INVALID_FILE_TYPE.
 */
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
	if (S_ISREG(st->st_mode)) {
		*type = PMEM2_FTYPE_REG;
		return 0;
	}

	if (S_ISDIR(st->st_mode)) {
		*type = PMEM2_FTYPE_DIR;
		return 0;
	}

	if (!S_ISCHR(st->st_mode)) {
		ERR("file type 0%o not supported", st->st_mode & S_IFMT);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	char spath[PATH_MAX];
	int ret = util_snprintf(spath, PATH_MAX,
		"/sys/dev/char/%u:%u/subsystem",
		os_major(st->st_rdev), os_minor(st->st_rdev));
	if (ret < 0) {
		/* impossible */
		ERR("!snprintf");
		ASSERTinfo(0, "snprintf failed");
		return PMEM2_E_ERRNO;
	}

	LOG(4, "device subsystem path \"%s\"", spath);

	char npath[PATH_MAX];
	char *rpath = realpath(spath, npath);
	if (rpath == NULL) {
		ERR("!realpath \"%s\"", spath);
		return PMEM2_E_ERRNO;
	}

	/* the subsystem symlink of a device dax char device ends in "/dax" */
	char *basename = strrchr(rpath, '/');
	if (!basename || strcmp("dax", basename + 1) != 0) {
		LOG(3, "%s path does not match device dax prefix path", rpath);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	*type = PMEM2_FTYPE_DEVDAX;

	return 0;
}
| 1,507 | 20.239437 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/source_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* source_windows.c -- windows specific pmem2_source implementation
*/
#include <Windows.h>
#include "config.h"
#include "libpmem2.h"
#include "config.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
*src = NULL;
if (fd < 0)
return PMEM2_E_INVALID_FILE_HANDLE;
HANDLE handle = (HANDLE)_get_osfhandle(fd);
if (handle == INVALID_HANDLE_VALUE) {
/*
* _get_osfhandle aborts in an error case, so technically
* this is dead code. But according to MSDN it is
* setting an errno on failure, so we can return it in case of
* "windows magic" happen and this function "accidentally"
* will not abort.
*/
ERR("!_get_osfhandle");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
return pmem2_source_from_handle(src, handle);
}
/*
* pmem2_win_stat -- retrieve information about handle
*/
static int
pmem2_win_stat(HANDLE handle, BY_HANDLE_FILE_INFORMATION *info)
{
if (!GetFileInformationByHandle(handle, info)) {
ERR("!!GetFileInformationByHandle");
if (GetLastError() == ERROR_INVALID_HANDLE)
return PMEM2_E_INVALID_FILE_HANDLE;
else
return pmem2_lasterror_to_err();
}
if (info->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
ERR(
"using directory doesn't make any sense in context of pmem2");
return PMEM2_E_INVALID_FILE_TYPE;
}
return 0;
}
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle)
{
*src = NULL;
int ret;
if (handle == INVALID_HANDLE_VALUE)
return PMEM2_E_INVALID_FILE_HANDLE;
BY_HANDLE_FILE_INFORMATION file_info;
ret = pmem2_win_stat(handle, &file_info);
if (ret)
return ret;
/* XXX: winapi doesn't provide option to get open flags from HANDLE */
struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
if (ret)
return ret;
ASSERTne(srcp, NULL);
srcp->type = PMEM2_SOURCE_HANDLE;
srcp->value.handle = handle;
*src = srcp;
return 0;
}
/*
 * pmem2_source_size -- get a size of the file handle stored in the provided
 * source
 *
 * Anonymous sources return the stored size directly; handle-backed
 * sources query the file via pmem2_win_stat().
 */
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
	LOG(3, "type %d", src->type);
	int ret;

	if (src->type == PMEM2_SOURCE_ANON) {
		*size = src->value.size;
		return 0;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	BY_HANDLE_FILE_INFORMATION info;
	ret = pmem2_win_stat(src->value.handle, &info);
	if (ret)
		return ret;

	/* combine the 32-bit high/low halves into a 64-bit size */
	*size = ((size_t)info.nFileSizeHigh << 32) | info.nFileSizeLow;

	LOG(4, "file length %zu", *size);

	return 0;
}
/*
 * pmem2_source_alignment -- get alignment from the system info
 *
 * On Windows the mapping alignment is the OS allocation granularity;
 * the source itself is not consulted.
 */
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
	LOG(3, "type %d", src->type);

	SYSTEM_INFO info;
	GetSystemInfo(&info);

	*alignment = (size_t)info.dwAllocationGranularity;

	/* sanity check -- callers rely on power-of-two alignment math */
	if (!util_is_pow2(*alignment)) {
		ERR("alignment (%zu) has to be a power of two", *alignment);
		return PMEM2_E_INVALID_ALIGNMENT_VALUE;
	}

	LOG(4, "alignment %zu", *alignment);

	return 0;
}
| 3,248 | 20.235294 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/pmem2_utils_none.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include <errno.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
/*
 * pmem2_device_dax_alignment -- checks the alignment of a given
 * dax device from given source
 *
 * Stub used in builds without ndctl; always fails with PMEM2_E_NOSUPP.
 */
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
	ERR("Cannot read Device Dax alignment - ndctl is not available");

	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_device_dax_size -- checks the size of a given dax device from
 * given source
 *
 * Stub used in builds without ndctl; always fails with PMEM2_E_NOSUPP.
 */
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
	ERR("Cannot read Device Dax size - ndctl is not available");

	return PMEM2_E_NOSUPP;
}
| 727 | 20.411765 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/auto_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* auto_flush_linux.c -- Linux auto flush detection
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "os.h"
#include "fs.h"
#include "auto_flush.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
#define PERSISTENCE_DOMAIN "persistence_domain"
#define DOMAIN_VALUE_LEN 32
/*
 * check_cpu_cache -- (internal) check if file contains "cpu_cache" entry
 *
 * Returns 1 when the sysfs attribute at domain_path reads "cpu_cache",
 * 0 when it reads something else (or the file cannot be opened), and
 * -1 on a read/format error.
 */
static int
check_cpu_cache(const char *domain_path)
{
	LOG(3, "domain_path: %s", domain_path);

	char domain_value[DOMAIN_VALUE_LEN];
	int domain_fd;
	int cpu_cache = 0;

	/* an unopenable attribute is treated as "no eADR", not an error */
	if ((domain_fd = os_open(domain_path, O_RDONLY)) < 0) {
		LOG(1, "!open(\"%s\", O_RDONLY)", domain_path);
		goto end;
	}
	ssize_t len = read(domain_fd, domain_value,
			DOMAIN_VALUE_LEN);

	if (len < 0) {
		ERR("!read(%d, %p, %d)", domain_fd,
			domain_value, DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (len == 0) {
		errno = EIO;
		ERR("read(%d, %p, %d) empty string",
			domain_fd, domain_value,
			DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (domain_value[len - 1] != '\n') {
		/* a well-formed sysfs attribute ends with a newline */
		ERR("!read(%d, %p, %d) invalid format",
			domain_fd, domain_value,
			DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	}

	/* strip the trailing newline before comparing */
	domain_value[len - 1] = '\0';
	LOG(15, "detected persistent_domain: %s", domain_value);
	if (strcmp(domain_value, "cpu_cache") == 0) {
		LOG(15, "cpu_cache in persistent_domain: %s", domain_path);
		cpu_cache = 1;
	} else {
		LOG(15, "cpu_cache not in persistent_domain: %s", domain_path);
		cpu_cache = 0;
	}

end:
	if (domain_fd >= 0)
		os_close(domain_fd);
	return cpu_cache;
}
/*
 * check_domain_in_region -- (internal) check if region
 * contains persistence_domain file
 *
 * Returns the check_cpu_cache() result for the region's
 * persistence_domain attribute: 1 (cpu_cache), 0 (absent or other
 * value), -1 (error).
 */
static int
check_domain_in_region(const char *region_path)
{
	LOG(3, "region_path: %s", region_path);

	struct fs *reg = NULL;
	struct fs_entry *reg_entry;
	char domain_path[PATH_MAX];
	int cpu_cache = 0;

	reg = fs_new(region_path);
	if (reg == NULL) {
		ERR("!fs_new: \"%s\"", region_path);
		cpu_cache = -1;
		goto end;
	}

	while ((reg_entry = fs_read(reg)) != NULL) {
		/*
		 * persistence_domain has to be a file type entry
		 * and it has to be first level child for region;
		 * there is no need to run into deeper levels
		 */
		if (reg_entry->type != FS_ENTRY_FILE ||
				strcmp(reg_entry->name,
				PERSISTENCE_DOMAIN) != 0 ||
				reg_entry->level != 1)
			continue;

		int ret = util_snprintf(domain_path, PATH_MAX,
			"%s/"PERSISTENCE_DOMAIN, region_path);
		if (ret < 0) {
			ERR("!snprintf");
			cpu_cache = -1;
			goto end;
		}
		cpu_cache = check_cpu_cache(domain_path);
	}

end:
	if (reg)
		fs_delete(reg);
	return cpu_cache;
}
/*
 * pmem2_auto_flush -- check if platform supports auto flush for all regions
 *
 * Traverse "/sys/bus/nd/devices" path to find all the nvdimm regions,
 * then for each region checks if "persistence_domain" file exists and
 * contains "cpu_cache" string.
 * If for any region "persistence_domain" entry does not exists, or its
 * context is not as expected, assume eADR is not available on this platform.
 *
 * Returns 1 when every region reports cpu_cache, 0 when any does not
 * (or no region sysfs tree exists), -1 on error.
 */
int
pmem2_auto_flush(void)
{
	LOG(15, NULL);

	char *device_path;
	int cpu_cache = 0;

	device_path = BUS_DEVICE_PATH;

	/* no nvdimm bus directory at all -> no eADR */
	os_stat_t sdev;
	if (os_stat(device_path, &sdev) != 0 ||
		S_ISDIR(sdev.st_mode) == 0) {
		LOG(3, "eADR not supported");
		return cpu_cache;
	}

	struct fs *dev = fs_new(device_path);
	if (dev == NULL) {
		ERR("!fs_new: \"%s\"", device_path);
		return -1;
	}

	struct fs_entry *dev_entry;

	while ((dev_entry = fs_read(dev)) != NULL) {
		/*
		 * Skip if not a symlink, because we expect that
		 * region on sysfs path is a symlink.
		 * Skip if depth is different than 1, because region
		 * we are interested in should be the first level
		 * child for device.
		 */
		if ((dev_entry->type != FS_ENTRY_SYMLINK) ||
				!strstr(dev_entry->name, "region") ||
				dev_entry->level != 1)
			continue;

		LOG(15, "Start traversing region: %s", dev_entry->path);
		cpu_cache = check_domain_in_region(dev_entry->path);
		/* one region without cpu_cache (or error) decides the result */
		if (cpu_cache != 1)
			goto end;
	}

end:
	fs_delete(dev);
	return cpu_cache;
}
| 4,214 | 21.783784 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.c -- pmem2_config implementation
*/
#include <unistd.h>
#include "alloc.h"
#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
/*
 * pmem2_config_init -- initialize cfg structure.
 *
 * Defaults: zero offset/length, no address request, invalid granularity
 * (the user must set one before mapping), shared mapping, read+write
 * protection.
 */
void
pmem2_config_init(struct pmem2_config *cfg)
{
	cfg->offset = 0;
	cfg->length = 0;
	cfg->addr = NULL;
	cfg->addr_request = PMEM2_ADDRESS_ANY;
	cfg->requested_max_granularity = PMEM2_GRANULARITY_INVALID;
	cfg->sharing = PMEM2_SHARED;
	cfg->protection_flag = PMEM2_PROT_READ | PMEM2_PROT_WRITE;
}
/*
 * pmem2_config_new -- allocates and initialize cfg structure.
 *
 * On failure returns the error code from pmem2_malloc and leaves
 * *cfg untouched.
 */
int
pmem2_config_new(struct pmem2_config **cfg)
{
	int ret;
	*cfg = pmem2_malloc(sizeof(**cfg), &ret);

	if (ret)
		return ret;

	ASSERTne(cfg, NULL);

	/* fill in the documented defaults */
	pmem2_config_init(*cfg);
	return 0;
}
/*
 * pmem2_config_delete -- deallocate cfg structure.
 *
 * Sets *cfg to NULL so a stale pointer cannot be reused.
 */
int
pmem2_config_delete(struct pmem2_config **cfg)
{
	Free(*cfg);
	*cfg = NULL;
	return 0;
}
/*
 * pmem2_config_set_required_store_granularity -- set granularity
 * requested by user in the pmem2_config structure
 *
 * Accepts only the three defined granularity levels; anything else is
 * rejected with PMEM2_E_GRANULARITY_NOT_SUPPORTED.
 */
int
pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
		enum pmem2_granularity g)
{
	if (g != PMEM2_GRANULARITY_BYTE &&
			g != PMEM2_GRANULARITY_CACHE_LINE &&
			g != PMEM2_GRANULARITY_PAGE) {
		ERR("unknown granularity value %d", g);
		return PMEM2_E_GRANULARITY_NOT_SUPPORTED;
	}

	cfg->requested_max_granularity = g;

	return 0;
}
/*
 * pmem2_config_set_offset -- set offset in the pmem2_config structure
 *
 * The value must fit in off_t, which is what mmap ultimately receives.
 */
int
pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset)
{
	/* mmap func takes offset as a type of off_t */
	if (offset <= (size_t)INT64_MAX) {
		cfg->offset = offset;
		return 0;
	}

	ERR("offset is greater than INT64_MAX");
	return PMEM2_E_OFFSET_OUT_OF_RANGE;
}
/*
 * pmem2_config_set_length -- set length in the pmem2_config structure
 *
 * No validation happens here; the value is checked against the source
 * later by pmem2_config_validate_length().
 */
int
pmem2_config_set_length(struct pmem2_config *cfg, size_t length)
{
	cfg->length = length;
	return 0;
}
/*
 * pmem2_config_validate_length -- validate that length in the pmem2_config
 * structure is consistent with the file length
 *
 * Checks, in order: non-empty file, length aligned to `alignment`,
 * no offset+length overflow, and that the requested range fits within
 * the file size rounded up to the alignment.
 */
int
pmem2_config_validate_length(const struct pmem2_config *cfg,
		size_t file_len, size_t alignment)
{
	ASSERTne(alignment, 0);

	if (file_len == 0) {
		ERR("file length is equal 0");
		return PMEM2_E_SOURCE_EMPTY;
	}

	if (cfg->length % alignment) {
		ERR("length is not a multiple of %lu", alignment);
		return PMEM2_E_LENGTH_UNALIGNED;
	}

	/* overflow check */
	const size_t end = cfg->offset + cfg->length;
	if (end < cfg->offset) {
		ERR("overflow of offset and length");
		return PMEM2_E_MAP_RANGE;
	}

	/* let's align the file size */
	size_t aligned_file_len = file_len;
	if (file_len % alignment)
		aligned_file_len = ALIGN_UP(file_len, alignment);

	/* validate mapping fit into the file */
	if (end > aligned_file_len) {
		ERR("mapping larger than file size");
		return PMEM2_E_MAP_RANGE;
	}

	return 0;
}
/*
 * pmem2_config_set_sharing -- set the way pmem2_map will map the file
 *
 * Only PMEM2_SHARED and PMEM2_PRIVATE are valid.
 */
int
pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type)
{
	if (type != PMEM2_SHARED && type != PMEM2_PRIVATE) {
		ERR("unknown sharing value %d", type);
		return PMEM2_E_INVALID_SHARING_VALUE;
	}

	cfg->sharing = type;

	return 0;
}
/*
 * pmem2_config_validate_addr_alignment -- validate that addr in the
 * pmem2_config structure is a multiple of the alignment required for
 * specific cfg
 */
int
pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
		const struct pmem2_source *src)
{
	/* cannot NULL % alignment, NULL is valid */
	if (!cfg->addr)
		return 0;

	size_t alignment;
	int ret = pmem2_source_alignment(src, &alignment);
	if (ret)
		return ret;

	/* pmem2_source_alignment guarantees a non-zero alignment on success */
	ASSERTne(alignment, 0);
	if ((size_t)cfg->addr % alignment) {
		ERR("address %p is not a multiple of %lu", cfg->addr,
				alignment);
		return PMEM2_E_ADDRESS_UNALIGNED;
	}

	return 0;
}
/*
 * pmem2_config_set_address -- set addr and addr_request in the config
 * struct
 *
 * Only PMEM2_ADDRESS_FIXED_NOREPLACE is accepted, and it requires a
 * non-NULL addr.
 */
int
pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
		enum pmem2_address_request_type request_type)
{
	if (request_type != PMEM2_ADDRESS_FIXED_NOREPLACE) {
		ERR("invalid address request_type 0x%x", request_type);
		return PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE;
	}

	/*
	 * request_type is known to be PMEM2_ADDRESS_FIXED_NOREPLACE here
	 * (the previous guard returned otherwise), so only addr needs
	 * checking.
	 */
	if (!addr) {
		ERR(
			"cannot use address request type PMEM2_ADDRESS_FIXED_NOREPLACE with addr being NULL");
		return PMEM2_E_ADDRESS_NULL;
	}

	cfg->addr = addr;
	cfg->addr_request = (int)request_type;

	return 0;
}
/*
 * pmem2_config_set_vm_reservation -- set vm_reservation in the
 * pmem2_config structure
 *
 * Not implemented in this build; always returns PMEM2_E_NOSUPP.
 */
int
pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
		struct pmem2_vm_reservation *rsv, size_t offset)
{
	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_config_clear_address -- reset addr and addr_request in the config
 * to the default values
 */
void
pmem2_config_clear_address(struct pmem2_config *cfg)
{
	/* same defaults as pmem2_config_init() */
	cfg->addr = NULL;
	cfg->addr_request = PMEM2_ADDRESS_ANY;
}
/*
 * pmem2_config_set_protection -- set protection flags
 * in the config struct
 *
 * Rejects any bit outside of PMEM2_PROT_{READ,WRITE,EXEC,NONE}.
 */
int
pmem2_config_set_protection(struct pmem2_config *cfg,
		unsigned prot)
{
	const unsigned known_flags = PMEM2_PROT_READ | PMEM2_PROT_WRITE |
			PMEM2_PROT_EXEC | PMEM2_PROT_NONE;

	if (prot & ~known_flags) {
		ERR("invalid flag %u", prot);
		return PMEM2_E_INVALID_PROT_FLAG;
	}

	cfg->protection_flag = prot;
	return 0;
}
| 5,603 | 20.227273 | 89 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/ravl_interval.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.h -- internal definitions for ravl_interval
*/
#ifndef RAVL_INTERVAL_H
#define RAVL_INTERVAL_H
#include "libpmem2.h"
#include "os_thread.h"
#include "ravl.h"
struct ravl_interval;
struct ravl_interval_node;
/* callbacks extracting the interval's lower/upper bound from a stored addr */
typedef size_t ravl_interval_min(void *addr);
typedef size_t ravl_interval_max(void *addr);

/* max was previously declared with the ravl_interval_min typedef */
struct ravl_interval *ravl_interval_new(ravl_interval_min *min,
		ravl_interval_max *max);
void ravl_interval_delete(struct ravl_interval *ri);
int ravl_interval_insert(struct ravl_interval *ri, void *addr);
int ravl_interval_remove(struct ravl_interval *ri,
struct ravl_interval_node *rin);
struct ravl_interval_node *ravl_interval_find_equal(struct ravl_interval *ri,
void *addr);
struct ravl_interval_node *ravl_interval_find(struct ravl_interval *ri,
void *addr);
void *ravl_interval_data(struct ravl_interval_node *rin);
#endif
| 947 | 27.727273 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/memops_generic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* memops_generic.c -- architecture-independent memmove & memset fallback
*
* This fallback is needed to fulfill guarantee that pmem_mem[cpy|set|move]
* will use at least 8-byte stores (for 8-byte aligned buffers and sizes),
* even when accelerated implementation is missing or disabled.
* This guarantee is needed to maintain correctness eg in pmemobj.
* Libc may do the same, but this behavior is not documented, so we can't rely
* on that.
*/
#include <stddef.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
 * pmem2_flush_flags -- internal wrapper around pmem_flush
 *
 * Skips the flush entirely when the caller passed PMEM2_F_MEM_NOFLUSH.
 */
static inline void
pmem2_flush_flags(const void *addr, size_t len, unsigned flags,
		flush_func flush)
{
	if (!(flags & PMEM2_F_MEM_NOFLUSH))
		flush(addr, len);
}
/*
 * cpy128 -- (internal) copy 128 bytes from src to dst
 *
 * Both pointers must be 8-byte aligned; all 16 loads happen before all
 * 16 stores.
 */
static force_inline void
cpy128(uint64_t *dst, const uint64_t *src)
{
	/*
	 * We use atomics here just to be sure compiler will not split stores.
	 * Order of stores doesn't matter.
	 */
	uint64_t tmp[16];
	util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
	util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
	util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
	util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
	util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
	util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
	util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
	util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
	util_atomic_load_explicit64(&src[8], &tmp[8], memory_order_relaxed);
	util_atomic_load_explicit64(&src[9], &tmp[9], memory_order_relaxed);
	util_atomic_load_explicit64(&src[10], &tmp[10], memory_order_relaxed);
	util_atomic_load_explicit64(&src[11], &tmp[11], memory_order_relaxed);
	util_atomic_load_explicit64(&src[12], &tmp[12], memory_order_relaxed);
	util_atomic_load_explicit64(&src[13], &tmp[13], memory_order_relaxed);
	util_atomic_load_explicit64(&src[14], &tmp[14], memory_order_relaxed);
	util_atomic_load_explicit64(&src[15], &tmp[15], memory_order_relaxed);

	util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[8], tmp[8], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[9], tmp[9], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[10], tmp[10], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[11], tmp[11], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[12], tmp[12], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[13], tmp[13], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[14], tmp[14], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[15], tmp[15], memory_order_relaxed);
}
/*
 * cpy64 -- (internal) copy 64 bytes from src to dst
 *
 * Both pointers must be 8-byte aligned; all 8 loads happen before all
 * 8 stores.
 */
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
	/*
	 * We use atomics here just to be sure compiler will not split stores.
	 * Order of stores doesn't matter.
	 */
	uint64_t tmp[8];
	util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
	util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
	util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
	util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
	util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
	util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
	util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
	util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);

	util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
	util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
}
/*
 * cpy8 -- (internal) copy 8 bytes from src to dst
 *
 * Single atomic load/store pair; the compiler cannot split it.
 */
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
	uint64_t tmp;
	util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
	util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}
/*
 * store8 -- (internal) store 8 bytes
 *
 * Atomic so the compiler cannot split the store.
 */
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
	util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}
/*
 * memmove_nodrain_generic -- generic memmove to pmem without hw drain
 *
 * Handles overlap like memmove: copies forward when dst is not inside
 * [src, src+len), backward otherwise. Within each direction the copy is
 * split into an unaligned head (byte copy up to 8-byte alignment of
 * dst), a bulk middle (128/64/8-byte aligned chunks), and a byte tail,
 * flushing each flushed chunk via pmem2_flush_flags() unless
 * PMEM2_F_MEM_NOFLUSH is set.
 */
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
		unsigned flags, flush_func flush)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
			flags);

	char *cdst = dst;
	const char *csrc = src;
	size_t remaining;
	(void) flags; /* leftover; flags IS used below via pmem2_flush_flags */

	/*
	 * unsigned wrap-around trick: true iff dst is not within
	 * [src, src + len), so a forward copy is safe
	 */
	if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
		/* head: copy bytes until dst is 8-byte aligned */
		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			cnt = 8 - cnt;

			if (cnt > len)
				cnt = len;

			for (size_t i = 0; i < cnt; ++i)
				cdst[i] = csrc[i];

			pmem2_flush_flags(cdst, cnt, flags, flush);

			cdst += cnt;
			csrc += cnt;
			len -= cnt;
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		while (len >= 128 && CACHELINE_SIZE == 128) {
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
			dst8 += 16;
			src8 += 16;
		}

		while (len >= 64) {
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
			dst8 += 8;
			src8 += 8;
		}

		/* tail (<64B): 8-byte chunks plus a final byte copy,
		 * flushed together at the end */
		remaining = len;
		while (len >= 8) {
			cpy8(dst8, src8);
			len -= 8;
			dst8++;
			src8++;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = 0; i < len; ++i)
			*cdst++ = *csrc++;

		if (remaining)
			pmem2_flush_flags(cdst - remaining, remaining, flags,
					flush);
	} else {
		/* overlapping forward -> copy backward from the end */
		cdst += len;
		csrc += len;

		/* head: copy bytes until the end pointer is 8-byte aligned */
		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			if (cnt > len)
				cnt = len;

			cdst -= cnt;
			csrc -= cnt;
			len -= cnt;

			for (size_t i = cnt; i > 0; --i)
				cdst[i - 1] = csrc[i - 1];
			pmem2_flush_flags(cdst, cnt, flags, flush);
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		while (len >= 128 && CACHELINE_SIZE == 128) {
			dst8 -= 16;
			src8 -= 16;
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
		}

		while (len >= 64) {
			dst8 -= 8;
			src8 -= 8;
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
		}

		remaining = len;
		while (len >= 8) {
			--dst8;
			--src8;
			cpy8(dst8, src8);
			len -= 8;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = len; i > 0; --i)
			*--cdst = *--csrc;

		if (remaining)
			pmem2_flush_flags(cdst, remaining, flags, flush);
	}

	return dst;
}
/*
 * memset_nodrain_generic -- generic memset to pmem without hw drain
 *
 * Same structure as memmove_nodrain_generic's forward path: byte head
 * up to 8-byte alignment, bulk 128/64-byte blocks of 8-byte stores,
 * then an 8-byte/byte tail; each flushed chunk goes through
 * pmem2_flush_flags() unless PMEM2_F_MEM_NOFLUSH is set.
 */
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags,
		flush_func flush)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
			flags);
	(void) flags; /* leftover; flags IS used below via pmem2_flush_flags */

	char *cdst = dst;
	/* head: byte stores until dst is 8-byte aligned */
	size_t cnt = (uint64_t)cdst & 7;
	if (cnt > 0) {
		cnt = 8 - cnt;

		if (cnt > len)
			cnt = len;

		for (size_t i = 0; i < cnt; ++i)
			cdst[i] = (char)c;
		pmem2_flush_flags(cdst, cnt, flags, flush);

		cdst += cnt;
		len -= cnt;
	}

	uint64_t *dst8 = (uint64_t *)cdst;

	/* replicate the fill byte into all 8 bytes of a word */
	uint64_t u = (unsigned char)c;
	uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
			(u << 24) | (u << 16) | (u << 8) | u;

	while (len >= 128 && CACHELINE_SIZE == 128) {
		store8(&dst8[0], tmp);
		store8(&dst8[1], tmp);
		store8(&dst8[2], tmp);
		store8(&dst8[3], tmp);
		store8(&dst8[4], tmp);
		store8(&dst8[5], tmp);
		store8(&dst8[6], tmp);
		store8(&dst8[7], tmp);
		store8(&dst8[8], tmp);
		store8(&dst8[9], tmp);
		store8(&dst8[10], tmp);
		store8(&dst8[11], tmp);
		store8(&dst8[12], tmp);
		store8(&dst8[13], tmp);
		store8(&dst8[14], tmp);
		store8(&dst8[15], tmp);
		pmem2_flush_flags(dst8, 128, flags, flush);
		len -= 128;
		dst8 += 16;
	}

	while (len >= 64) {
		store8(&dst8[0], tmp);
		store8(&dst8[1], tmp);
		store8(&dst8[2], tmp);
		store8(&dst8[3], tmp);
		store8(&dst8[4], tmp);
		store8(&dst8[5], tmp);
		store8(&dst8[6], tmp);
		store8(&dst8[7], tmp);
		pmem2_flush_flags(dst8, 64, flags, flush);
		len -= 64;
		dst8 += 8;
	}

	/* tail (<64B): word stores plus byte stores, flushed together */
	size_t remaining = len;
	while (len >= 8) {
		store8(dst8, tmp);
		len -= 8;
		dst8++;
	}

	cdst = (char *)dst8;

	for (size_t i = 0; i < len; ++i)
		*cdst++ = (char)c;

	if (remaining)
		pmem2_flush_flags(cdst - remaining, remaining, flags, flush);
	return dst;
}
| 9,345 | 26.488235 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/pmem2_arch.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem2_arch.h -- core-arch interface
*/
#ifndef PMEM2_ARCH_H
#define PMEM2_ARCH_H
#include <stddef.h>
#include "libpmem2.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pmem2_arch_info;
typedef void (*fence_func)(void);
typedef void (*flush_func)(const void *, size_t);
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
size_t len, unsigned flags, flush_func flush);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
unsigned flags, flush_func flush);
struct pmem2_arch_info {
memmove_nodrain_func memmove_nodrain;
memmove_nodrain_func memmove_nodrain_eadr;
memset_nodrain_func memset_nodrain;
memset_nodrain_func memset_nodrain_eadr;
flush_func flush;
fence_func fence;
int flush_has_builtin_fence;
};
void pmem2_arch_init(struct pmem2_arch_info *info);
/*
 * flush_empty_nolog -- (internal) do not flush the CPU cache
 *
 * Compiles to nothing except the pmemcheck annotation.
 */
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
	/* NOP, but tell pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, len);
}
void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush);
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.8 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/region_namespace_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* region_namespace_ndctl.c -- common ndctl functions
*/
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "region_namespace_ndctl.h"
#include "region_namespace.h"
#include "out.h"
/*
 * ndctl_match_devdax -- (internal) returns 0 if the devdax matches
 *			 with the given file, 1 if it doesn't match,
 *			 and a negative value in case of an error.
 *
 * Matching is done by comparing st_rdev against /dev/<devname>'s rdev.
 */
static int
ndctl_match_devdax(dev_t st_rdev, const char *devname)
{
	LOG(3, "st_rdev %lu devname %s", st_rdev, devname);

	/* an empty device name can never match */
	if (*devname == '\0')
		return 1;

	char path[PATH_MAX];
	os_stat_t stat;

	if (util_snprintf(path, PATH_MAX, "/dev/%s", devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (os_stat(path, &stat)) {
		ERR("!stat %s", path);
		return PMEM2_E_ERRNO;
	}

	if (st_rdev != stat.st_rdev) {
		LOG(10, "skipping not matching device: %s", path);
		return 1;
	}

	LOG(4, "found matching device: %s", path);

	return 0;
}
#define BUFF_LENGTH 64
/*
 * ndctl_match_fsdax -- (internal) returns 0 if the device matches
 *			with the given file, 1 if it doesn't match,
 *			and a negative value in case of an error.
 *
 * Matching compares "major:minor" of st_dev against the contents of
 * /sys/block/<devname>/dev.
 */
static int
ndctl_match_fsdax(dev_t st_dev, const char *devname)
{
	LOG(3, "st_dev %lu devname %s", st_dev, devname);

	/* an empty device name can never match */
	if (*devname == '\0')
		return 1;

	char path[PATH_MAX];
	char dev_id[BUFF_LENGTH];

	if (util_snprintf(path, PATH_MAX, "/sys/block/%s/dev", devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (util_snprintf(dev_id, BUFF_LENGTH, "%d:%d",
			major(st_dev), minor(st_dev)) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	int fd = os_open(path, O_RDONLY);
	if (fd < 0) {
		ERR("!open \"%s\"", path);
		return PMEM2_E_ERRNO;
	}

	char buff[BUFF_LENGTH];
	ssize_t nread = read(fd, buff, BUFF_LENGTH);
	if (nread < 0) {
		ERR("!read");
		int oerrno = errno; /* save the errno */
		os_close(fd);
		errno = oerrno;
		return PMEM2_E_ERRNO;
	}
	os_close(fd);

	if (nread == 0) {
		ERR("%s is empty", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	/* a well-formed sysfs attribute ends with a newline */
	if (buff[nread - 1] != '\n') {
		ERR("%s doesn't end with new line", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	buff[nread - 1] = '\0';

	if (strcmp(buff, dev_id) != 0) {
		LOG(10, "skipping not matching device: %s", path);
		return 1;
	}

	LOG(4, "found matching device: %s", path);

	return 0;
}
/*
 * pmem2_region_namespace -- returns the region
 *			     (and optionally the namespace)
 *			     where the given file is located
 *
 * Iterates over every bus/region/namespace known to ndctl. Device DAX
 * sources are matched by character-device rdev, regular-file sources by
 * the block device backing their filesystem. Outputs are set to NULL
 * first and left NULL when nothing matches (the function still returns
 * 0 in that case).
 */
int
pmem2_region_namespace(struct ndctl_ctx *ctx,
	const struct pmem2_source *src,
	struct ndctl_region **pregion,
	struct ndctl_namespace **pndns)
{
	LOG(3, "ctx %p src %p pregion %p pnamespace %p",
		ctx, src, pregion, pndns);

	struct ndctl_bus *bus;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;

	if (pregion)
		*pregion = NULL;

	if (pndns)
		*pndns = NULL;

	if (src->value.ftype == PMEM2_FTYPE_DIR) {
		ERR("cannot check region or namespace of a directory");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
		struct ndctl_btt *btt;
		struct ndctl_dax *dax = NULL;
		struct ndctl_pfn *pfn;
		const char *devname;

		if ((dax = ndctl_namespace_get_dax(ndns))) {
			/* a device-dax namespace -- only devdax sources apply */
			if (src->value.ftype == PMEM2_FTYPE_REG)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_DEVDAX);

			struct daxctl_region *dax_region;
			dax_region = ndctl_dax_get_daxctl_region(dax);
			if (!dax_region) {
				ERR("!cannot find dax region");
				return PMEM2_E_DAX_REGION_NOT_FOUND;
			}
			struct daxctl_dev *dev;
			daxctl_dev_foreach(dax_region, dev) {
				devname = daxctl_dev_get_devname(dev);
				int ret = ndctl_match_devdax(src->value.st_rdev,
					devname);
				if (ret < 0)
					return ret;

				if (ret == 0) {
					if (pregion)
						*pregion = region;
					if (pndns)
						*pndns = ndns;

					return 0;
				}
			}
		} else {
			/* a block namespace -- only regular-file sources apply */
			if (src->value.ftype == PMEM2_FTYPE_DEVDAX)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_REG);

			/* pick the block device name depending on the mode */
			if ((btt = ndctl_namespace_get_btt(ndns))) {
				devname = ndctl_btt_get_block_device(btt);
			} else if ((pfn = ndctl_namespace_get_pfn(ndns))) {
				devname = ndctl_pfn_get_block_device(pfn);
			} else {
				devname =
					ndctl_namespace_get_block_device(ndns);
			}

			int ret = ndctl_match_fsdax(src->value.st_dev, devname);
			if (ret < 0)
				return ret;

			if (ret == 0) {
				if (pregion)
					*pregion = region;
				if (pndns)
					*pndns = ndns;

				return 0;
			}
		}
	}

	LOG(10, "did not found any matching device");

	return 0;
}
/*
 * pmem2_get_region_id -- returns the region id of the device backing
 * the given source
 *
 * (comment previously mis-named this function "pmem2_region_get_id")
 */
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
	LOG(3, "src %p region_id %p", src, region_id);

	struct ndctl_region *region;
	struct ndctl_namespace *ndns;
	struct ndctl_ctx *ctx;

	/* ndctl_new returns a negative errno value on failure */
	errno = ndctl_new(&ctx) * (-1);
	if (errno) {
		ERR("!ndctl_new");
		return PMEM2_E_ERRNO;
	}

	int rv = pmem2_region_namespace(ctx, src, &region, &ndns);
	if (rv) {
		LOG(1, "getting region and namespace failed");
		goto end;
	}

	/* pmem2_region_namespace may succeed without finding a region */
	if (!region) {
		ERR("unknown region");
		rv = PMEM2_E_DAX_REGION_NOT_FOUND;
		goto end;
	}

	*region_id = ndctl_region_get_id(region);

end:
	ndctl_unref(ctx);
	return rv;
}
| 5,467 | 20.111969 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/pmem2_utils_other.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <sys/stat.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#ifdef _WIN32
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
if (S_ISREG(st->st_mode)) {
*type = PMEM2_FTYPE_REG;
return 0;
}
if (S_ISDIR(st->st_mode)) {
*type = PMEM2_FTYPE_DIR;
return 0;
}
ERR("file type 0%o not supported", st->st_mode & S_IFMT);
return PMEM2_E_INVALID_FILE_TYPE;
}
/*
 * pmem2_device_dax_size -- checks the size of a given
 * dax device from given source structure
 *
 * Device DAX does not exist on this OS; reaching this function
 * indicates a bug in the caller (asserted in debug builds).
 */
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
	const char *err =
		"BUG: pmem2_device_dax_size should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);

	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_device_dax_alignment -- checks the alignment of a given
 * dax device from given source
 *
 * Device DAX does not exist on this OS; reaching this function
 * indicates a bug in the caller (asserted in debug builds).
 */
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
	const char *err =
		"BUG: pmem2_device_dax_alignment should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);

	return PMEM2_E_NOSUPP;
}
| 1,301 | 20.7 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/deep_flush.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.c -- pmem2_deep_flush implementation
*/
#include <stdlib.h>
#include "libpmem2.h"
#include "deep_flush.h"
#include "out.h"
/*
 * pmem2_deep_flush -- performs deep flush operation
 *
 * The [ptr, ptr + size) range must lie entirely inside the mapping;
 * otherwise PMEM2_E_DEEP_FLUSH_RANGE is returned. On success returns 0,
 * otherwise the error code from the map's deep-flush callback.
 */
int
pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	uintptr_t map_addr = (uintptr_t)map->addr;
	uintptr_t map_end = map_addr + map->content_length;
	uintptr_t flush_addr = (uintptr_t)ptr;
	uintptr_t flush_end = flush_addr + size;

	if (flush_addr < map_addr || flush_end > map_end) {
		/* fixed typo ("rage") and missing space in the message */
		ERR("requested deep flush range ptr %p size %zu "
			"exceeds map range %p", ptr, size, map);
		return PMEM2_E_DEEP_FLUSH_RANGE;
	}

	int ret = map->deep_flush_fn(map, ptr, size);
	if (ret) {
		LOG(1, "cannot perform deep flush operation for map %p", map);
		return ret;
	}

	return 0;
}
| 929 | 21.682927 | 64 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/map_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_posix.c -- pmem2_map (POSIX)
*/
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "file.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "valgrind_internal.h"
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
#ifdef __linux__
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
	"requested granularity not available because fd doesn't point to DAX-enabled file " \
	"or kernel doesn't support MAP_SYNC flag (Linux >= 4.15)"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
	"requested granularity not available because the platform doesn't support eADR"
/*
 * Error-message lookup table indexed as
 * [requested granularity][available granularity]. Only cells above the
 * diagonal are reachable, because an error occurs solely when the
 * available granularity is coarser than the requested one.
 */
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/*		BYTE		CACHE_LINE		PAGE	*/
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#else
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
	"the operating system doesn't provide a method of detecting granularity"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG \
	"the operating system doesn't provide a method of detecting whether the platform supports eADR"
/* same [requested][available] lookup convention as the Linux table */
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/*		BYTE		CACHE_LINE		PAGE	*/
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#endif
/*
* get_map_alignment -- (internal) choose the desired mapping alignment
*
* The smallest supported alignment is 2 megabytes because of the object
* alignment requirements. Changing this value to 4 kilobytes constitutes a
* layout change.
*
* Use 1GB page alignment only if the mapping length is at least
* twice as big as the page size.
*/
static inline size_t
get_map_alignment(size_t len, size_t req_align)
{
	/* an explicit request always wins */
	if (req_align)
		return req_align;

	/* use 1GB alignment only when the mapping is at least 2GB long */
	return (len >= 2 * GIGABYTE) ? GIGABYTE : 2 * MEGABYTE;
}
/*
* map_reserve -- (internal) reserve an address for mmap()
*
* ALSR in 64-bit Linux kernel uses 28-bit of randomness for mmap
* (bit positions 12-39), which means the base mapping address is randomized
* within [0..1024GB] range, with 4KB granularity. Assuming additional
* 1GB alignment, it results in 1024 possible locations.
*/
static int
map_reserve(size_t len, size_t alignment, void **reserv, size_t *reslen,
		const struct pmem2_config *cfg)
{
	ASSERTne(reserv, NULL);

	/* let's get addr from the cfg */
	void *mmap_addr = cfg->addr;
	int mmap_addr_flag = 0;
	size_t dlength; /* dummy length */

	/*
	 * if addr is initialized, dlength == len; otherwise the dummy
	 * mapping is oversized by 'alignment' so that an aligned sub-range
	 * of size 'len' is guaranteed to exist inside it
	 */
	if (mmap_addr)
		dlength = len;
	else
		dlength = len + alignment; /* dummy length */

	/* "translate" pmem2 addr request type into linux flag */
	if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/*
		 * glibc started exposing this flag in version 4.17 but we can still
		 * imitate it even if it is not supported by libc or kernel
		 */
#ifdef MAP_FIXED_NOREPLACE
		mmap_addr_flag = MAP_FIXED_NOREPLACE;
#else
		mmap_addr_flag = 0;
#endif
	}

	/*
	 * Create dummy mapping to find an unused region of given size.
	 * Request for increased size for later address alignment.
	 * Use MAP_PRIVATE with read-only access to simulate
	 * zero cost for overcommit accounting. Note: MAP_NORESERVE
	 * flag is ignored if overcommit is disabled (mode 2).
	 */
	char *daddr = mmap(mmap_addr, dlength, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | mmap_addr_flag, -1, 0);
	if (daddr == MAP_FAILED) {
		if (errno == EEXIST) {
			ERR("!mmap MAP_FIXED_NOREPLACE");
			return PMEM2_E_MAPPING_EXISTS;
		}
		ERR("!mmap MAP_ANONYMOUS");
		return PMEM2_E_ERRNO;
	}

	/*
	 * When kernel does not support MAP_FIXED_NOREPLACE flag we imitate it.
	 * If kernel does not support flag and given addr is occupied, kernel
	 * chooses new addr randomly and returns it. We do not want that
	 * behavior, so we validate it and fail when addresses do not match.
	 */
	if (mmap_addr && cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/* mapping passed and gave different addr, while it shouldn't */
		if (daddr != mmap_addr) {
			munmap(daddr, dlength);
			ERR("mapping exists in the given address");
			return PMEM2_E_MAPPING_EXISTS;
		}
	}

	LOG(4, "system choice %p", daddr);

	/* round the hint up to the requested alignment */
	*reserv = (void *)roundup((uintptr_t)daddr, alignment);
	/*
	 * since the last part of the reservation from (reserv + reslen == end)
	 * will be unmapped, the 'end' address has to be page-aligned.
	 * 'reserv' is already page-aligned (or even aligned to multiple of page
	 * size) so it is enough to page-align the 'reslen' value.
	 */
	*reslen = roundup(len, Pagesize);
	LOG(4, "hint %p", *reserv);

	/*
	 * The placeholder mapping is divided into few parts:
	 *
	 *	1       2         3   4                 5
	 *	|......|uuuuuuuuu|rrr|.................|
	 *
	 * Addresses:
	 * 1 == daddr
	 * 2 == reserv
	 * 3 == reserv + len
	 * 4 == reserv + reslen == end (has to be page-aligned)
	 * 5 == daddr + dlength
	 *
	 * Key:
	 * - '.' is an unused part of the placeholder
	 * - 'u' is where the actual mapping lies
	 * - 'r' is what reserved as padding
	 */

	/* unmap the placeholder before the actual mapping */
	const size_t before = (uintptr_t)(*reserv) - (uintptr_t)daddr;
	if (before) {
		if (munmap(daddr, before)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}
	}

	/* unmap the placeholder after the actual mapping */
	const size_t after = dlength - *reslen - before;
	void *end = (void *)((uintptr_t)(*reserv) + (uintptr_t)*reslen);
	if (after)
		if (munmap(end, after)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}

	return 0;
}
/*
* file_map -- (internal) memory map given file into memory
* If (flags & MAP_PRIVATE) it uses just mmap. Otherwise, it tries to mmap with
* (flags | MAP_SHARED_VALIDATE | MAP_SYNC) which allows flushing from the
* user-space. If MAP_SYNC fails and the user did not specify it by himself it
* falls back to the mmap with user-provided flags.
*/
static int
file_map(void *reserv, size_t len, int proto, int flags,
		int fd, off_t offset, bool *map_sync, void **base)
{
	LOG(15, "reserve %p len %zu proto %x flags %x fd %d offset %ld "
			"map_sync %p", reserv, len, proto, flags, fd, offset,
			map_sync);

	ASSERTne(map_sync, NULL);
	ASSERTne(base, NULL);

	/*
	 * MAP_PRIVATE cannot be combined with MAP_SHARED_VALIDATE/MAP_SYNC,
	 * so handle the private case on its own.
	 */
	if (flags & MAP_PRIVATE) {
		*base = mmap(reserv, len, proto, flags, fd, offset);
		if (*base == MAP_FAILED) {
			ERR("!mmap");
			return PMEM2_E_ERRNO;
		}
		LOG(4, "mmap with MAP_PRIVATE succeeded");
		*map_sync = false;
		return 0;
	}

	/* first attempt: shared mapping with MAP_SYNC */
	*base = mmap(reserv, len, proto,
			flags | MAP_SHARED_VALIDATE | MAP_SYNC, fd, offset);
	if (*base != MAP_FAILED) {
		LOG(4, "mmap with MAP_SYNC succeeded");
		*map_sync = true;
		return 0;
	}

	/* fall back to plain MAP_SHARED when MAP_SYNC is not supported */
	if (errno == EINVAL || errno == ENOTSUP) {
		LOG(4, "mmap with MAP_SYNC not supported");
		*base = mmap(reserv, len, proto, flags | MAP_SHARED, fd,
				offset);
		if (*base != MAP_FAILED) {
			*map_sync = false;
			return 0;
		}
	}

	ERR("!mmap");
	return PMEM2_E_ERRNO;
}
/*
* unmap -- (internal) unmap a memory range
*/
static int
unmap(void *addr, size_t len)
{
	/* munmap() returns 0 on success, -1 (with errno set) on failure */
	if (munmap(addr, len) != 0) {
		ERR("!munmap");
		return PMEM2_E_ERRNO;
	}

	return 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
	struct pmem2_map **map_ptr)
{
	LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
	int ret = 0;
	struct pmem2_map *map;
	size_t file_len;
	*map_ptr = NULL;

	/* granularity is a mandatory config setting */
	if (cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
		ERR(
			"please define the max granularity requested for the mapping");
		return PMEM2_E_GRANULARITY_NOT_SET;
	}

	size_t src_alignment;
	ret = pmem2_source_alignment(src, &src_alignment);
	if (ret)
		return ret;

	/* get file size */
	ret = pmem2_source_size(src, &file_len);
	if (ret)
		return ret;

	/* get offset */
	size_t effective_offset;
	ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
	if (ret)
		return ret;
	ASSERTeq(effective_offset, cfg->offset);

	/* anonymous sources ignore the configured offset */
	if (src->type == PMEM2_SOURCE_ANON)
		effective_offset = 0;

	os_off_t off = (os_off_t)effective_offset;

	/* map input and output variables */
	bool map_sync = false;
	/*
	 * MAP_SHARED - is required to mmap directly the underlying hardware
	 * MAP_FIXED - is required to mmap at exact address pointed by hint
	 */
	int flags = MAP_FIXED;
	void *addr;

	/* "translate" pmem2 protection flags into linux flags */
	int proto = 0;
	if (cfg->protection_flag == PMEM2_PROT_NONE)
		proto = PROT_NONE;
	if (cfg->protection_flag & PMEM2_PROT_EXEC)
		proto |= PROT_EXEC;
	if (cfg->protection_flag & PMEM2_PROT_READ)
		proto |= PROT_READ;
	if (cfg->protection_flag & PMEM2_PROT_WRITE)
		proto |= PROT_WRITE;

	if (src->type == PMEM2_SOURCE_FD) {
		if (src->value.ftype == PMEM2_FTYPE_DIR) {
			ERR("the directory is not a supported file type");
			return PMEM2_E_INVALID_FILE_TYPE;
		}

		ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
			src->value.ftype == PMEM2_FTYPE_DEVDAX);

		if (cfg->sharing == PMEM2_PRIVATE &&
			src->value.ftype == PMEM2_FTYPE_DEVDAX) {
			ERR(
				"device DAX does not support mapping with MAP_PRIVATE");
			return PMEM2_E_SRC_DEVDAX_PRIVATE;
		}
	}

	size_t content_length, reserved_length = 0;
	ret = pmem2_config_validate_length(cfg, file_len, src_alignment);
	if (ret)
		return ret;

	/* without user-provided length, map to the end of the file */
	if (cfg->length)
		content_length = cfg->length;
	else
		content_length = file_len - effective_offset;

	size_t alignment = get_map_alignment(content_length,
			src_alignment);

	ret = pmem2_config_validate_addr_alignment(cfg, src);
	if (ret)
		return ret;

	/* find a hint for the mapping */
	void *reserv = NULL;
	ret = map_reserve(content_length, alignment, &reserv, &reserved_length,
			cfg);
	if (ret != 0) {
		if (ret == PMEM2_E_MAPPING_EXISTS)
			LOG(1, "given mapping region is already occupied");
		else
			LOG(1, "cannot find a contiguous region of given size");
		return ret;
	}
	ASSERTne(reserv, NULL);

	if (cfg->sharing == PMEM2_PRIVATE) {
		flags |= MAP_PRIVATE;
	}

	int map_fd = INVALID_FD;
	if (src->type == PMEM2_SOURCE_FD) {
		map_fd = src->value.fd;
	} else if (src->type == PMEM2_SOURCE_ANON) {
		flags |= MAP_ANONYMOUS;
	} else {
		ASSERT(0);
	}

	/* map the file (or anonymous memory) over the reserved region */
	ret = file_map(reserv, content_length, proto, flags, map_fd, off,
			&map_sync, &addr);
	if (ret) {
		/* unmap the reservation mapping */
		munmap(reserv, reserved_length);
		if (ret == -EACCES)
			return PMEM2_E_NO_ACCESS;
		else if (ret == -ENOTSUP)
			return PMEM2_E_NOSUPP;
		else
			return ret;
	}

	LOG(3, "mapped at %p", addr);

	/* anonymous memory is always byte-granular; files depend on HW/OS */
	bool eADR = (pmem2_auto_flush() == 1);
	enum pmem2_granularity available_min_granularity =
		src->type == PMEM2_SOURCE_ANON ? PMEM2_GRANULARITY_BYTE :
		get_min_granularity(eADR, map_sync, cfg->sharing);

	if (available_min_granularity > cfg->requested_max_granularity) {
		const char *err = granularity_err_msg
			[cfg->requested_max_granularity]
			[available_min_granularity];
		if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
			FATAL(
				"unhandled granularity error: available_min_granularity: %d" \
				"requested_max_granularity: %d",
				available_min_granularity,
				cfg->requested_max_granularity);
		ERR("%s", err);
		ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
		goto err;
	}

	/* prepare pmem2_map structure */
	map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
	if (!map)
		goto err;

	map->addr = addr;
	map->reserved_length = reserved_length;
	map->content_length = content_length;
	map->effective_granularity = available_min_granularity;
	pmem2_set_flush_fns(map);
	pmem2_set_mem_fns(map);
	map->source = *src;
	map->source.value.fd = INVALID_FD; /* fd should not be used after map */

	ret = pmem2_register_mapping(map);
	if (ret)
		goto err_register;

	*map_ptr = map;

	if (src->type == PMEM2_SOURCE_FD) {
		VALGRIND_REGISTER_PMEM_MAPPING(map->addr, map->content_length);
		VALGRIND_REGISTER_PMEM_FILE(src->value.fd,
			map->addr, map->content_length, 0);
	}

	return 0;

err_register:
	free(map);
err:
	/* addr == reserv here; release the whole reserved region */
	unmap(addr, reserved_length);
	return ret;
}
/*
* pmem2_unmap -- unmap the specified mapping
*/
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "map_ptr %p", map_ptr);

	struct pmem2_map *m = *map_ptr;
	int ret;

	/* drop the mapping from the global registry first */
	ret = pmem2_unregister_mapping(m);
	if (ret)
		return ret;

	/* release the whole reserved region, padding included */
	ret = unmap(m->addr, m->reserved_length);
	if (ret)
		return ret;

	VALGRIND_REMOVE_PMEM_MAPPING(m->addr, m->content_length);

	Free(m);
	*map_ptr = NULL;

	return 0;
}
| 13,869 | 25.879845 | 96 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/auto_flush_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* auto_flush_windows.c -- Windows auto flush detection
*/
#include <windows.h>
#include <inttypes.h>
#include "alloc.h"
#include "out.h"
#include "os.h"
#include "endian.h"
#include "auto_flush_windows.h"
/*
* is_nfit_available -- (internal) check if platform supports NFIT table.
*/
static int
is_nfit_available()
{
	LOG(3, "is_nfit_available()");

	DWORD signatures_size;
	char *signatures = NULL;
	int is_nfit = 0;
	DWORD offset = 0;

	/* first call returns the required size of the signature list */
	signatures_size = EnumSystemFirmwareTables(ACPI_SIGNATURE, NULL, 0);
	if (signatures_size == 0) {
		ERR("!EnumSystemFirmwareTables");
		return -1;
	}
	/* +1 for the terminating NUL appended below */
	signatures = (char *)Malloc(signatures_size + 1);
	if (signatures == NULL) {
		ERR("!malloc");
		return -1;
	}
	int ret = EnumSystemFirmwareTables(ACPI_SIGNATURE,
					signatures, signatures_size);
	signatures[signatures_size] = '\0';
	/* NOTE(review): 'ret' is int vs DWORD size -- mixed-sign compare */
	if (ret != signatures_size) {
		ERR("!EnumSystemFirmwareTables");
		goto err;
	}

	/* scan the packed list of 4-byte table signatures for "NFIT" */
	while (offset <= signatures_size) {
		int nfit_sig = strncmp(signatures + offset,
				NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
		if (nfit_sig == 0) {
			is_nfit = 1;
			break;
		}
		offset += NFIT_SIGNATURE_LEN;
	}

	Free(signatures);
	return is_nfit;

err:
	Free(signatures);
	return -1;
}
/*
* is_auto_flush_cap_set -- (internal) check if specific
* capabilities bits are set.
*
* ACPI 6.2A Specification:
* Bit[0] - CPU Cache Flush to NVDIMM Durability on
* Power Loss Capable. If set to 1, indicates that platform
* ensures the entire CPU store data path is flushed to
* persistent memory on system power loss.
* Bit[1] - Memory Controller Flush to NVDIMM Durability on Power Loss Capable.
* If set to 1, indicates that platform provides mechanisms to automatically
* flush outstanding write data from the memory controller to persistent memory
* in the event of platform power loss. Note: If bit 0 is set to 1 then this bit
* shall be set to 1 as well.
*/
static int
is_auto_flush_cap_set(uint32_t capabilities)
{
	LOG(3, "is_auto_flush_cap_set capabilities 0x%" PRIx32, capabilities);

	/* bit 0: CPU cache flush on power loss; bit 1: memory ctrl flush */
	int cpu_cache_flush = CHECK_BIT(capabilities, 0);
	int mem_ctrl_flush = CHECK_BIT(capabilities, 1);
	LOG(15, "CPU_cache_flush %d, memory_controller_flush %d",
		cpu_cache_flush, mem_ctrl_flush);

	/* eADR requires both capabilities */
	return (cpu_cache_flush == 1 && mem_ctrl_flush == 1) ? 1 : 0;
}
/*
* parse_nfit_buffer -- (internal) parse nfit buffer
* if platform_capabilities struct is available return pcs structure.
*/
/*
 * Walk the NFIT sub-tables looking for the Platform Capabilities
 * Structure; return it (or an all-zero struct when absent/malformed).
 *
 * Fixes vs the previous version:
 * - the 16-bit 'type' and 'length' fields were read through a single
 *   byte dereference, which truncated any value >= 256 and could
 *   misparse the table; read them in full with memcpy instead
 *   (assumes a little-endian host, which holds on Windows -- the only
 *   platform this file is built for),
 * - a zero-length sub-table no longer causes an infinite loop,
 * - a sub-table header is never read past the end of the buffer.
 */
static struct platform_capabilities
parse_nfit_buffer(const unsigned char *nfit_buffer, unsigned long buffer_size)
{
	LOG(3, "parse_nfit_buffer nfit_buffer %s, buffer_size %lu",
		nfit_buffer, buffer_size);

	uint16_t type;
	uint16_t length;
	size_t offset = sizeof(struct nfit_header);
	struct platform_capabilities pcs = {0};

	/* every NFIT sub-table starts with uint16 type + uint16 length */
	while (offset + sizeof(type) + sizeof(length) <= buffer_size) {
		memcpy(&type, nfit_buffer + offset, sizeof(type));
		memcpy(&length, nfit_buffer + offset + sizeof(type),
			sizeof(length));

		if (length == 0) {
			/* malformed sub-table -- avoid an infinite loop */
			break;
		}

		if (type == PCS_TYPE_NUMBER &&
				length == sizeof(struct platform_capabilities) &&
				offset + length <= buffer_size) {
			memmove(&pcs, nfit_buffer + offset, length);
			return pcs;
		}

		offset += length;
	}

	return pcs;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush.
*/
int
pmem2_auto_flush(void)
{
	LOG(3, NULL);

	DWORD nfit_buffer_size = 0;
	DWORD nfit_written = 0;
	PVOID nfit_buffer = NULL;
	struct nfit_header *nfit_data;
	struct platform_capabilities *pc = NULL;

	int eADR = 0;
	int is_nfit = is_nfit_available();
	if (is_nfit == 0) {
		LOG(15, "ACPI NFIT table not available");
		return 0;
	}
	/* NOTE(review): 'is_nfit != 1' alone already covers 'is_nfit < 0' */
	if (is_nfit < 0 || is_nfit != 1) {
		LOG(1, "!is_nfit_available");
		return -1;
	}

	/* get the entire nfit size */
	nfit_buffer_size = GetSystemFirmwareTable(
		(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, NULL, 0);
	if (nfit_buffer_size == 0) {
		ERR("!GetSystemFirmwareTable");
		return -1;
	}

	/* reserve buffer */
	nfit_buffer = (unsigned char *)Malloc(nfit_buffer_size);
	if (nfit_buffer == NULL) {
		ERR("!malloc");
		goto err;
	}

	/* write actual nfit to buffer */
	nfit_written = GetSystemFirmwareTable(
		(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE,
		nfit_buffer, nfit_buffer_size);
	if (nfit_written == 0) {
		ERR("!GetSystemFirmwareTable");
		goto err;
	}

	/* a short read means the firmware data changed under us */
	if (nfit_buffer_size != nfit_written) {
		errno = ERROR_INVALID_DATA;
		ERR("!GetSystemFirmwareTable invalid data");
		goto err;
	}

	/* sanity-check the table signature before parsing */
	nfit_data = (struct nfit_header *)nfit_buffer;
	int nfit_sig = strncmp(nfit_data->signature,
			NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
	if (nfit_sig != 0) {
		ERR("!NFIT buffer has invalid data");
		goto err;
	}

	/* extract the Platform Capabilities Structure and test eADR bits */
	struct platform_capabilities pcs = parse_nfit_buffer(
			nfit_buffer, nfit_buffer_size);

	eADR = is_auto_flush_cap_set(pcs.capabilities);

	Free(nfit_buffer);
	return eADR;

err:
	Free(nfit_buffer);
	return -1;
}
| 4,857 | 23.535354 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/badblocks_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"
#include "file.h"
#include "out.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "extent.h"
/* iterator function: fetch the next bad block into *bb */
typedef int pmem2_badblock_next_type(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);

/* low-level iterator: return the next raw ndctl badblock (or NULL) */
typedef void *pmem2_badblock_get_next_type(
		struct pmem2_badblock_context *bbctx);

/* state of a bad-block iteration over a single file or device */
struct pmem2_badblock_context {
	/* file descriptor */
	int fd;

	/* pmem2 file type */
	enum pmem2_file_type file_type;

	/* ndctl context */
	struct ndctl_ctx *ctx;

	/*
	 * Function pointer to:
	 * - pmem2_badblock_next_namespace() or
	 * - pmem2_badblock_next_region()
	 */
	pmem2_badblock_next_type *pmem2_badblock_next_func;

	/*
	 * Function pointer to:
	 * - pmem2_namespace_get_first_badblock() or
	 * - pmem2_namespace_get_next_badblock() or
	 * - pmem2_region_get_first_badblock() or
	 * - pmem2_region_get_next_badblock()
	 */
	pmem2_badblock_get_next_type *pmem2_badblock_get_next_func;

	/* needed only by the ndctl namespace badblock iterator */
	struct ndctl_namespace *ndns;

	/* needed only by the ndctl region badblock iterator */
	struct {
		struct ndctl_bus *bus;
		struct ndctl_region *region;
		unsigned long long ns_res; /* address of the namespace */
		unsigned long long ns_beg; /* the beginning of the namespace */
		unsigned long long ns_end; /* the end of the namespace */
	} rgn;

	/* file's extents */
	struct extents *exts;
	/* resume state: next extent to check against 'last_bb' */
	unsigned first_extent;
	/* bad block carried over when it spans more than one extent */
	struct pmem2_badblock last_bb;
};

/* forward declarations */
static int pmem2_badblock_next_namespace(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);
static int pmem2_badblock_next_region(
		struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb);
static void *pmem2_namespace_get_first_badblock(
		struct pmem2_badblock_context *bbctx);
static void *pmem2_region_get_first_badblock(
		struct pmem2_badblock_context *bbctx);
/*
* badblocks_get_namespace_bounds -- (internal) returns the bounds
* (offset and size) of the given namespace
* relative to the beginning of its region
*/
static int
badblocks_get_namespace_bounds(struct ndctl_region *region,
		struct ndctl_namespace *ndns,
		unsigned long long *ns_offset,
		unsigned long long *ns_size)
{
	LOG(3, "region %p namespace %p ns_offset %p ns_size %p",
		region, ndns, ns_offset, ns_size);

	/* the resource/size query differs per namespace personality */
	struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns);
	struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);

	ASSERTne(ns_offset, NULL);
	ASSERTne(ns_size, NULL);

	if (pfn) {
		*ns_offset = ndctl_pfn_get_resource(pfn);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(pfn) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		*ns_size = ndctl_pfn_get_size(pfn);
		if (*ns_size == ULLONG_MAX) {
			ERR("(pfn) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	} else if (dax) {
		*ns_offset = ndctl_dax_get_resource(dax);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(dax) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		*ns_size = ndctl_dax_get_size(dax);
		if (*ns_size == ULLONG_MAX) {
			ERR("(dax) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		LOG(10, "(dax) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	} else { /* raw or btt */
		*ns_offset = ndctl_namespace_get_resource(ndns);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(raw/btt) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		*ns_size = ndctl_namespace_get_size(ndns);
		if (*ns_size == ULLONG_MAX) {
			ERR("(raw/btt) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}

		LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	}

	/* make the offset relative to the beginning of the region */
	unsigned long long region_offset = ndctl_region_get_resource(region);
	if (region_offset == ULLONG_MAX) {
		ERR("!cannot read offset of the region");
		return PMEM2_E_ERRNO;
	}

	LOG(10, "region_offset 0x%llx", region_offset);
	*ns_offset -= region_offset;

	return 0;
}
/*
* badblocks_devdax_clear_one_badblock -- (internal) clear one bad block
* in the dax device
*/
/*
 * Clear one bad block on a device-DAX namespace by submitting an
 * ARS-capabilities query followed by a clear-error command.
 *
 * Fix vs the previous version: ndctl_bus_cmd_new_clear_error() can
 * return NULL on allocation failure, but its result was passed to
 * ndctl_cmd_submit() unchecked (unlike cmd_ars_cap above) -- a NULL
 * dereference; now it is validated the same way.
 */
static int
badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus,
		unsigned long long address,
		unsigned long long length)
{
	LOG(3, "bus %p address 0x%llx length %llu (bytes)",
		bus, address, length);

	int ret;

	struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus,
					address, length);
	if (cmd_ars_cap == NULL) {
		ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		return PMEM2_E_ERRNO;
	}

	ret = ndctl_cmd_submit(cmd_ars_cap);
	if (ret) {
		ERR("ndctl_cmd_submit() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		/* ndctl_cmd_submit() returns -errno */
		goto out_ars_cap;
	}

	struct ndctl_range range;
	ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range);
	if (ret) {
		ERR("ndctl_cmd_ars_cap_get_range() failed");
		/* ndctl_cmd_ars_cap_get_range() returns -errno */
		goto out_ars_cap;
	}

	struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error(
		range.address, range.length, cmd_ars_cap);
	if (cmd_clear_error == NULL) {
		/* allocation failure -- previously dereferenced unchecked */
		ERR("ndctl_bus_cmd_new_clear_error() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		ret = PMEM2_E_ERRNO;
		goto out_ars_cap;
	}

	ret = ndctl_cmd_submit(cmd_clear_error);
	if (ret) {
		ERR("ndctl_cmd_submit() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		/* ndctl_cmd_submit() returns -errno */
		goto out_clear_error;
	}

	size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error);

	LOG(4, "cleared %zu out of %llu bad blocks", cleared, length);

	ASSERT(cleared <= length);

	if (cleared < length) {
		ERR("failed to clear %llu out of %llu bad blocks",
			length - cleared, length);
		errno = ENXIO; /* ndctl handles such error in this way */
		ret = PMEM2_E_ERRNO;
	} else {
		ret = 0;
	}

out_clear_error:
	ndctl_cmd_unref(cmd_clear_error);
out_ars_cap:
	ndctl_cmd_unref(cmd_ars_cap);

	return ret;
}
/*
* pmem2_badblock_context_new -- allocate and create a new bad block context
*/
int
pmem2_badblock_context_new(const struct pmem2_source *src,
		struct pmem2_badblock_context **bbctx)
{
	LOG(3, "src %p bbctx %p", src, bbctx);

	ASSERTne(bbctx, NULL);

	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support bad blocks");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_FD);

	struct ndctl_ctx *ctx;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;
	struct pmem2_badblock_context *tbbctx = NULL;
	enum pmem2_file_type pmem2_type;
	int ret = PMEM2_E_UNKNOWN;
	*bbctx = NULL;

	/* ndctl_new() returns -errno on failure */
	errno = ndctl_new(&ctx) * (-1);
	if (errno) {
		ERR("!ndctl_new");
		return PMEM2_E_ERRNO;
	}

	pmem2_type = src->value.ftype;

	/* find the ndctl region/namespace backing this source */
	ret = pmem2_region_namespace(ctx, src, &region, &ndns);
	if (ret) {
		LOG(1, "getting region and namespace failed");
		goto exit_ndctl_unref;
	}

	tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret);
	if (ret)
		goto exit_ndctl_unref;

	tbbctx->fd = src->value.fd;
	tbbctx->file_type = pmem2_type;
	tbbctx->ctx = ctx;

	if (region == NULL || ndns == NULL) {
		/* did not find any matching device */
		*bbctx = tbbctx;
		return 0;
	}

	if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) {
		/* fsdax: use the per-namespace badblock iterator */
		tbbctx->ndns = ndns;
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_namespace;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_namespace_get_first_badblock;
	} else {
		/* otherwise: use the per-region iterator */
		unsigned long long ns_beg, ns_size, ns_end;

		ret = badblocks_get_namespace_bounds(
				region, ndns,
				&ns_beg, &ns_size);
		if (ret) {
			LOG(1, "cannot read namespace's bounds");
			goto error_free_all;
		}

		ns_end = ns_beg + ns_size - 1;

		LOG(10,
			"namespace: begin %llu, end %llu size %llu (in 512B sectors)",
			B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size));

		tbbctx->rgn.bus = ndctl_region_get_bus(region);
		tbbctx->rgn.region = region;
		tbbctx->rgn.ns_beg = ns_beg;
		tbbctx->rgn.ns_end = ns_end;
		tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region);
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_region;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_region_get_first_badblock;
	}

	if (pmem2_type == PMEM2_FTYPE_REG) {
		/* only regular files have extents */
		ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts);
		if (ret) {
			LOG(1, "getting extents of fd %i failed",
				src->value.fd);
			goto error_free_all;
		}
	}

	/* set the context */
	*bbctx = tbbctx;

	return 0;

error_free_all:
	pmem2_extents_destroy(&tbbctx->exts);
	Free(tbbctx);

exit_ndctl_unref:
	ndctl_unref(ctx);

	return ret;
}
/*
* pmem2_badblock_context_delete -- delete and free the bad block context
*/
void
pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	ASSERTne(bbctx, NULL);

	struct pmem2_badblock_context *ctx = *bbctx;
	if (ctx == NULL)
		return;

	/* release extents, the ndctl context and the context itself */
	pmem2_extents_destroy(&ctx->exts);
	ndctl_unref(ctx->ctx);
	Free(ctx);

	*bbctx = NULL;
}
/*
* pmem2_namespace_get_next_badblock -- (internal) wrapper for
* ndctl_namespace_get_next_badblock
*/
static void *
pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	/* delegate straight to libndctl's namespace iterator */
	struct ndctl_namespace *ndns = bbctx->ndns;
	return ndctl_namespace_get_next_badblock(ndns);
}
/*
* pmem2_namespace_get_first_badblock -- (internal) wrapper for
* ndctl_namespace_get_first_badblock
*/
static void *
pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	/* after the first fetch, subsequent calls use the 'next' variant */
	bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock;

	return ndctl_namespace_get_first_badblock(bbctx->ndns);
}
/*
* pmem2_region_get_next_badblock -- (internal) wrapper for
* ndctl_region_get_next_badblock
*/
static void *
pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	/* delegate straight to libndctl's region iterator */
	struct ndctl_region *region = bbctx->rgn.region;
	return ndctl_region_get_next_badblock(region);
}
/*
* pmem2_region_get_first_badblock -- (internal) wrapper for
* ndctl_region_get_first_badblock
*/
static void *
pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);

	/* after the first fetch, subsequent calls use the 'next' variant */
	bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock;

	return ndctl_region_get_first_badblock(bbctx->rgn.region);
}
/*
* pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next()
* called for ndctl with namespace badblock
* iterator
*
* This function works only for fsdax, but does not require any special
* permissions.
*/
static int
pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);

	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	struct badblock *raw = bbctx->pmem2_badblock_get_next_func(bbctx);
	if (raw == NULL)
		return PMEM2_E_NO_BAD_BLOCK_FOUND;

	/*
	 * libndctl reports offset and length in 512B sectors, with the
	 * offset relative to the beginning of the namespace -- convert
	 * both values to bytes.
	 */
	bb->offset = SEC2B(raw->offset);
	bb->length = SEC2B(raw->len);

	return 0;
}
/*
* pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next()
* called for ndctl with region badblock iterator
*
* This function works for all types of namespaces, but requires read access to
* privileged device information.
*/
static int
pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);

	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	unsigned long long bb_beg, bb_end;
	unsigned long long beg, end;
	struct badblock *bbn;

	unsigned long long ns_beg = bbctx->rgn.ns_beg;
	unsigned long long ns_end = bbctx->rgn.ns_end;

	/* skip region bad blocks that lie entirely outside the namespace */
	do {
		bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
		if (bbn == NULL)
			return PMEM2_E_NO_BAD_BLOCK_FOUND;

		LOG(10,
			"region bad block: begin %llu end %llu length %u (in 512B sectors)",
			bbn->offset, bbn->offset + bbn->len - 1, bbn->len);

		/*
		 * libndctl returns offset and length of a bad block
		 * both expressed in 512B sectors. Offset is relative
		 * to the beginning of the region.
		 */
		bb_beg = SEC2B(bbn->offset);
		bb_end = bb_beg + SEC2B(bbn->len) - 1;

	} while (bb_beg > ns_end || ns_beg > bb_end);

	/* clip the bad block to the namespace boundaries */
	beg = (bb_beg > ns_beg) ? bb_beg : ns_beg;
	end = (bb_end < ns_end) ? bb_end : ns_end;

	/*
	 * Form a new bad block structure with offset and length
	 * expressed in bytes and offset relative to the beginning
	 * of the namespace.
	 */
	bb->offset = beg - ns_beg;
	bb->length = end - beg + 1;

	LOG(4,
		"namespace bad block: begin %llu end %llu length %llu (in 512B sectors)",
		B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1);

	return 0;
}
/*
* pmem2_badblock_next -- get the next bad block
*/
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
		struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);

	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	struct pmem2_badblock bbn;
	unsigned long long bb_beg;
	unsigned long long bb_end;
	unsigned long long bb_len;
	unsigned long long bb_off;
	unsigned long long ext_beg;
	unsigned long long ext_end;
	unsigned e;
	int ret;

	if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) {
		/* did not find any matching device */
		return PMEM2_E_NO_BAD_BLOCK_FOUND;
	}

	struct extents *exts = bbctx->exts;

	/* DAX devices have no extents */
	if (!exts) {
		ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
		*bb = bbn;
		return ret;
	}

	/*
	 * There is at least one extent.
	 * Loop until:
	 * 1) a bad block overlaps with an extent or
	 * 2) there are no more bad blocks.
	 */
	int bb_overlaps_with_extent = 0;
	do {
		if (bbctx->last_bb.length) {
			/*
			 * We have saved the last bad block to check it
			 * with the next extent saved
			 * in bbctx->first_extent.
			 */
			ASSERTne(bbctx->first_extent, 0);
			bbn = bbctx->last_bb;
			bbctx->last_bb.offset = 0;
			bbctx->last_bb.length = 0;
		} else {
			ASSERTeq(bbctx->first_extent, 0);
			/* look for the next bad block */
			ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
			if (ret)
				return ret;
		}

		bb_beg = bbn.offset;
		bb_end = bb_beg + bbn.length - 1;

		for (e = bbctx->first_extent;
				e < exts->extents_count;
				e++) {

			ext_beg = exts->extents[e].offset_physical;
			ext_end = ext_beg + exts->extents[e].length - 1;

			/* check if the bad block overlaps with the extent */
			if (bb_beg <= ext_end && ext_beg <= bb_end) {
				/* bad block overlaps with the extent */
				bb_overlaps_with_extent = 1;

				if (bb_end > ext_end &&
						e + 1 < exts->extents_count) {
					/*
					 * The bad block is longer than
					 * the extent and there are
					 * more extents.
					 * Save the current bad block
					 * to check it with the next extent.
					 */
					bbctx->first_extent = e + 1;
					bbctx->last_bb = bbn;
				} else {
					/*
					 * All extents were checked
					 * with the current bad block.
					 */
					bbctx->first_extent = 0;
					bbctx->last_bb.length = 0;
					bbctx->last_bb.offset = 0;
				}
				break;
			}
		}

		/* check all extents with the next bad block */
		if (bb_overlaps_with_extent == 0) {
			bbctx->first_extent = 0;
			bbctx->last_bb.length = 0;
			bbctx->last_bb.offset = 0;
		}

	} while (bb_overlaps_with_extent == 0);

	/* bad block overlaps with an extent -- clip it to the extent */
	bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg;
	bb_end = (bb_end < ext_end) ? bb_end : ext_end;
	bb_len = bb_end - bb_beg + 1;
	/* translate the physical offset into the file's logical offset */
	bb_off = bb_beg + exts->extents[e].offset_logical
			- exts->extents[e].offset_physical;

	LOG(10, "bad block found: physical offset: %llu, length: %llu",
		bb_beg, bb_len);

	/* make sure the offset is block-aligned */
	unsigned long long not_block_aligned = bb_off & (exts->blksize - 1);
	if (not_block_aligned) {
		bb_off -= not_block_aligned;
		bb_len += not_block_aligned;
	}

	/* make sure the length is block-aligned */
	bb_len = ALIGN_UP(bb_len, exts->blksize);

	LOG(4, "bad block found: logical offset: %llu, length: %llu",
		bb_off, bb_len);

	/*
	 * Return the bad block with offset and length
	 * expressed in bytes and offset relative
	 * to the beginning of the file.
	 */
	bb->offset = bb_off;
	bb->length = bb_len;

	return 0;
}
/*
 * pmem2_badblock_clear_fsdax -- (internal) clear one bad block
 * in a FSDAX device
 *
 * Clears the block by punching a hole over the bad range and then
 * re-allocating fresh blocks in its place, both with fallocate(2).
 * Returns 0 on success or a PMEM2_E_* error code.
 */
static int
pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb)
{
	LOG(3, "fd %i badblock %p", fd, bb);
	ASSERTne(bb, NULL);

	LOG(10,
		"clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)",
		fd, B2SEC(bb->offset), B2SEC(bb->length));

	/* fallocate() takes offset as the off_t type */
	if (bb->offset > (size_t)INT64_MAX) {
		ERR("bad block's offset is greater than INT64_MAX");
		return PMEM2_E_OFFSET_OUT_OF_RANGE;
	}

	/* fallocate() takes length as the off_t type */
	if (bb->length > (size_t)INT64_MAX) {
		ERR("bad block's length is greater than INT64_MAX");
		return PMEM2_E_LENGTH_OUT_OF_RANGE;
	}

	off_t off = (off_t)bb->offset;
	off_t len = (off_t)bb->length;

	/* step 1: deallocate the bad blocks by punching a hole */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			off, len)) {
		ERR("!fallocate");
		return PMEM2_E_ERRNO;
	}

	/* step 2: allocate new blocks in their place */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, off, len)) {
		ERR("!fallocate");
		return PMEM2_E_ERRNO;
	}

	return 0;
}
/*
 * pmem2_badblock_clear_devdax -- (internal) clear one bad block
 * in a DAX device
 *
 * Delegates to badblocks_devdax_clear_one_badblock() with the block's
 * offset translated into the namespace resource address space.
 */
static int
pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx,
	const struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);

	ASSERTne(bb, NULL);
	ASSERTne(bbctx, NULL);
	ASSERTne(bbctx->rgn.bus, NULL);
	ASSERTne(bbctx->rgn.ns_res, 0);

	LOG(4,
		"clearing a bad block: offset %zu length %zu (in 512B sectors)",
		B2SEC(bb->offset), B2SEC(bb->length));

	int rv = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus,
		bb->offset + bbctx->rgn.ns_res,
		bb->length);
	if (rv == 0)
		return 0;

	LOG(1,
		"failed to clear a bad block: offset %zu length %zu (in 512B sectors)",
		B2SEC(bb->offset),
		B2SEC(bb->length));
	return rv;
}
/*
 * pmem2_badblock_clear -- clear one bad block
 *
 * Dispatches to the DEVDAX or FSDAX implementation according to the
 * file type recorded in the bad-block context.
 */
int
pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
	const struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p badblock %p", bbctx, bb);

	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	if (bbctx->file_type != PMEM2_FTYPE_DEVDAX) {
		/* the only other supported type is a regular (FSDAX) file */
		ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG);
		return pmem2_badblock_clear_fsdax(bbctx->fd, bb);
	}

	return pmem2_badblock_clear_devdax(bbctx, bb);
}
| 19,316 | 24.218016 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/region_namespace_ndctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* region_namespace_ndctl.h -- internal definitions for libpmem2
* common ndctl functions
*/
#ifndef PMDK_REGION_NAMESPACE_NDCTL_H
#define PMDK_REGION_NAMESPACE_NDCTL_H 1

#include "os.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Iterate over every ndctl namespace: for each bus in the context,
 * each region on that bus, and each namespace in that region.
 * Expands to three nested foreach headers, so it must be followed by
 * a single statement (or block) just like a regular for loop.
 */
#define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \
	ndctl_bus_foreach(ctx, bus) \
	ndctl_region_foreach(bus, region) \
	ndctl_namespace_foreach(region, ndns)

/*
 * Find the ndctl region and namespace matching the given source;
 * results are returned through pregion/pndns.
 */
int pmem2_region_namespace(struct ndctl_ctx *ctx,
	const struct pmem2_source *src,
	struct ndctl_region **pregion,
	struct ndctl_namespace **pndns);

#ifdef __cplusplus
}
#endif

#endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
| 754 | 21.878788 | 64 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/vm_reservation.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vm_reservation.c -- implementation of virtual memory allocation API
*/
#include "libpmem2.h"
/*
 * pmem2_vm_reservation_new -- creates new virtual memory reservation
 *
 * Stub for platforms where reservations are not implemented:
 * always returns PMEM2_E_NOSUPP; the parameters are ignored.
 */
int
pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
	size_t size, void *address)
{
	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_vm_reservation_delete -- deletes reservation bound to
 * structure pmem2_vm_reservation
 *
 * Stub for platforms where reservations are not implemented:
 * always returns PMEM2_E_NOSUPP; the parameter is ignored.
 */
int
pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv)
{
	return PMEM2_E_NOSUPP;
}
| 614 | 20.206897 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/usc_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_windows.c -- pmem2 usc function for windows
*/
#include "alloc.h"
#include "source.h"
#include "out.h"
#include "libpmem2.h"
#include "pmem2_utils.h"
/* size of a textual GUID, including the terminating NUL */
#define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
/* length of "\\?\Volume{GUID}" (no trailing backslash, no NUL) */
#define VOLUME_PATH_SIZE sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */)

/*
 * get_volume_handle -- returns volume handle
 *
 * Resolves the volume GUID path (\\?\Volume{GUID}\) of the file behind
 * `handle`, strips the trailing backslash and opens the volume with
 * FILE_READ_ATTRIBUTES access (enough to issue ioctls).
 * On success the caller owns *volume_handle and must CloseHandle() it.
 */
static int
get_volume_handle(HANDLE handle, HANDLE *volume_handle)
{
	wchar_t *volume;
	wchar_t tmp[10];
	/* deliberately undersized call: only obtains the required length */
	DWORD len =
		GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID);

	if (len == 0) {
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}

	len *= sizeof(wchar_t);

	int err;
	volume = pmem2_malloc(len, &err);
	if (volume == NULL)
		return err;

	/*
	 * NOTE(review): `len` is now a byte count, but the API expects the
	 * buffer size in wide characters, so the stated capacity is 2x the
	 * real one. Harmless as long as the path fits in the length reported
	 * by the first call — confirm against the API contract.
	 */
	if (!GetFinalPathNameByHandleW(handle, volume, len,
			VOLUME_NAME_GUID)) {
		Free(volume);
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}

	/* cut off the trailing backslash, keep "\\?\Volume{GUID}" only */
	ASSERTeq(volume[VOLUME_PATH_SIZE], '\\');
	volume[VOLUME_PATH_SIZE] = '\0';

	*volume_handle = CreateFileW(volume, /* path to the file */
		/* request access to send ioctl to the file */
		FILE_READ_ATTRIBUTES,
		/* do not block access to the file */
		FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
		NULL, /* security attributes */
		OPEN_EXISTING, /* open only if it exists */
		FILE_ATTRIBUTE_NORMAL, /* no attributes */
		NULL); /* used only for new files */

	Free(volume);
	if (*volume_handle == INVALID_HANDLE_VALUE) {
		ERR("!!CreateFileW");
		return pmem2_lasterror_to_err();
	}

	return 0;
}
/*
 * get_device_guid -- (internal) fetch the storage device GUID for the
 * volume that backs `handle`, via IOCTL_STORAGE_GET_DEVICE_NUMBER_EX
 */
static int
get_device_guid(HANDLE handle, GUID *guid)
{
	HANDLE vol;
	int ret = get_volume_handle(handle, &vol);
	if (vol == INVALID_HANDLE_VALUE)
		return ret;

	STORAGE_DEVICE_NUMBER_EX sdn;
	sdn.DeviceNumber = -1;
	DWORD bytes_returned = 0;

	BOOL ok = DeviceIoControl(vol,
		IOCTL_STORAGE_GET_DEVICE_NUMBER_EX,
		NULL, 0,
		&sdn, sizeof(sdn),
		&bytes_returned, NULL);
	if (!ok) {
		/* the ioctl is unavailable on older servers */
		ERR(
			"Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system");
		CloseHandle(vol);
		return PMEM2_E_NOSUPP;
	}

	*guid = sdn.DeviceGuid;
	CloseHandle(vol);
	return 0;
}
/*
 * pmem2_source_device_idW -- return the backing device's GUID as a wide
 * string.
 *
 * Two-call protocol: when `id` is NULL, the required buffer size (in
 * bytes) is stored in *len and 0 is returned. Otherwise the GUID is
 * formatted into `id`. Not supported for anonymous sources.
 */
int
pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id,
	size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	/* size query only */
	if (id == NULL) {
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}

	if (*len < GUID_SIZE * sizeof(*id)) {
		/* fixed typo in the message: "to small" -> "too small" */
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}

	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;

	_snwprintf(id, GUID_SIZE,
		L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
		guid.Data4[1], guid.Data4[2], guid.Data4[3],
		guid.Data4[4], guid.Data4[5], guid.Data4[6],
		guid.Data4[7]);
	return 0;
}
/*
 * pmem2_source_device_idU -- return the backing device's GUID as a
 * narrow (UTF-8) string.
 *
 * Two-call protocol: when `id` is NULL, the required buffer size (in
 * bytes) is stored in *len and 0 is returned. Otherwise the GUID is
 * formatted into `id`. Not supported for anonymous sources.
 */
int
pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	/* size query only */
	if (id == NULL) {
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}

	if (*len < GUID_SIZE * sizeof(*id)) {
		/* fixed typo in the message: "to small" -> "too small" */
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}

	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;

	if (util_snprintf(id, GUID_SIZE,
		"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
		guid.Data4[1], guid.Data4[2], guid.Data4[3],
		guid.Data4[4], guid.Data4[5], guid.Data4[6],
		guid.Data4[7]) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	return 0;
}
/*
 * pmem2_source_device_usc -- read the device's unsafe shutdown count.
 *
 * Queries StorageDeviceUnsafeShutdownCount through
 * IOCTL_STORAGE_QUERY_PROPERTY in two steps: first a PropertyExistsQuery
 * to verify the platform supports the property, then a
 * PropertyStandardQuery to fetch the actual value into *usc.
 * Not supported for anonymous sources.
 */
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
	LOG(3, "cfg %p, usc %p", src, usc);

	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support unsafe shutdown count");
		return PMEM2_E_NOSUPP;
	}

	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);

	*usc = 0;

	HANDLE vHandle;
	int err = get_volume_handle(src->value.handle, &vHandle);
	if (vHandle == INVALID_HANDLE_VALUE)
		return err;

	STORAGE_PROPERTY_QUERY prop;
	DWORD dwSize;
	prop.PropertyId = StorageDeviceUnsafeShutdownCount;
	/* step 1: only check whether the property exists */
	prop.QueryType = PropertyExistsQuery;
	prop.AdditionalParameters[0] = 0;
	STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret;

	BOOL bResult = DeviceIoControl(vHandle,
		IOCTL_STORAGE_QUERY_PROPERTY,
		&prop, sizeof(prop),
		&ret, sizeof(ret),
		(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);

	if (!bResult) {
		ERR(
			"Getting unsafe shutdown count is not supported on this system");
		CloseHandle(vHandle);
		return PMEM2_E_NOSUPP;
	}

	/* step 2: fetch the actual value */
	prop.QueryType = PropertyStandardQuery;
	bResult = DeviceIoControl(vHandle,
		IOCTL_STORAGE_QUERY_PROPERTY,
		&prop, sizeof(prop),
		&ret, sizeof(ret),
		(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);

	CloseHandle(vHandle);

	if (!bResult) {
		ERR("!!DeviceIoControl");
		return pmem2_lasterror_to_err();
	}

	*usc = ret.UnsafeShutdownCount;

	return 0;
}
| 5,261 | 22.283186 | 93 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/ravl_interval.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.c -- ravl_interval implementation
*/
#include "alloc.h"
#include "map.h"
#include "ravl_interval.h"
#include "pmem2_utils.h"
#include "sys_util.h"
#include "os_thread.h"
#include "ravl.h"
/*
 * ravl_interval - structure representing two points
 * on the number line
 */
struct ravl_interval {
	struct ravl *tree;		/* tree of ravl_interval_node entries */
	ravl_interval_min *get_min;	/* callback: lower bound of an entry */
	ravl_interval_max *get_max;	/* callback: upper bound of an entry */
};

/*
 * ravl_interval_node - structure holding min, max functions and address
 */
struct ravl_interval_node {
	void *addr;			/* user object this interval describes */
	ravl_interval_min *get_min;	/* lower-bound callback (copied from tree) */
	ravl_interval_max *get_max;	/* upper-bound callback (copied from tree) */
};
/*
 * ravl_interval_compare -- compare intervals by its boundaries,
 * no overlapping allowed
 *
 * Returns -1 when `lhs` lies entirely before `rhs`, 1 when it starts
 * after `rhs`, and 0 otherwise (treated as "equal" by the tree).
 */
static int
ravl_interval_compare(const void *lhs, const void *rhs)
{
	const struct ravl_interval_node *left = lhs;
	const struct ravl_interval_node *right = rhs;

	/* left ends at or before right's start -> strictly less */
	if (left->get_min(left->addr) < right->get_min(right->addr) &&
			left->get_max(left->addr) <= right->get_min(right->addr))
		return -1;
	/*
	 * NOTE(review): when left->get_min > right->get_min, the second
	 * clause (left->get_max >= right->get_min) is trivially satisfied,
	 * so any interval starting after `right` compares greater even if it
	 * overlaps. The find helpers below re-check overlap explicitly, and
	 * the header comment says the tree holds no overlapping intervals —
	 * confirm that invariant before relying on this comparator elsewhere.
	 */
	if (left->get_min(left->addr) > right->get_min(right->addr) &&
			left->get_max(left->addr) >= right->get_min(right->addr))
		return 1;
	return 0;
}
/*
 * ravl_interval_delete - finalize the ravl interval module
 *
 * Destroys the underlying tree and frees the container itself;
 * `ri` must not be used afterwards.
 */
void
ravl_interval_delete(struct ravl_interval *ri)
{
	ravl_delete(ri->tree);
	ri->tree = NULL;
	Free(ri);
}
/*
 * ravl_interval_new -- initialize the ravl interval module
 *
 * Allocates the container and its backing tree; stores the boundary
 * callbacks used by every later operation. Returns NULL on allocation
 * failure.
 */
struct ravl_interval *
ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max)
{
	int ret;
	struct ravl_interval *ri = pmem2_malloc(sizeof(*ri), &ret);
	if (ret)
		return NULL;

	ri->tree = ravl_new_sized(ravl_interval_compare,
		sizeof(struct ravl_interval_node));
	if (ri->tree == NULL) {
		Free(ri);
		return NULL;
	}

	ri->get_min = get_min;
	ri->get_max = get_max;
	return ri;
}
/*
* ravl_interval_insert -- insert interval entry into the tree
*/
int
ravl_interval_insert(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node rin;
rin.addr = addr;
rin.get_min = ri->get_min;
rin.get_max = ri->get_max;
if (ravl_emplace_copy(ri->tree, &rin))
return PMEM2_E_ERRNO;
return 0;
}
/*
* ravl_interval_remove -- remove interval entry from the tree
*/
int
ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin)
{
struct ravl_node *node = ravl_find(ri->tree, rin,
RAVL_PREDICATE_EQUAL);
if (!node)
return PMEM2_E_MAPPING_NOT_FOUND;
ravl_remove(ri->tree, node);
return 0;
}
/*
 * ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to
 * the current one or at the same place
 *
 * Returns NULL when the closest preceding interval does not actually
 * overlap the searched range.
 */
static struct ravl_interval_node *
ravl_interval_find_prior_or_eq(struct ravl *tree,
	struct ravl_interval_node *rin)
{
	struct ravl_node *node;
	struct ravl_interval_node *cur;

	node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL);
	if (!node)
		return NULL;

	cur = ravl_data(node);
	/*
	 * If the end of the found interval is below the searched boundary, then
	 * this is not our interval.
	 */
	if (cur->get_max(cur->addr) <= rin->get_min(rin->addr))
		return NULL;

	return cur;
}
/*
 * ravl_interval_find_later -- find overlapping interval starting later than
 * the current one
 *
 * Returns NULL when the closest following interval does not actually
 * overlap the searched range.
 */
static struct ravl_interval_node *
ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin)
{
	struct ravl_node *node;
	struct ravl_interval_node *cur;

	node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER);
	if (!node)
		return NULL;

	cur = ravl_data(node);

	/*
	 * If the beginning of the found interval is above the end of
	 * the searched range, then this is not our interval.
	 */
	if (cur->get_min(cur->addr) >= rin->get_max(rin->addr))
		return NULL;

	return cur;
}
/*
* ravl_interval_find_equal -- find the interval with exact (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find_equal(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_node *node;
node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL);
if (!node)
return NULL;
return ravl_data(node);
}
/*
* ravl_interval_find -- find the earliest interval within (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_interval_node *cur;
cur = ravl_interval_find_prior_or_eq(ri->tree, &range);
if (!cur)
cur = ravl_interval_find_later(ri->tree, &range);
return cur;
}
/*
 * ravl_interval_data -- returns the data contained within interval node
 *
 * Accessor for the user object an interval node was created for.
 */
void *
ravl_interval_data(struct ravl_interval_node *rin)
{
	return (void *)rin->addr;
}
| 4,963 | 21.26009 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/map_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_windows.c -- pmem2_map (Windows)
*/
#include <stdbool.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
/* split a 64-bit value into the high/low DWORDs Windows APIs expect */
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))

/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
	"requested granularity not available because specified volume is not a direct access (DAX) volume"

/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG

/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
	"requested granularity not available because the platform doesn't support eADR"

/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"

/* indexed as [requested granularity][available granularity] */
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/*		BYTE		CACHE_LINE		PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL	*/ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
/*
 * create_mapping -- creates file mapping object for a file
 *
 * Wraps CreateFileMapping() and reports the Windows error code through
 * *err. GetLastError() is read immediately after the call because a
 * success with ERROR_ALREADY_EXISTS (the mapping object already existed)
 * is treated as a failure here.
 */
static HANDLE
create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect,
	unsigned long *err)
{
	size_t max_size = length + offset;
	SetLastError(0);
	HANDLE mh = CreateFileMapping(hfile,
		NULL, /* security attributes */
		protect,
		HIDWORD(max_size),
		LODWORD(max_size),
		NULL);

	*err = GetLastError();
	if (!mh) {
		ERR("!!CreateFileMapping");
		return NULL;
	}

	if (*err == ERROR_ALREADY_EXISTS) {
		ERR("!!CreateFileMapping");
		CloseHandle(mh);
		return NULL;
	}

	/* if the handle is valid the last error is undefined */
	*err = 0;
	return mh;
}
/*
 * is_direct_access -- check if the specified volume is a
 * direct access (DAX) volume
 *
 * Returns 1 for a DAX volume, 0 otherwise, or a negative error code
 * when the volume information cannot be queried.
 */
static int
is_direct_access(HANDLE fh)
{
	DWORD fs_flags;

	if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL, NULL,
			&fs_flags, NULL, 0)) {
		ERR("!!GetVolumeInformationByHandleW");
		/* always return a negative value */
		return pmem2_lasterror_to_err();
	}

	return (fs_flags & FILE_DAX_VOLUME) ? 1 : 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr)
{
LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
int ret = 0;
unsigned long err = 0;
size_t file_size;
*map_ptr = NULL;
if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
ERR(
"please define the max granularity requested for the mapping");
return PMEM2_E_GRANULARITY_NOT_SET;
}
ret = pmem2_source_size(src, &file_size);
if (ret)
return ret;
size_t src_alignment;
ret = pmem2_source_alignment(src, &src_alignment);
if (ret)
return ret;
size_t length;
ret = pmem2_config_validate_length(cfg, file_size, src_alignment);
if (ret)
return ret;
size_t effective_offset;
ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
if (ret)
return ret;
if (src->type == PMEM2_SOURCE_ANON)
effective_offset = 0;
/* without user-provided length, map to the end of the file */
if (cfg->length)
length = cfg->length;
else
length = file_size - effective_offset;
HANDLE map_handle = INVALID_HANDLE_VALUE;
if (src->type == PMEM2_SOURCE_HANDLE) {
map_handle = src->value.handle;
} else if (src->type == PMEM2_SOURCE_ANON) {
/* no extra settings */
} else {
ASSERT(0);
}
DWORD proto = PAGE_READWRITE;
DWORD access = FILE_MAP_ALL_ACCESS;
/* Unsupported flag combinations */
if ((cfg->protection_flag == PMEM2_PROT_NONE) ||
(cfg->protection_flag == PMEM2_PROT_WRITE) ||
(cfg->protection_flag == PMEM2_PROT_EXEC) ||
(cfg->protection_flag == (PMEM2_PROT_WRITE |
PMEM2_PROT_EXEC))) {
ERR("Windows does not support "
"this protection flag combination.");
return PMEM2_E_NOSUPP;
}
/* Translate protection flags into Windows flags */
if (cfg->protection_flag & PMEM2_PROT_WRITE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE |
FILE_MAP_EXECUTE;
} else {
/*
* Due to the already done exclusion
* of incorrect combinations, PROT_WRITE
* implies PROT_READ
*/
proto = PAGE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE;
}
} else if (cfg->protection_flag & PMEM2_PROT_READ) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READ;
access = FILE_MAP_READ | FILE_MAP_EXECUTE;
} else {
proto = PAGE_READONLY;
access = FILE_MAP_READ;
}
}
if (cfg->sharing == PMEM2_PRIVATE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_WRITECOPY;
access = FILE_MAP_EXECUTE | FILE_MAP_COPY;
} else {
/*
* If FILE_MAP_COPY is set,
* protection is changed to read/write
*/
proto = PAGE_READONLY;
access = FILE_MAP_COPY;
}
}
/* create a file mapping handle */
HANDLE mh = create_mapping(map_handle, effective_offset, length,
proto, &err);
if (!mh) {
if (err == ERROR_ALREADY_EXISTS) {
ERR("mapping already exists");
return PMEM2_E_MAPPING_EXISTS;
} else if (err == ERROR_ACCESS_DENIED) {
return PMEM2_E_NO_ACCESS;
}
return pmem2_lasterror_to_err();
}
ret = pmem2_config_validate_addr_alignment(cfg, src);
if (ret)
return ret;
/* let's get addr from cfg struct */
LPVOID addr_hint = cfg->addr;
/* obtain a pointer to the mapping view */
void *base = MapViewOfFileEx(mh,
access,
HIDWORD(effective_offset),
LODWORD(effective_offset),
length,
addr_hint); /* hint address */
if (base == NULL) {
ERR("!!MapViewOfFileEx");
if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
DWORD ret_windows = GetLastError();
if (ret_windows == ERROR_INVALID_ADDRESS)
ret = PMEM2_E_MAPPING_EXISTS;
else
ret = pmem2_lasterror_to_err();
}
else
ret = pmem2_lasterror_to_err();
goto err_close_mapping_handle;
}
if (!CloseHandle(mh)) {
ERR("!!CloseHandle");
ret = pmem2_lasterror_to_err();
goto err_unmap_base;
}
enum pmem2_granularity available_min_granularity =
PMEM2_GRANULARITY_PAGE;
if (src->type == PMEM2_SOURCE_HANDLE) {
int direct_access = is_direct_access(src->value.handle);
if (direct_access < 0) {
ret = direct_access;
goto err_unmap_base;
}
bool eADR = (pmem2_auto_flush() == 1);
available_min_granularity =
get_min_granularity(eADR, direct_access, cfg->sharing);
} else if (src->type == PMEM2_SOURCE_ANON) {
available_min_granularity = PMEM2_GRANULARITY_BYTE;
} else {
ASSERT(0);
}
if (available_min_granularity > cfg->requested_max_granularity) {
const char *err = granularity_err_msg
[cfg->requested_max_granularity]
[available_min_granularity];
if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
FATAL(
"unhandled granularity error: available_min_granularity: %d" \
"requested_max_granularity: %d",
available_min_granularity,
cfg->requested_max_granularity);
ERR("%s", err);
ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
goto err_unmap_base;
}
/* prepare pmem2_map structure */
struct pmem2_map *map;
map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
if (!map)
goto err_unmap_base;
map->addr = base;
/*
* XXX probably in some cases the reserved length > the content length.
* Maybe it is worth to do the research.
*/
map->reserved_length = length;
map->content_length = length;
map->effective_granularity = available_min_granularity;
map->source = *src;
pmem2_set_flush_fns(map);
pmem2_set_mem_fns(map);
ret = pmem2_register_mapping(map);
if (ret)
goto err_register;
/* return a pointer to the pmem2_map structure */
*map_ptr = map;
return ret;
err_register:
free(map);
err_unmap_base:
UnmapViewOfFile(base);
return ret;
err_close_mapping_handle:
CloseHandle(mh);
return ret;
}
/*
 * pmem2_unmap -- unmap the specified region
 *
 * Unregisters the mapping, unmaps the view and frees the pmem2_map
 * structure; on success *map_ptr is set to NULL.
 */
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "mapp %p", map_ptr);

	struct pmem2_map *map = *map_ptr;

	int ret = pmem2_unregister_mapping(map);
	if (ret)
		return ret;

	if (UnmapViewOfFile(map->addr) == 0) {
		ERR("!!UnmapViewOfFile");
		return pmem2_lasterror_to_err();
	}

	Free(map);
	*map_ptr = NULL;

	return 0;
}
| 8,611 | 23.123249 | 99 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/extent_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent_linux.c - implementation of the linux fs extent query API
*/
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "file.h"
#include "out.h"
#include "extent.h"
#include "alloc.h"
/*
 * pmem2_extents_create_get -- allocate extents structure and get extents
 * of the given file
 *
 * Uses the two-call FIEMAP protocol: the first ioctl (fm_extent_count == 0)
 * only counts the extents, the second one fetches the descriptors.
 * On success the caller owns *exts and releases it with
 * pmem2_extents_destroy(). Directories are rejected; DAX devices return
 * an empty extent list (block size only).
 */
int
pmem2_extents_create_get(int fd, struct extents **exts)
{
	LOG(3, "fd %i extents %p", fd, exts);

	ASSERT(fd > 2);
	ASSERTne(exts, NULL);

	enum pmem2_file_type pmem2_type;
	struct extents *pexts = NULL;
	struct fiemap *fmap = NULL;

	os_stat_t st;

	if (os_fstat(fd, &st) < 0) {
		ERR("!fstat %d", fd);
		return PMEM2_E_ERRNO;
	}

	int ret = pmem2_get_type_from_stat(&st, &pmem2_type);
	if (ret)
		return ret;

	/* directories do not have any extents */
	if (pmem2_type == PMEM2_FTYPE_DIR) {
		ERR(
			"checking extents does not make sense in case of directories");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	/* allocate extents structure */
	pexts = pmem2_zalloc(sizeof(struct extents), &ret);
	if (ret)
		return ret;

	/* save block size */
	LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize);
	pexts->blksize = (uint64_t)st.st_blksize;

	/* DAX device does not have any extents */
	if (pmem2_type == PMEM2_FTYPE_DEVDAX) {
		*exts = pexts;
		return 0;
	}

	ASSERTeq(pmem2_type, PMEM2_FTYPE_REG);

	fmap = pmem2_zalloc(sizeof(struct fiemap), &ret);
	if (ret)
		goto error_free;

	/* first call: fm_extent_count == 0 -> only count the extents */
	fmap->fm_start = 0;
	fmap->fm_length = (size_t)st.st_size;
	fmap->fm_flags = 0;
	fmap->fm_extent_count = 0;
	fmap->fm_mapped_extents = 0;

	if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
		ERR("!fiemap ioctl() for fd=%d failed", fd);
		ret = PMEM2_E_ERRNO;
		goto error_free;
	}

	/* grow the buffer so the second call can store the descriptors */
	size_t newsize = sizeof(struct fiemap) +
		fmap->fm_mapped_extents * sizeof(struct fiemap_extent);

	/* on realloc failure fmap stays valid and is freed at error_free */
	struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret);
	if (ret)
		goto error_free;

	fmap = newfmap;
	memset(fmap->fm_extents, 0, fmap->fm_mapped_extents *
		sizeof(struct fiemap_extent));
	fmap->fm_extent_count = fmap->fm_mapped_extents;
	fmap->fm_mapped_extents = 0;

	/* second call: fetch the actual extent descriptors */
	if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
		ERR("!fiemap ioctl() for fd=%d failed", fd);
		ret = PMEM2_E_ERRNO;
		goto error_free;
	}

	LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents);

	/* save number of extents */
	pexts->extents_count = fmap->fm_mapped_extents;

	pexts->extents = pmem2_malloc(
		pexts->extents_count * sizeof(struct extent),
		&ret);
	if (ret)
		goto error_free;

	/* save extents */
	unsigned e;
	for (e = 0; e < fmap->fm_mapped_extents; e++) {
		pexts->extents[e].offset_physical =
			fmap->fm_extents[e].fe_physical;
		pexts->extents[e].offset_logical =
			fmap->fm_extents[e].fe_logical;
		pexts->extents[e].length =
			fmap->fm_extents[e].fe_length;

		LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu",
			e,
			pexts->extents[e].offset_physical,
			pexts->extents[e].offset_logical,
			pexts->extents[e].length);
	}

	*exts = pexts;

	Free(fmap);

	return 0;

error_free:
	Free(pexts->extents);
	Free(pexts);
	Free(fmap);

	return ret;
}
/*
 * pmem2_extents_destroy -- free extents structure
 *
 * Safe to call with *exts == NULL; on return *exts is NULL.
 */
void
pmem2_extents_destroy(struct extents **exts)
{
	LOG(3, "extents %p", exts);

	ASSERTne(exts, NULL);

	struct extents *pexts = *exts;
	if (pexts == NULL)
		return;

	Free(pexts->extents);
	Free(pexts);
	*exts = NULL;
}
| 3,519 | 20.333333 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef X86_64_FLUSH_H
#define X86_64_FLUSH_H
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#include "valgrind_internal.h"
/* CPU cache-line size assumed by all flush loops below */
#define FLUSH_ALIGN ((uintptr_t)64)

/* flush one cache line with the CLFLUSH instruction */
static force_inline void
pmem_clflush(const void *addr)
{
	_mm_clflush(addr);
}
#ifdef _MSC_VER
/* MSVC ships intrinsics for the newer flush instructions */
static force_inline void
pmem_clflushopt(const void *addr)
{
	_mm_clflushopt(addr);
}
static force_inline void
pmem_clwb(const void *addr)
{
	_mm_clwb(addr);
}
#else
/*
 * The x86 memory instructions are new enough that the compiler
 * intrinsic functions are not always available. The intrinsic
 * functions are defined here in terms of asm statements for now.
 */
/* 0x66-prefixed CLFLUSH encodes CLFLUSHOPT on supporting CPUs */
static force_inline void
pmem_clflushopt(const void *addr)
{
	asm volatile(".byte 0x66; clflush %0" : "+m" \
		(*(volatile char *)(addr)));
}
/* 0x66-prefixed XSAVEOPT encodes CLWB on supporting CPUs */
static force_inline void
pmem_clwb(const void *addr)
{
	asm volatile(".byte 0x66; xsaveopt %0" : "+m" \
		(*(volatile char *)(addr)));
}
#endif /* _MSC_VER */

/* signature shared by all range-flush implementations */
typedef void flush_fn(const void *, size_t);
/*
 * flush_clflush_nolog -- flush the CPU cache, using clflush
 *
 * Walks the range one cache line (FLUSH_ALIGN bytes) at a time,
 * starting from the cache-line-aligned address containing `addr`.
 */
static force_inline void
flush_clflush_nolog(const void *addr, size_t len)
{
	uintptr_t p = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	for (; p < end; p += FLUSH_ALIGN)
		_mm_clflush((char *)p);
}
/*
 * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt
 *
 * Walks the range one cache line (FLUSH_ALIGN bytes) at a time,
 * starting from the cache-line-aligned address containing `addr`.
 */
static force_inline void
flush_clflushopt_nolog(const void *addr, size_t len)
{
	uintptr_t p = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	for (; p < end; p += FLUSH_ALIGN)
		pmem_clflushopt((char *)p);
}
/*
 * flush_clwb_nolog -- flush the CPU cache, using clwb
 *
 * Walks the range one cache line (FLUSH_ALIGN bytes) at a time,
 * starting from the cache-line-aligned address containing `addr`.
 */
static force_inline void
flush_clwb_nolog(const void *addr, size_t len)
{
	uintptr_t p = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	for (; p < end; p += FLUSH_ALIGN)
		pmem_clwb((char *)p);
}
/*
 * flush64b_empty -- (internal) do not flush the CPU cache
 *
 * Used when flushing is unnecessary (e.g. eADR); still reports the
 * 64-byte "flush" to pmemcheck so instrumentation stays accurate.
 */
static force_inline void
flush64b_empty(const void *addr)
{
	/* NOP, but tell pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, 64);
}

#endif
| 2,521 | 20.193277 | 66 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/init.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <string.h>
#include <xmmintrin.h>
#include "auto_flush.h"
#include "cpu.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "os.h"
#include "out.h"
#include "pmem2_arch.h"
#include "valgrind_internal.h"
/* default size above which non-temporal copies are preferred */
#define MOVNT_THRESHOLD 256

/* runtime-tunable copy of the threshold (overridable elsewhere) */
size_t Movnt_threshold = MOVNT_THRESHOLD;

/*
 * memory_barrier -- (internal) issue the fence instruction
 */
static void
memory_barrier(void)
{
	LOG(15, NULL);
	_mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */
}
/*
 * flush_clflush -- (internal) flush the CPU cache, using clflush
 *
 * Logging wrapper around flush_clflush_nolog().
 */
static void
flush_clflush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clflush_nolog(addr, len);
}

/*
 * flush_clflushopt -- (internal) flush the CPU cache, using clflushopt
 *
 * Logging wrapper around flush_clflushopt_nolog().
 */
static void
flush_clflushopt(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clflushopt_nolog(addr, len);
}

/*
 * flush_clwb -- (internal) flush the CPU cache, using clwb
 *
 * Logging wrapper around flush_clwb_nolog().
 */
static void
flush_clwb(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clwb_nolog(addr, len);
}
#if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE

/* flag groups: non-temporal (streaming) vs. regular (temporal) stores */
#define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL)
#define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL)

/*
 * MEMCPY_TEMPLATE -- defines memmove_nodrain_<isa>_<flush><perfbarrier>():
 * picks a mov or movnt variant from the flags; with no explicit
 * preference, copies shorter than Movnt_threshold use regular stores.
 */
#define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \
static void *\
memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \
		size_t len, unsigned flags, flush_func flushf)\
{\
	if (len == 0 || src == dest)\
		return dest;\
\
	if (flags & PMEM2_F_MEM_NOFLUSH) \
		memmove_mov_##isa##_noflush(dest, src, len); \
	else if (flags & PMEM2_F_MEM_MOVNT)\
		memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\
	else if (flags & PMEM2_F_MEM_MOV)\
		memmove_mov_##isa##_##flush(dest, src, len);\
	else if (len < Movnt_threshold)\
		memmove_mov_##isa##_##flush(dest, src, len);\
	else\
		memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\
\
	return dest;\
}

/*
 * MEMCPY_TEMPLATE_EADR -- eADR variant: caches need no flushing, so
 * the "empty" flush flavors are used throughout.
 */
#define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \
		size_t len, unsigned flags, flush_func flushf)\
{\
	if (len == 0 || src == dest)\
		return dest;\
\
	if (flags & PMEM2_F_MEM_NOFLUSH)\
		memmove_mov_##isa##_noflush(dest, src, len);\
	else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
		memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\
	else\
		memmove_mov_##isa##_empty(dest, src, len);\
\
	return dest;\
}

/*
 * MEMSET_TEMPLATE -- defines memset_nodrain_<isa>_<flush><perfbarrier>();
 * same selection logic as MEMCPY_TEMPLATE.
 */
#define MEMSET_TEMPLATE(isa, flush, perfbarrier)\
static void *\
memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \
		unsigned flags, flush_func flushf)\
{\
	if (len == 0)\
		return dest;\
\
	if (flags & PMEM2_F_MEM_NOFLUSH) \
		memset_mov_##isa##_noflush(dest, c, len); \
	else if (flags & PMEM2_F_MEM_MOVNT)\
		memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
	else if (flags & PMEM2_F_MEM_MOV)\
		memset_mov_##isa##_##flush(dest, c, len);\
	else if (len < Movnt_threshold)\
		memset_mov_##isa##_##flush(dest, c, len);\
	else\
		memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
\
	return dest;\
}

/*
 * MEMSET_TEMPLATE_EADR -- eADR variant of MEMSET_TEMPLATE.
 */
#define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \
		unsigned flags, flush_func flushf)\
{\
	if (len == 0)\
		return dest;\
\
	if (flags & PMEM2_F_MEM_NOFLUSH)\
		memset_mov_##isa##_noflush(dest, c, len);\
	else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
		memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\
	else\
		memset_mov_##isa##_empty(dest, c, len);\
\
	return dest;\
}
#endif
#if SSE2_AVAILABLE
/* SSE2 variants, with and without the write-combining barrier */
MEMCPY_TEMPLATE(sse2, clflush, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(sse2, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(sse2, _nobarrier)

MEMSET_TEMPLATE(sse2, clflush, _nobarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMSET_TEMPLATE(sse2, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(sse2, _nobarrier)

MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier)

MEMSET_TEMPLATE(sse2, clflush, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(sse2, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(sse2, _wcbarrier)
#endif

#if AVX_AVAILABLE
/* AVX variants, with and without the write-combining barrier */
MEMCPY_TEMPLATE(avx, clflush, _nobarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(avx, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(avx, _nobarrier)

MEMSET_TEMPLATE(avx, clflush, _nobarrier)
MEMSET_TEMPLATE(avx, clflushopt, _nobarrier)
MEMSET_TEMPLATE(avx, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(avx, _nobarrier)

MEMCPY_TEMPLATE(avx, clflush, _wcbarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(avx, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(avx, _wcbarrier)

MEMSET_TEMPLATE(avx, clflush, _wcbarrier)
MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(avx, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(avx, _wcbarrier)
#endif

#if AVX512F_AVAILABLE
/* AVX-512F variants (no separate perf-barrier flavor) */
MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle wa */)
MEMCPY_TEMPLATE(avx512f, clflushopt, /* */)
MEMCPY_TEMPLATE(avx512f, clwb, /* */)
MEMCPY_TEMPLATE_EADR(avx512f, /* */)

MEMSET_TEMPLATE(avx512f, clflush, /* */)
MEMSET_TEMPLATE(avx512f, clflushopt, /* */)
MEMSET_TEMPLATE(avx512f, clwb, /* */)
MEMSET_TEMPLATE_EADR(avx512f, /* */)
#endif
/*
 * memcpy_impl -- which SIMD implementation was selected at runtime
 * (recorded only so pmem2_arch_init() can log the choice)
 */
enum memcpy_impl {
	MEMCPY_INVALID,	/* nothing selected yet / movnt disabled */
	MEMCPY_SSE2,	/* SSE2 non-temporal stores */
	MEMCPY_AVX,	/* AVX non-temporal stores */
	MEMCPY_AVX512F	/* AVX512F non-temporal stores */
};
/*
 * use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible
 *
 * Installs the SSE2 memmove/memset dispatchers matching the flush
 * instruction already chosen in info->flush.  wc_workaround selects the
 * variants that fence between bursts of non-temporal stores.
 */
static void
use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
		int wc_workaround)
{
#if SSE2_AVAILABLE
	*impl = MEMCPY_SSE2;
	if (wc_workaround) {
		info->memmove_nodrain_eadr =
				memmove_nodrain_sse2_eadr_wcbarrier;
		info->memset_nodrain_eadr =
				memset_nodrain_sse2_eadr_wcbarrier;

		if (info->flush == flush_clflush) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflush_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clflush_wcbarrier;
		} else if (info->flush == flush_clflushopt) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflushopt_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clflushopt_wcbarrier;
		} else if (info->flush == flush_clwb) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clwb_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clwb_wcbarrier;
		} else {
			ASSERT(0);
		}
	} else {
		info->memmove_nodrain_eadr =
				memmove_nodrain_sse2_eadr_nobarrier;
		info->memset_nodrain_eadr =
				memset_nodrain_sse2_eadr_nobarrier;

		if (info->flush == flush_clflush) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflush_nobarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clflush_nobarrier;
		} else if (info->flush == flush_clflushopt) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflushopt_nobarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clflushopt_nobarrier;
		} else if (info->flush == flush_clwb) {
			info->memmove_nodrain =
				memmove_nodrain_sse2_clwb_nobarrier;
			info->memset_nodrain =
				memset_nodrain_sse2_clwb_nobarrier;
		} else {
			ASSERT(0);
		}
	}
#else
	LOG(3, "sse2 disabled at build time");
#endif
}
/*
 * use_avx_memcpy_memset -- (internal) AVX detected, use it if possible
 *
 * Honors PMEM_AVX=0 as an opt-out; otherwise installs the AVX dispatchers
 * matching info->flush, in the WC-barrier flavor chosen by wc_workaround.
 */
static void
use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
		int wc_workaround)
{
#if AVX_AVAILABLE
	LOG(3, "avx supported")
;
	char *env = os_getenv("PMEM_AVX");
	if (env != NULL && strcmp(env, "0") == 0) {
		LOG(3, "PMEM_AVX set to 0");
		return;
	}
	LOG(3, "PMEM_AVX enabled");

	*impl = MEMCPY_AVX;
	if (wc_workaround) {
		info->memmove_nodrain_eadr =
				memmove_nodrain_avx_eadr_wcbarrier;
		info->memset_nodrain_eadr =
				memset_nodrain_avx_eadr_wcbarrier;

		if (info->flush == flush_clflush) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clflush_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clflush_wcbarrier;
		} else if (info->flush == flush_clflushopt) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clflushopt_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clflushopt_wcbarrier;
		} else if (info->flush == flush_clwb) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clwb_wcbarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clwb_wcbarrier;
		} else {
			ASSERT(0);
		}
	} else {
		info->memmove_nodrain_eadr =
				memmove_nodrain_avx_eadr_nobarrier;
		info->memset_nodrain_eadr =
				memset_nodrain_avx_eadr_nobarrier;

		if (info->flush == flush_clflush) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clflush_nobarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clflush_nobarrier;
		} else if (info->flush == flush_clflushopt) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clflushopt_nobarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clflushopt_nobarrier;
		} else if (info->flush == flush_clwb) {
			info->memmove_nodrain =
				memmove_nodrain_avx_clwb_nobarrier;
			info->memset_nodrain =
				memset_nodrain_avx_clwb_nobarrier;
		} else {
			ASSERT(0);
		}
	}
#else
	LOG(3, "avx supported, but disabled at build time");
#endif
}
/*
 * use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible
 *
 * Honors PMEM_AVX512F=0 as an opt-out.  AVX512F has no WC-barrier
 * variants, so there is a single dispatcher per flush instruction.
 */
static void
use_avx512f_memcpy_memset(struct pmem2_arch_info *info,
		enum memcpy_impl *impl)
{
#if AVX512F_AVAILABLE
	LOG(3, "avx512f supported");

	char *env = os_getenv("PMEM_AVX512F");
	if (env != NULL && strcmp(env, "0") == 0) {
		LOG(3, "PMEM_AVX512F set to 0");
		return;
	}
	LOG(3, "PMEM_AVX512F enabled");

	*impl = MEMCPY_AVX512F;
	info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr;
	info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr;

	if (info->flush == flush_clflush) {
		info->memmove_nodrain = memmove_nodrain_avx512f_clflush;
		info->memset_nodrain = memset_nodrain_avx512f_clflush;
	} else if (info->flush == flush_clflushopt) {
		info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt;
		info->memset_nodrain = memset_nodrain_avx512f_clflushopt;
	} else if (info->flush == flush_clwb) {
		info->memmove_nodrain = memmove_nodrain_avx512f_clwb;
		info->memset_nodrain = memset_nodrain_avx512f_clwb;
	} else {
		ASSERT(0);
	}
#else
	LOG(3, "avx512f supported, but disabled at build time");
#endif
}
/*
 * pmem_cpuinfo_to_funcs -- (internal) configure libpmem based on CPUID
 *
 * First picks the best available flush instruction (each later check
 * overrides the previous one: clflush < clflushopt < clwb, subject to
 * PMEM_NO_CLFLUSHOPT / PMEM_NO_CLWB), then selects the widest available
 * SIMD movnt implementation unless PMEM_NO_MOVNT=1.
 */
static void
pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl)
{
	LOG(3, NULL);

	if (is_cpu_clflush_present()) {
		LOG(3, "clflush supported");

		info->flush = flush_clflush;
		info->flush_has_builtin_fence = 1;
		info->fence = memory_barrier;
	}

	if (is_cpu_clflushopt_present()) {
		LOG(3, "clflushopt supported");

		char *env = os_getenv("PMEM_NO_CLFLUSHOPT");
		if (env && strcmp(env, "1") == 0) {
			LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt");
		} else {
			info->flush = flush_clflushopt;
			info->flush_has_builtin_fence = 0;
			info->fence = memory_barrier;
		}
	}

	if (is_cpu_clwb_present()) {
		LOG(3, "clwb supported");

		char *env = os_getenv("PMEM_NO_CLWB");
		if (env && strcmp(env, "1") == 0) {
			LOG(3, "PMEM_NO_CLWB forced no clwb");
		} else {
			info->flush = flush_clwb;
			info->flush_has_builtin_fence = 0;
			info->fence = memory_barrier;
		}
	}

	/*
	 * XXX Disable this work around for Intel CPUs with optimized
	 * WC eviction.
	 */
	int wc_workaround = is_cpu_genuine_intel();

	char *env = os_getenv("PMEM_WC_WORKAROUND");
	if (env) {
		if (strcmp(env, "1") == 0) {
			LOG(3, "WC workaround forced to 1");
			wc_workaround = 1;
		} else if (strcmp(env, "0") == 0) {
			LOG(3, "WC workaround forced to 0");
			wc_workaround = 0;
		} else {
			LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)",
				env);
		}
	}
	LOG(3, "WC workaround = %d", wc_workaround);

	env = os_getenv("PMEM_NO_MOVNT");
	if (env && strcmp(env, "1") == 0) {
		LOG(3, "PMEM_NO_MOVNT forced no movnt");
	} else {
		/* detection order matters: a wider ISA overrides a narrower */
		use_sse2_memcpy_memset(info, impl, wc_workaround);

		if (is_cpu_avx_present())
			use_avx_memcpy_memset(info, impl, wc_workaround);

		if (is_cpu_avx512f_present())
			use_avx512f_memcpy_memset(info, impl);
	}
}
/*
 * pmem2_arch_init -- initialize architecture-specific list of pmem operations
 *
 * Fills info with flush/fence/memmove/memset implementations selected by
 * CPUID and environment overrides, then logs the final selection.
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, NULL);
	enum memcpy_impl impl = MEMCPY_INVALID;

	pmem_cpuinfo_to_funcs(info, &impl);

	/*
	 * For testing, allow overriding the default threshold
	 * for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*()
	 * and pmem_memset_*().
	 * It has no effect if movnt is not supported or disabled.
	 */
	const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD");
	if (ptr) {
		/*
		 * atoll() would silently map non-numeric input to 0 and
		 * install it as the threshold (forcing movnt for every
		 * size); parse with strtoll() and reject malformed or
		 * negative values explicitly instead.
		 */
		char *endptr;
		long long val = strtoll(ptr, &endptr, 10);
		if (endptr == ptr || *endptr != '\0' || val < 0) {
			LOG(3, "Invalid PMEM_MOVNT_THRESHOLD");
		} else {
			LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val);
			Movnt_threshold = (size_t)val;
		}
	}

	if (info->flush == flush_clwb)
		LOG(3, "using clwb");
	else if (info->flush == flush_clflushopt)
		LOG(3, "using clflushopt");
	else if (info->flush == flush_clflush)
		LOG(3, "using clflush");
	else
		FATAL("invalid deep flush function address");

	if (impl == MEMCPY_AVX512F)
		LOG(3, "using movnt AVX512F");
	else if (impl == MEMCPY_AVX)
		LOG(3, "using movnt AVX");
	else if (impl == MEMCPY_SSE2)
		LOG(3, "using movnt SSE2");
}
| 13,899 | 25.275992 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/avx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
#ifndef PMEM_AVX_H
#define PMEM_AVX_H
#include <immintrin.h>
#include "util.h"
/*
* avx_zeroupper -- _mm256_zeroupper wrapper
*
* _mm256_zeroupper clears upper parts of avx registers.
*
* It's needed for 2 reasons:
* - it improves performance of non-avx code after avx
* - it works around problem discovered by Valgrind
*
* In optimized builds gcc inserts VZEROUPPER automatically before
* calling non-avx code (or at the end of the function). But in release
* builds it doesn't, so if we don't do this by ourselves, then when
* someone memcpy'ies uninitialized data, Valgrind complains whenever
* someone reads those registers.
*
* One notable example is loader, which tries to detect whether it
* needs to save whole ymm registers by looking at their current
* (possibly uninitialized) value.
*
* Valgrind complains like that:
* Conditional jump or move depends on uninitialised value(s)
* at 0x4015CC9: _dl_runtime_resolve_avx_slow
* (in /lib/x86_64-linux-gnu/ld-2.24.so)
* by 0x10B531: test_realloc_api (obj_basic_integration.c:185)
* by 0x10F1EE: main (obj_basic_integration.c:594)
*
* Note: We have to be careful to not read AVX registers after this
* intrinsic, because of this stupid gcc bug:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735
*/
static force_inline void
avx_zeroupper(void)
{
	_mm256_zeroupper();
}
/*
 * m256_get16b -- return the low 128 bits of a 256-bit register
 */
static force_inline __m128i
m256_get16b(__m256i ymm)
{
	return _mm256_extractf128_si256(ymm, 0);
}
#ifdef _MSC_VER
/*
 * MSVC lacks the _mm256_extract_epi* intrinsics used below, so extract
 * the low scalar by going through the low 128-bit lane instead.
 */
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
	return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
	return (uint32_t)m256_get8b(ymm);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
	return (uint16_t)m256_get8b(ymm);
}
#else
/* m256_get8b/4b/2b -- return the low 8/4/2 bytes of a 256-bit register */
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
	return (uint64_t)_mm256_extract_epi64(ymm, 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
	return (uint32_t)_mm256_extract_epi32(ymm, 0);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
	return (uint16_t)_mm256_extract_epi16(ymm, 0);
}
#endif
#endif
| 2,238 | 24.735632 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memcpy_memset.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef MEMCPY_MEMSET_H
#define MEMCPY_MEMSET_H
#include <stddef.h>
#include <xmmintrin.h>
#include "pmem2_arch.h"
typedef void barrier_fn(void);
typedef void flush64b_fn(const void *);
static inline void
barrier_after_ntstores(void)
{
	/*
	 * In this configuration pmem_drain does not contain sfence, so we have
	 * to serialize non-temporal store instructions.
	 */
	_mm_sfence();
}
static inline void
no_barrier_after_ntstores(void)
{
	/*
	 * In this configuration pmem_drain contains sfence, so we don't have
	 * to serialize non-temporal store instructions
	 */
}
/* noflush -- flush_fn that does nothing (eADR / no-flush paths) */
static inline void
noflush(const void *addr, size_t len)
{
	/* NOP, not even pmemcheck annotation */
}
/* noflush64b -- flush64b_fn that does nothing (eADR / no-flush paths) */
static inline void
noflush64b(const void *addr)
{
	/* NOP, not even pmemcheck annotation */
}
/* perf_barrier_fn -- barrier issued between bursts of non-temporal stores */
typedef void perf_barrier_fn(void);
static force_inline void
wc_barrier(void)
{
	/*
	 * Currently, for SSE2 and AVX code paths, use of non-temporal stores
	 * on all generations of CPUs must be limited to the number of
	 * write-combining buffers (12) because otherwise, suboptimal eviction
	 * policy might impact performance when writing more data than WC
	 * buffers can simultaneously hold.
	 *
	 * The AVX512 code path is not affected, probably because we are
	 * overwriting whole cache lines.
	 */
	_mm_sfence();
}
/* no_barrier -- perf_barrier_fn that does nothing */
static force_inline void
no_barrier(void)
{
}
#ifndef AVX512F_AVAILABLE
/*
* XXX not supported in MSVC version we currently use.
* Enable Windows tests pmem2_mem_ext when MSVC we
* use will support AVX512F.
*/
#ifdef _MSC_VER
#define AVX512F_AVAILABLE 0
#else
#define AVX512F_AVAILABLE 1
#endif
#endif
#ifndef AVX_AVAILABLE
#define AVX_AVAILABLE 1
#endif
#ifndef SSE2_AVAILABLE
#define SSE2_AVAILABLE 1
#endif
#if SSE2_AVAILABLE
void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_mov_sse2_empty(char *dest, const char *src, size_t len);
void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_sse2_clflush(char *dest, int c, size_t len);
void memset_mov_sse2_clflushopt(char *dest, int c, size_t len);
void memset_mov_sse2_clwb(char *dest, int c, size_t len);
void memset_mov_sse2_empty(char *dest, int c, size_t len);
void memset_mov_sse2_noflush(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX_AVAILABLE
void memmove_mov_avx_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_avx_clflush(char *dest, int c, size_t len);
void memset_mov_avx_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx_clwb(char *dest, int c, size_t len);
void memset_mov_avx_empty(char *dest, int c, size_t len);
void memset_mov_avx_noflush(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX512F_AVAILABLE
void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len);
void memset_mov_avx512f_clflush(char *dest, int c, size_t len);
void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx512f_clwb(char *dest, int c, size_t len);
void memset_mov_avx512f_empty(char *dest, int c, size_t len);
void memset_mov_avx512f_noflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx512f_clwb(char *dest, int c, size_t len);
void memset_movnt_avx512f_empty(char *dest, int c, size_t len);
void memset_movnt_avx512f_noflush(char *dest, int c, size_t len);
#endif
extern size_t Movnt_threshold;
/*
* SSE2/AVX1 only:
*
* How much data WC buffers can hold at the same time, after which sfence
* is needed to flush them.
*
* For some reason sfence affects performance of reading from DRAM, so we have
* to prefetch the source data earlier.
*/
#define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */)
/*
* How much to prefetch initially.
* Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE.
*/
#define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */)
/* prefetch -- pull one cache line into L1 */
static force_inline void
prefetch(const char *addr)
{
	_mm_prefetch(addr, _MM_HINT_T0);
}
/*
 * prefetch_ini_fw -- initial forward prefetch of up to INI_PREFETCH_SIZE
 * bytes starting at src
 */
static force_inline void
prefetch_ini_fw(const char *src, size_t len)
{
	size_t pref = MIN(len, INI_PREFETCH_SIZE);
	for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
		prefetch(src + i);
}
/*
 * prefetch_ini_bw -- initial backward prefetch (for backward memmove)
 */
static force_inline void
prefetch_ini_bw(const char *src, size_t len)
{
	size_t pref = MIN(len, INI_PREFETCH_SIZE);
	for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
		prefetch(src - i);
}
/*
 * prefetch_next_fw -- prefetch the next PERF_BARRIER_SIZE-sized window,
 * INI_PREFETCH_SIZE ahead of src, clamped to srcend
 */
static force_inline void
prefetch_next_fw(const char *src, const char *srcend)
{
	const char *begin = src + INI_PREFETCH_SIZE;
	const char *end = begin + PERF_BARRIER_SIZE;
	if (end > srcend)
		end = srcend;
	for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE)
		prefetch(addr);
}
/*
 * prefetch_next_bw -- backward counterpart of prefetch_next_fw,
 * clamped to srcbegin
 */
static force_inline void
prefetch_next_bw(const char *src, const char *srcbegin)
{
	const char *begin = src - INI_PREFETCH_SIZE;
	const char *end = begin - PERF_BARRIER_SIZE;
	if (end < srcbegin)
		end = srcbegin;
	for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE)
		prefetch(addr);
}
#endif
| 9,351 | 33.131387 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_nt_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
#include "out.h"
#include "valgrind_internal.h"
/*
 * mm_stream_si128 -- single 16B non-temporal store of src at
 * ((__m128i *)dest)[idx].  NOTE(review): barrier() here appears to be a
 * compiler-level barrier keeping the streaming stores in program order --
 * confirm its definition in util.h.
 */
static force_inline void
mm_stream_si128(char *dest, unsigned idx, __m128i src)
{
	_mm_stream_si128((__m128i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt4x64b -- fill 4 cache lines (16 x 16B) with non-temporal
 * stores; same stores in the same order as the fully unrolled form
 * (constant trip count, the compiler unrolls it)
 */
static force_inline void
memset_movnt4x64b(char *dest, __m128i xmm)
{
	for (unsigned i = 0; i < 16; ++i)
		mm_stream_si128(dest, i, xmm);
}
/*
 * memset_movnt2x64b -- fill 2 cache lines (8 x 16B) with non-temporal stores
 */
static force_inline void
memset_movnt2x64b(char *dest, __m128i xmm)
{
	for (unsigned i = 0; i < 8; ++i)
		mm_stream_si128(dest, i, xmm);
}
/*
 * memset_movnt1x64b -- fill 1 cache line (4 x 16B) with non-temporal stores
 */
static force_inline void
memset_movnt1x64b(char *dest, __m128i xmm)
{
	for (unsigned i = 0; i < 4; ++i)
		mm_stream_si128(dest, i, xmm);
}
/* memset_movnt1x32b -- half a cache line with two 16B streaming stores */
static force_inline void
memset_movnt1x32b(char *dest, __m128i xmm)
{
	mm_stream_si128(dest, 0, xmm);
	mm_stream_si128(dest, 1, xmm);
}
/* memset_movnt1x16b -- one 16B streaming store */
static force_inline void
memset_movnt1x16b(char *dest, __m128i xmm)
{
	_mm_stream_si128((__m128i *)dest, xmm);
}
/* memset_movnt1x8b -- 8B streaming store of the register's low qword */
static force_inline void
memset_movnt1x8b(char *dest, __m128i xmm)
{
	uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm);
	_mm_stream_si64((long long *)dest, (long long)x);
}
/* memset_movnt1x4b -- 4B streaming store of the register's low dword */
static force_inline void
memset_movnt1x4b(char *dest, __m128i xmm)
{
	uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm);
	_mm_stream_si32((int *)dest, (int)x);
}
/*
 * memset_movnt_sse2 -- memset using SSE2 non-temporal stores
 *
 * Aligns the head to a 64B boundary with regular stores, streams the bulk
 * in 4-cache-line chunks (issuing perf_barrier every PERF_BARRIER_SIZE to
 * drain WC buffers), streams power-of-two tails down to 4B, and falls back
 * to regular stores for anything else.  Ends with the caller-selected
 * barrier and a single Valgrind flush annotation for the whole range.
 */
static force_inline void
memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush,
		barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	char *orig_dest = dest;
	size_t orig_len = len;
	__m128i xmm = _mm_set1_epi8((char)c);
	/* head: regular stores up to the next 64B boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_sse2(dest, xmm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk: 12 cache lines per iteration, then drain WC buffers */
	while (len >= PERF_BARRIER_SIZE) {
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		/* skip the barrier after the last chunk */
		if (len)
			perf_barrier();
	}
	while (len >= 4 * 64) {
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, xmm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, xmm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, xmm);
		else if (len == 16)
			memset_movnt1x16b(dest, xmm);
		else if (len == 8)
			memset_movnt1x8b(dest, xmm);
		else if (len == 4)
			memset_movnt1x4b(dest, xmm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	memset_small_sse2(dest, xmm, len, flush);
end:
	barrier();
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/*
 * Exported variants without perf_barrier: no extra sfence is issued
 * between bursts of non-temporal stores.
 */
void
memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
			no_barrier);
}
void
memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/*
 * Exported variants with perf_barrier: wc_barrier (sfence) is issued
 * every PERF_BARRIER_SIZE bytes to drain the WC buffers.
 */
void
memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
void
memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
| 5,912 | 20.580292 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_nt_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx.h"
#include "out.h"
#include "valgrind_internal.h"
/*
 * mm256_stream_si256 -- single 32B non-temporal store of src at
 * ((__m256i *)dest)[idx].  NOTE(review): barrier() here appears to be a
 * compiler-level barrier keeping the streaming stores in program order --
 * confirm its definition in util.h.
 */
static force_inline void
mm256_stream_si256(char *dest, unsigned idx, __m256i src)
{
	_mm256_stream_si256((__m256i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt8x64b -- fill 8 cache lines (16 x 32B) with non-temporal
 * stores; same stores in the same order as the fully unrolled form
 */
static force_inline void
memset_movnt8x64b(char *dest, __m256i ymm)
{
	for (unsigned i = 0; i < 16; ++i)
		mm256_stream_si256(dest, i, ymm);
}
/*
 * memset_movnt4x64b -- fill 4 cache lines (8 x 32B) with non-temporal stores
 */
static force_inline void
memset_movnt4x64b(char *dest, __m256i ymm)
{
	for (unsigned i = 0; i < 8; ++i)
		mm256_stream_si256(dest, i, ymm);
}
/*
 * memset_movnt2x64b -- fill 2 cache lines (4 x 32B) with non-temporal stores
 */
static force_inline void
memset_movnt2x64b(char *dest, __m256i ymm)
{
	for (unsigned i = 0; i < 4; ++i)
		mm256_stream_si256(dest, i, ymm);
}
/*
 * memset_movnt1x64b -- fill 1 cache line (2 x 32B) with non-temporal stores
 */
static force_inline void
memset_movnt1x64b(char *dest, __m256i ymm)
{
	for (unsigned i = 0; i < 2; ++i)
		mm256_stream_si256(dest, i, ymm);
}
/* memset_movnt1x32b -- one 32B streaming store */
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
}
/* memset_movnt1x16b -- 16B streaming store of the register's low lane */
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
	__m128i xmm0 = m256_get16b(ymm);
	_mm_stream_si128((__m128i *)dest, xmm0);
}
/* memset_movnt1x8b -- 8B streaming store of the register's low qword */
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
	uint64_t x = m256_get8b(ymm);
	_mm_stream_si64((long long *)dest, (long long)x);
}
/* memset_movnt1x4b -- 4B streaming store of the register's low dword */
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
	uint32_t x = m256_get4b(ymm);
	_mm_stream_si32((int *)dest, (int)x);
}
/*
 * memset_movnt_avx -- memset using AVX non-temporal stores
 *
 * Same structure as the SSE2 variant: align the head to 64B with regular
 * stores, stream the bulk in 12-cache-line windows separated by
 * perf_barrier, stream power-of-two tails down to 4B, fall back to regular
 * stores otherwise.  avx_zeroupper() before the final barrier avoids the
 * SSE/AVX transition penalty and Valgrind false positives.
 */
static force_inline void
memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush,
		barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	char *orig_dest = dest;
	size_t orig_len = len;
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* head: regular stores up to the next 64B boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk: 12 cache lines per iteration, then drain WC buffers */
	while (len >= PERF_BARRIER_SIZE) {
		memset_movnt8x64b(dest, ymm);
		dest += 8 * 64;
		len -= 8 * 64;
		memset_movnt4x64b(dest, ymm);
		dest += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		/* skip the barrier after the last chunk */
		if (len)
			perf_barrier();
	}
	if (len >= 8 * 64) {
		memset_movnt8x64b(dest, ymm);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_movnt4x64b(dest, ymm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, ymm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, ymm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, ymm);
		else if (len == 16)
			memset_movnt1x16b(dest, ymm);
		else if (len == 8)
			memset_movnt1x8b(dest, ymm);
		else if (len == 4)
			memset_movnt1x4b(dest, ymm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	memset_small_avx(dest, ymm, len, flush);
end:
	avx_zeroupper();
	barrier();
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/*
 * Exported variants without perf_barrier: no extra sfence is issued
 * between bursts of non-temporal stores.
 */
void
memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
			no_barrier);
}
void
memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/*
 * Exported variants with perf_barrier: wc_barrier (sfence) is issued
 * every PERF_BARRIER_SIZE bytes to drain the WC buffers.
 */
void
memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
void
memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
| 6,151 | 20.43554 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_t_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx512f.h"
/*
 * mm512_store_si512 -- issue one temporal 64-byte store of src into the
 * idx-th 512-bit slot starting at dest.
 */
static force_inline void
mm512_store_si512(char *dest, unsigned idx, __m512i src)
{
	__m512i *slot = (__m512i *)dest + idx;
	_mm512_store_si512(slot, src);
}
/*
 * memset_mov32x64b -- fill 32 cache lines (2 KiB) starting at dest with
 * the pattern held in zmm using temporal 64-byte stores, then flush each
 * of the 32 lines.  All stores are issued before any flush, preserving
 * the original store-then-flush ordering.
 */
static force_inline void
memset_mov32x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 32; ++i)
		mm512_store_si512(dest, i, zmm);
	for (unsigned i = 0; i < 32; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov16x64b -- fill 16 cache lines (1 KiB) starting at dest with
 * the pattern held in zmm, then flush each line.  Store order and flush
 * order match the original unrolled version.
 */
static force_inline void
memset_mov16x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 16; ++i)
		mm512_store_si512(dest, i, zmm);
	for (unsigned i = 0; i < 16; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov8x64b -- fill 8 cache lines (512 B) starting at dest with the
 * pattern held in zmm, then flush each line.
 */
static force_inline void
memset_mov8x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 8; ++i)
		mm512_store_si512(dest, i, zmm);
	for (unsigned i = 0; i < 8; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov4x64b -- fill 4 cache lines with the pattern in zmm, then
 * flush each line.
 */
static force_inline void
memset_mov4x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 4; ++i)
		mm512_store_si512(dest, i, zmm);
	for (unsigned i = 0; i < 4; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov2x64b -- fill 2 cache lines with the pattern in zmm, then
 * flush both lines.
 */
static force_inline void
memset_mov2x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 2; ++i)
		mm512_store_si512(dest, i, zmm);
	for (unsigned i = 0; i < 2; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov1x64b -- fill a single cache line with the pattern in zmm
 * and flush it.
 */
static force_inline void
memset_mov1x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	flush64b(dest);
}
/*
 * memset_mov_avx512f -- AVX-512 temporal-store memset core.
 *
 * Aligns dest up to a 64-byte boundary with a small-copy helper, then
 * fills the bulk in progressively smaller power-of-two multiples of the
 * cache line (32x, 16x, ..., 1x 64 B), flushing as it goes via flush64b,
 * and finishes any sub-line remainder with the small helper again.
 */
static force_inline void
memset_mov_avx512f(char *dest, int c, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	__m512i zmm = _mm512_set1_epi8((char)c);
	/* See comment in memset_movnt_avx512f */
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* bytes needed to reach the next 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx512f(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* dest is now 64-byte aligned; emit the largest chunks first */
	while (len >= 32 * 64) {
		memset_mov32x64b(dest, zmm, flush64b);
		dest += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memset_mov16x64b(dest, zmm, flush64b);
		dest += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memset_mov8x64b(dest, zmm, flush64b);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_mov4x64b(dest, zmm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, zmm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, zmm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cache-line tail */
	if (len)
		memset_small_avx512f(dest, ymm, len, flush);
	/* leave the SSE/AVX transition penalty state clean */
	avx_zeroupper();
}
/*
 * memset_mov_avx512f_noflush -- AVX-512 temporal memset, no flushing.
 */
void
memset_mov_avx512f_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, noflush, noflush64b);
}
/*
 * memset_mov_avx512f_empty -- AVX-512 temporal memset with the empty
 * (no-op) flush callbacks.
 */
void
memset_mov_avx512f_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_empty_nolog, flush64b_empty);
}
/*
 * memset_mov_avx512f_clflush -- AVX-512 temporal memset flushing with
 * CLFLUSH.
 */
void
memset_mov_avx512f_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
/*
 * memset_mov_avx512f_clflushopt -- AVX-512 temporal memset flushing with
 * CLFLUSHOPT.
 */
void
memset_mov_avx512f_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/*
 * memset_mov_avx512f_clwb -- AVX-512 temporal memset flushing with CLWB.
 */
void
memset_mov_avx512f_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 6,851 | 22.958042 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_nt_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx512f.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
/*
 * mm512_stream_si512 -- issue one non-temporal 64-byte store of src into
 * the idx-th 512-bit slot starting at dest.
 *
 * NOTE(review): the barrier() after every streaming store is not part of
 * upstream PMDK's pattern — presumably added by the NearPM shadow
 * instrumentation; confirm it is intentional before removing.
 */
static force_inline void
mm512_stream_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_stream_si512((__m512i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt32x64b -- fill 32 cache lines (2 KiB) starting at dest with
 * the pattern held in zmm using non-temporal 64-byte stores, in the same
 * ascending order as the original unrolled version.
 */
static force_inline void
memset_movnt32x64b(char *dest, __m512i zmm)
{
	for (unsigned i = 0; i < 32; ++i)
		mm512_stream_si512(dest, i, zmm);
}
/*
 * memset_movnt16x64b -- fill 16 cache lines (1 KiB) with the pattern in
 * zmm using non-temporal stores.
 */
static force_inline void
memset_movnt16x64b(char *dest, __m512i zmm)
{
	for (unsigned i = 0; i < 16; ++i)
		mm512_stream_si512(dest, i, zmm);
}
/*
 * memset_movnt8x64b -- fill 8 cache lines (512 B) with the pattern in
 * zmm using non-temporal stores.
 */
static force_inline void
memset_movnt8x64b(char *dest, __m512i zmm)
{
	for (unsigned i = 0; i < 8; ++i)
		mm512_stream_si512(dest, i, zmm);
}
/*
 * memset_movnt4x64b -- fill 4 cache lines with the pattern in zmm using
 * non-temporal stores.
 */
static force_inline void
memset_movnt4x64b(char *dest, __m512i zmm)
{
	for (unsigned i = 0; i < 4; ++i)
		mm512_stream_si512(dest, i, zmm);
}
/*
 * memset_movnt2x64b -- fill 2 cache lines with the pattern in zmm using
 * non-temporal stores.
 */
static force_inline void
memset_movnt2x64b(char *dest, __m512i zmm)
{
	for (unsigned i = 0; i < 2; ++i)
		mm512_stream_si512(dest, i, zmm);
}
/*
 * memset_movnt1x64b -- fill a single cache line with the pattern in zmm
 * using one non-temporal store.
 */
static force_inline void
memset_movnt1x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
}
/*
 * memset_movnt1x32b -- one 32-byte non-temporal store of the pattern.
 */
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
	__m256i *slot = (__m256i *)dest;
	_mm256_stream_si256(slot, ymm);
}
/*
 * memset_movnt1x16b -- one 16-byte non-temporal store; uses the low
 * 128-bit lane of the pattern.
 */
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
	_mm_stream_si128((__m128i *)dest,
			_mm256_extracti128_si256(ymm, 0));
}
/*
 * memset_movnt1x8b -- one 8-byte non-temporal store of the pattern.
 */
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
	long long v = (long long)m256_get8b(ymm);
	_mm_stream_si64((long long *)dest, v);
}
/*
 * memset_movnt1x4b -- one 4-byte non-temporal store of the pattern.
 */
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
	int v = (int)m256_get4b(ymm);
	_mm_stream_si32((int *)dest, v);
}
/*
 * memset_movnt_avx512f -- AVX-512 non-temporal memset core.
 *
 * Aligns dest to 64 bytes with the small helper, streams the bulk in
 * progressively smaller cache-line multiples, handles power-of-two tails
 * (32/16/8/4 B) with a single small nt store, and falls back to the
 * small helper otherwise.  Ends with a barrier so the nt stores are
 * globally visible/persistent per the chosen flush strategy.
 */
static force_inline void
memset_movnt_avx512f(char *dest, int c, size_t len, flush_fn flush,
	barrier_fn barrier)
{
	/* remember the full range for the pmemcheck notification below */
	char *orig_dest = dest;
	size_t orig_len = len;
	__m512i zmm = _mm512_set1_epi8((char)c);
	/*
	 * Can't use _mm512_extracti64x4_epi64, because some versions of gcc
	 * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887
	 */
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* align dest up to a 64-byte boundary with regular stores */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx512f(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	while (len >= 32 * 64) {
		memset_movnt32x64b(dest, zmm);
		dest += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memset_movnt16x64b(dest, zmm);
		dest += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memset_movnt8x64b(dest, zmm);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_movnt4x64b(dest, zmm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, zmm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, zmm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, ymm);
		else if (len == 16)
			memset_movnt1x16b(dest, ymm);
		else if (len == 8)
			memset_movnt1x8b(dest, ymm);
		else if (len == 4)
			memset_movnt1x4b(dest, ymm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* non-power-of-two tail: regular stores + explicit flush */
	memset_small_avx512f(dest, ymm, len, flush);
end:
	avx_zeroupper();
	barrier();
	/* tell pmemcheck the whole range was made persistent */
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/*
 * memset_movnt_avx512f_noflush -- AVX-512 nt memset; no flushing, but a
 * barrier after the nt stores.
 */
void
memset_movnt_avx512f_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, noflush, barrier_after_ntstores);
}
/*
 * memset_movnt_avx512f_empty -- AVX-512 nt memset with the empty flush
 * callback and a barrier after the nt stores.
 */
void
memset_movnt_avx512f_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores);
}
/*
 * memset_movnt_avx512f_clflush -- AVX-512 nt memset flushing tails with
 * CLFLUSH; barrier after the nt stores.
 */
void
memset_movnt_avx512f_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores);
}
/*
 * memset_movnt_avx512f_clflushopt -- AVX-512 nt memset flushing tails
 * with CLFLUSHOPT; no extra barrier after the nt stores.
 */
void
memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores);
}
/*
 * memset_movnt_avx512f_clwb -- AVX-512 nt memset flushing tails with
 * CLWB; no extra barrier after the nt stores.
 */
void
memset_movnt_avx512f_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores);
}
| 6,397 | 21.607774 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_t_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
/*
 * mm_store_si128 -- issue one temporal 16-byte store of src into the
 * idx-th 128-bit slot starting at dest.
 */
static force_inline void
mm_store_si128(char *dest, unsigned idx, __m128i src)
{
	__m128i *slot = (__m128i *)dest + idx;
	_mm_store_si128(slot, src);
}
/*
 * memset_mov4x64b -- fill 4 cache lines (16 x 16-byte stores) with the
 * pattern in xmm, then flush each of the 4 lines.  Stores are all issued
 * before any flush, matching the original unrolled ordering.
 */
static force_inline void
memset_mov4x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 16; ++i)
		mm_store_si128(dest, i, xmm);
	for (unsigned i = 0; i < 4; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov2x64b -- fill 2 cache lines (8 x 16-byte stores) with the
 * pattern in xmm, then flush both lines.
 */
static force_inline void
memset_mov2x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 8; ++i)
		mm_store_si128(dest, i, xmm);
	for (unsigned i = 0; i < 2; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov1x64b -- fill one cache line (4 x 16-byte stores) with the
 * pattern in xmm and flush it.
 */
static force_inline void
memset_mov1x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 4; ++i)
		mm_store_si128(dest, i, xmm);
	flush64b(dest);
}
/*
 * memset_mov_sse2 -- SSE2 temporal-store memset core.
 *
 * Aligns dest up to a 64-byte boundary with the small helper, then fills
 * the bulk in 4/2/1 cache-line chunks (flushing via flush64b), and
 * finishes any sub-line remainder with the small helper.
 */
static force_inline void
memset_mov_sse2(char *dest, int c, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	__m128i xmm = _mm_set1_epi8((char)c);
	/* bytes needed to reach the next 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_sse2(dest, xmm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	while (len >= 4 * 64) {
		memset_mov4x64b(dest, xmm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, xmm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, xmm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cache-line tail */
	if (len)
		memset_small_sse2(dest, xmm, len, flush);
}
/*
 * memset_mov_sse2_noflush -- SSE2 temporal memset, no flushing.
 */
void
memset_mov_sse2_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, noflush, noflush64b);
}
/*
 * memset_mov_sse2_empty -- SSE2 temporal memset with the empty (no-op)
 * flush callbacks.
 */
void
memset_mov_sse2_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_empty_nolog, flush64b_empty);
}
/*
 * memset_mov_sse2_clflush -- SSE2 temporal memset flushing with CLFLUSH.
 */
void
memset_mov_sse2_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
/*
 * memset_mov_sse2_clflushopt -- SSE2 temporal memset flushing with
 * CLFLUSHOPT.
 */
void
memset_mov_sse2_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clflushopt_nolog,
		pmem_clflushopt);
}
/*
 * memset_mov_sse2_clwb -- SSE2 temporal memset flushing with CLWB.
 */
void
memset_mov_sse2_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 3,304 | 20.461039 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_sse2.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMSET_SSE2_H
#define PMEM2_MEMSET_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "out.h"
/*
 * memset_small_sse2_noflush -- fill 1..64 bytes at dest with the byte
 * pattern replicated in xmm, without flushing.
 *
 * Uses the classic pair-of-overlapping-stores technique: each size range
 * issues a store at the start and a store ending exactly at dest + len,
 * so no byte past len is ever written.  The statement order is load-
 * bearing (overlapping stores) -- do not reorder.
 */
static force_inline void
memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len)
{
	ASSERT(len <= 64);
	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;
	if (len > 48) {
		/* 49..64 */
		_mm_storeu_si128((__m128i *)(dest + 0), xmm);
		_mm_storeu_si128((__m128i *)(dest + 16), xmm);
		_mm_storeu_si128((__m128i *)(dest + 32), xmm);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
		return;
	}
	/* 33..48 */
	_mm_storeu_si128((__m128i *)(dest + 0), xmm);
	_mm_storeu_si128((__m128i *)(dest + 16), xmm);
	_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
	return;
le32:
	if (len > 16) {
		/* 17..32 */
		_mm_storeu_si128((__m128i *)(dest + 0), xmm);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
		return;
	}
	/* 9..16: two possibly-overlapping 8-byte stores */
	uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm);
	*(ua_uint64_t *)dest = d8;
	*(ua_uint64_t *)(dest + len - 8) = d8;
	return;
le8:
	if (len <= 2)
		goto le2;
	if (len > 4) {
		/* 5..8: two possibly-overlapping 4-byte stores */
		uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm);
		*(ua_uint32_t *)dest = d4;
		*(ua_uint32_t *)(dest + len - 4) = d4;
		return;
	}
	/* 3..4: two possibly-overlapping 2-byte stores */
	uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
	*(ua_uint16_t *)dest = d2;
	*(ua_uint16_t *)(dest + len - 2) = d2;
	return;
le2:
	if (len == 2) {
		uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
		*(ua_uint16_t *)dest = d2;
		return;
	}
	/* len == 1 */
	*(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm);
}
/*
 * memset_small_sse2 -- fill 1..64 bytes at dest and flush the range.
 * Falls back to the generic byte-by-byte path under pmemcheck, because
 * the overlapping-store trick below trips its overwritten-store check.
 */
static force_inline void
memset_small_sse2(char *dest, __m128i xmm, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memset also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memset_nodrain_generic(dest, (uint8_t)_mm_cvtsi128_si32(xmm),
			len, PMEM2_F_MEM_NOFLUSH, NULL);
	} else {
		memset_small_sse2_noflush(dest, xmm, len);
	}
	flush(dest, len);
}
#endif
| 2,213 | 20.085714 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memset/memset_t_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx.h"
/*
 * mm256_store_si256 -- issue one temporal 32-byte store of src into the
 * idx-th 256-bit slot starting at dest.
 */
static force_inline void
mm256_store_si256(char *dest, unsigned idx, __m256i src)
{
	__m256i *slot = (__m256i *)dest + idx;
	_mm256_store_si256(slot, src);
}
/*
 * memset_mov8x64b -- fill 8 cache lines (16 x 32-byte stores) with the
 * pattern in ymm, then flush each of the 8 lines.  All stores are issued
 * before any flush, matching the original unrolled ordering.
 */
static force_inline void
memset_mov8x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 16; ++i)
		mm256_store_si256(dest, i, ymm);
	for (unsigned i = 0; i < 8; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov4x64b -- fill 4 cache lines (8 x 32-byte stores) with the
 * pattern in ymm, then flush each line.
 */
static force_inline void
memset_mov4x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 8; ++i)
		mm256_store_si256(dest, i, ymm);
	for (unsigned i = 0; i < 4; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov2x64b -- fill 2 cache lines (4 x 32-byte stores) with the
 * pattern in ymm, then flush both lines.
 */
static force_inline void
memset_mov2x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	for (unsigned i = 0; i < 4; ++i)
		mm256_store_si256(dest, i, ymm);
	for (unsigned i = 0; i < 2; ++i)
		flush64b(dest + i * 64);
}
/*
 * memset_mov1x64b -- fill one cache line (2 x 32-byte stores) with the
 * pattern in ymm and flush it.
 */
static force_inline void
memset_mov1x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	mm256_store_si256(dest, 0, ymm);
	mm256_store_si256(dest, 1, ymm);
	flush64b(dest);
}
/*
 * memset_mov_avx -- AVX temporal-store memset core.
 *
 * Aligns dest up to a 64-byte boundary with the small helper, then fills
 * the bulk in 8/4/2/1 cache-line chunks (flushing via flush64b), and
 * finishes any sub-line remainder with the small helper.
 */
static force_inline void
memset_mov_avx(char *dest, int c, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* bytes needed to reach the next 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	while (len >= 8 * 64) {
		memset_mov8x64b(dest, ymm, flush64b);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_mov4x64b(dest, ymm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, ymm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, ymm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cache-line tail */
	if (len)
		memset_small_avx(dest, ymm, len, flush);
	/* leave the SSE/AVX transition penalty state clean */
	avx_zeroupper();
}
/*
 * memset_mov_avx_noflush -- AVX temporal memset, no flushing.
 */
void
memset_mov_avx_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, noflush, noflush64b);
}
/*
 * memset_mov_avx_empty -- AVX temporal memset with the empty (no-op)
 * flush callbacks.
 */
void
memset_mov_avx_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_empty_nolog, flush64b_empty);
}
/*
 * memset_mov_avx_clflush -- AVX temporal memset flushing with CLFLUSH.
 */
void
memset_mov_avx_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
/*
 * memset_mov_avx_clflushopt -- AVX temporal memset flushing with
 * CLFLUSHOPT.
 */
void
memset_mov_avx_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clflushopt_nolog,
		pmem_clflushopt);
}
/*
 * memset_mov_avx_clwb -- AVX temporal memset flushing with CLWB.
 */
void
memset_mov_avx_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 3,890 | 20.73743 | 65 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "out.h"
/*
 * mm_loadu_si128 -- unaligned 16-byte load from the idx-th 128-bit slot
 * starting at src.
 */
static force_inline __m128i
mm_loadu_si128(const char *src, unsigned idx)
{
	const __m128i *slot = (const __m128i *)src + idx;
	return _mm_loadu_si128(slot);
}
/*
 * mm_store_si128 -- aligned temporal 16-byte store into the idx-th
 * 128-bit slot starting at dest.
 */
static force_inline void
mm_store_si128(char *dest, unsigned idx, __m128i src)
{
	__m128i *slot = (__m128i *)dest + idx;
	_mm_store_si128(slot, src);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B) with temporal stores,
 * then flush each of the 4 destination lines.  All 16 loads complete
 * before any store, exactly like the original register-unrolled version,
 * which keeps overlapping memmove within the chunk correct.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i t[16];
	for (unsigned i = 0; i < 16; ++i)
		t[i] = mm_loadu_si128(src, i);
	for (unsigned i = 0; i < 16; ++i)
		mm_store_si128(dest, i, t[i]);
	for (unsigned i = 0; i < 4; ++i)
		flush64b(dest + i * 64);
}
/*
 * memmove_mov2x64b -- copy 2 cache lines (128 B); loads all complete
 * before the stores, then both destination lines are flushed.
 */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i t[8];
	for (unsigned i = 0; i < 8; ++i)
		t[i] = mm_loadu_si128(src, i);
	for (unsigned i = 0; i < 8; ++i)
		mm_store_si128(dest, i, t[i]);
	for (unsigned i = 0; i < 2; ++i)
		flush64b(dest + i * 64);
}
/*
 * memmove_mov1x64b -- copy one cache line (64 B); loads all complete
 * before the stores, then the destination line is flushed.
 */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i t[4];
	for (unsigned i = 0; i < 4; ++i)
		t[i] = mm_loadu_si128(src, i);
	for (unsigned i = 0; i < 4; ++i)
		mm_store_si128(dest, i, t[i]);
	flush64b(dest);
}
/*
 * memmove_mov_sse_fw -- forward (low-to-high address) SSE2 temporal
 * memmove.  Aligns dest to 64 bytes with the small helper, copies the
 * bulk in 4/2/1 cache-line chunks (flushing via flush64b), and finishes
 * the sub-line tail with the small helper.
 */
static force_inline void
memmove_mov_sse_fw(char *dest, const char *src, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	/* bytes needed to reach the next 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_sse2(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	while (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cache-line tail */
	if (len)
		memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_mov_sse_bw -- backward (high-to-low address) SSE2 temporal
 * memmove, used when the ranges overlap with dest inside [src, src+len).
 * Starts from the end of both buffers, peels dest down to a 64-byte
 * boundary, then copies 4/2/1 cache-line chunks moving downwards, and
 * finishes the head with the small helper.
 */
static force_inline void
memmove_mov_sse_bw(char *dest, const char *src, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	/* work from one-past-the-end downwards */
	dest += len;
	src += len;
	/* bytes above the previous 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_sse2(dest, src, cnt, flush);
	}
	while (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* remaining head */
	if (len)
		memmove_small_sse2(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_sse2 -- dispatch to the forward or backward copy loop.
 *
 * The unsigned subtraction wraps around, so gap < len holds exactly when
 * dest lies inside [src, src + len) -- the only case where a forward copy
 * would clobber not-yet-read source bytes.
 */
static force_inline void
memmove_mov_sse2(char *dest, const char *src, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	uintptr_t gap = (uintptr_t)dest - (uintptr_t)src;
	if (gap < len)
		memmove_mov_sse_bw(dest, src, len, flush, flush64b);
	else
		memmove_mov_sse_fw(dest, src, len, flush, flush64b);
}
/*
 * memmove_mov_sse2_noflush -- SSE2 temporal memmove, no flushing.
 */
void
memmove_mov_sse2_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, noflush, noflush64b);
}
/*
 * memmove_mov_sse2_empty -- SSE2 temporal memmove with the empty (no-op)
 * flush callbacks.
 */
void
memmove_mov_sse2_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_empty_nolog, flush64b_empty);
}
/*
 * memmove_mov_sse2_clflush -- SSE2 temporal memmove flushing with
 * CLFLUSH.
 */
void
memmove_mov_sse2_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
/*
 * memmove_mov_sse2_clflushopt -- SSE2 temporal memmove flushing with
 * CLFLUSHOPT.
 */
void
memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clflushopt_nolog,
		pmem_clflushopt);
}
/*
 * memmove_mov_sse2_clwb -- SSE2 temporal memmove flushing with CLWB.
 */
void
memmove_mov_sse2_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
| 5,820 | 22.566802 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memcpy/memcpy_avx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMCPY_AVX_H
#define PMEM2_MEMCPY_AVX_H
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "out.h"
/*
 * memmove_small_avx_noflush -- copy 1..64 bytes from src to dest without
 * flushing.
 *
 * Uses the pair-of-overlapping-accesses technique: each size range loads
 * both ends of the source before storing, so the copy is correct even
 * for overlapping buffers, and no byte past len is written.  Statement
 * order (all loads before stores) is load-bearing -- do not reorder.
 */
static force_inline void
memmove_small_avx_noflush(char *dest, const char *src, size_t len)
{
	ASSERT(len <= 64);
	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;
	/* 33..64 */
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32));
	_mm256_storeu_si256((__m256i *)dest, ymm0);
	_mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1);
	return;
le32:
	if (len > 16) {
		/* 17..32 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));
		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
		return;
	}
	/* 9..16: two possibly-overlapping 8-byte copies */
	ua_uint64_t d80 = *(ua_uint64_t *)src;
	ua_uint64_t d81 = *(ua_uint64_t *)(src + len - 8);
	*(ua_uint64_t *)dest = d80;
	*(ua_uint64_t *)(dest + len - 8) = d81;
	return;
le8:
	if (len <= 2)
		goto le2;
	if (len > 4) {
		/* 5..8: two possibly-overlapping 4-byte copies */
		ua_uint32_t d40 = *(ua_uint32_t *)src;
		ua_uint32_t d41 = *(ua_uint32_t *)(src + len - 4);
		*(ua_uint32_t *)dest = d40;
		*(ua_uint32_t *)(dest + len - 4) = d41;
		return;
	}
	/* 3..4: two possibly-overlapping 2-byte copies */
	ua_uint16_t d20 = *(ua_uint16_t *)src;
	ua_uint16_t d21 = *(ua_uint16_t *)(src + len - 2);
	*(ua_uint16_t *)dest = d20;
	*(ua_uint16_t *)(dest + len - 2) = d21;
	return;
le2:
	if (len == 2) {
		*(ua_uint16_t *)dest = *(ua_uint16_t *)src;
		return;
	}
	/* len == 1 */
	*(uint8_t *)dest = *(uint8_t *)src;
}
/*
 * memmove_small_avx -- copy 1..64 bytes from src to dest and flush the
 * destination range.  Falls back to the generic byte path under
 * pmemcheck, because the overlapping-store trick trips its
 * overwritten-store check.
 */
static force_inline void
memmove_small_avx(char *dest, const char *src, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memcpy also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH,
			NULL);
	} else {
		memmove_small_avx_noflush(dest, src, len);
	}
	flush(dest, len);
}
#endif
| 2,173 | 20.524752 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx.h"
/*
 * mm256_loadu_si256 -- unaligned 32-byte load from the idx-th 256-bit
 * slot starting at src.
 */
static force_inline __m256i
mm256_loadu_si256(const char *src, unsigned idx)
{
	const __m256i *slot = (const __m256i *)src + idx;
	return _mm256_loadu_si256(slot);
}
/*
 * mm256_store_si256 -- aligned temporal 32-byte store into the idx-th
 * 256-bit slot starting at dest.
 */
static force_inline void
mm256_store_si256(char *dest, unsigned idx, __m256i src)
{
	__m256i *slot = (__m256i *)dest + idx;
	_mm256_store_si256(slot, src);
}
static force_inline void
memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b)
{
__m256i ymm0 = mm256_loadu_si256(src, 0);
__m256i ymm1 = mm256_loadu_si256(src, 1);
__m256i ymm2 = mm256_loadu_si256(src, 2);
__m256i ymm3 = mm256_loadu_si256(src, 3);
__m256i ymm4 = mm256_loadu_si256(src, 4);
__m256i ymm5 = mm256_loadu_si256(src, 5);
__m256i ymm6 = mm256_loadu_si256(src, 6);
__m256i ymm7 = mm256_loadu_si256(src, 7);
__m256i ymm8 = mm256_loadu_si256(src, 8);
__m256i ymm9 = mm256_loadu_si256(src, 9);
__m256i ymm10 = mm256_loadu_si256(src, 10);
__m256i ymm11 = mm256_loadu_si256(src, 11);
__m256i ymm12 = mm256_loadu_si256(src, 12);
__m256i ymm13 = mm256_loadu_si256(src, 13);
__m256i ymm14 = mm256_loadu_si256(src, 14);
__m256i ymm15 = mm256_loadu_si256(src, 15);
mm256_store_si256(dest, 0, ymm0);
mm256_store_si256(dest, 1, ymm1);
mm256_store_si256(dest, 2, ymm2);
mm256_store_si256(dest, 3, ymm3);
mm256_store_si256(dest, 4, ymm4);
mm256_store_si256(dest, 5, ymm5);
mm256_store_si256(dest, 6, ymm6);
mm256_store_si256(dest, 7, ymm7);
mm256_store_si256(dest, 8, ymm8);
mm256_store_si256(dest, 9, ymm9);
mm256_store_si256(dest, 10, ymm10);
mm256_store_si256(dest, 11, ymm11);
mm256_store_si256(dest, 12, ymm12);
mm256_store_si256(dest, 13, ymm13);
mm256_store_si256(dest, 14, ymm14);
mm256_store_si256(dest, 15, ymm15);
flush64b(dest + 0 * 64);
flush64b(dest + 1 * 64);
flush64b(dest + 2 * 64);
flush64b(dest + 3 * 64);
flush64b(dest + 4 * 64);
flush64b(dest + 5 * 64);
flush64b(dest + 6 * 64);
flush64b(dest + 7 * 64);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B) with temporal AVX
 * loads/stores, then flush each destination cache line.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	mm256_store_si256(dest, 2, ymm2);
	mm256_store_si256(dest, 3, ymm3);
	mm256_store_si256(dest, 4, ymm4);
	mm256_store_si256(dest, 5, ymm5);
	mm256_store_si256(dest, 6, ymm6);
	mm256_store_si256(dest, 7, ymm7);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/*
 * memmove_mov2x64b -- copy 2 cache lines (128 B) with temporal AVX
 * loads/stores, then flush both destination cache lines.
 */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	mm256_store_si256(dest, 2, ymm2);
	mm256_store_si256(dest, 3, ymm3);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/*
 * memmove_mov1x64b -- copy 1 cache line (64 B) with temporal AVX
 * loads/stores, then flush the destination cache line.
 */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	flush64b(dest + 0 * 64);
}
/*
 * memmove_mov_avx_fw -- forward (ascending-address) temporal AVX copy of
 * len bytes from src to dest; every destination cache line is flushed.
 */
static force_inline void
memmove_mov_avx_fw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* head: copy up to 63 B so dest becomes 64-byte aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* bulk: progressively smaller aligned blocks (512/256/128/64 B) */
	while (len >= 8 * 64) {
		memmove_mov8x64b(dest, src, flush64b);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: remaining sub-cache-line bytes */
	if (len)
		memmove_small_avx(dest, src, len, flush);
}
/*
 * memmove_mov_avx_bw -- backward (descending-address) temporal AVX copy,
 * used when dest overlaps src from above; pointers start past the end
 * and are decremented before each block copy.
 */
static force_inline void
memmove_mov_avx_bw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	dest += len;
	src += len;
	/* head (highest addresses): align dest down to a 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx(dest, src, cnt, flush);
	}
	while (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_mov8x64b(dest, src, flush64b);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* tail (lowest addresses) */
	if (len)
		memmove_small_avx(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_avx -- dispatch between forward and backward copy.
 * The unsigned difference wraps when dest < src, so the forward path is
 * taken unless dest lies inside [src, src + len), where a forward copy
 * would overwrite not-yet-read source bytes.
 */
static force_inline void
memmove_mov_avx(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_mov_avx_fw(dest, src, len, flush, flush64b);
	else
		memmove_mov_avx_bw(dest, src, len, flush, flush64b);
	avx_zeroupper();	/* project helper; clears upper ymm state */
}
/* Public variant: temporal AVX copy with no flushing at all. */
void
memmove_mov_avx_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, noflush, noflush64b);
}
/* Public variant: temporal AVX copy with empty (no-op, no-log) flushes. */
void
memmove_mov_avx_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_empty_nolog, flush64b_empty);
}
/* Public variant: temporal AVX copy flushed with CLFLUSH. */
void
memmove_mov_avx_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
/* Public variant: temporal AVX copy flushed with CLFLUSHOPT. */
void
memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* Public variant: temporal AVX copy flushed with CLWB. */
void
memmove_mov_avx_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
/* ==== end of memcpy_t_avx.c ==== */
/* ==== file: src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c -- SPDX-License-Identifier: BSD-3-Clause ==== */
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx512f.h"
/* Unaligned 64-byte load of the idx-th __m512i chunk starting at src. */
static force_inline __m512i
mm512_loadu_si512(const char *src, unsigned idx)
{
	const __m512i *chunk = (const __m512i *)src + idx;
	return _mm512_loadu_si512(chunk);
}
/* Aligned (temporal) 64-byte store of src into the idx-th slot at dest. */
static force_inline void
mm512_store_si512(char *dest, unsigned idx, __m512i src)
{
	__m512i *slot = (__m512i *)dest + idx;
	_mm512_store_si512(slot, src);
}
/*
 * memmove_mov32x64b -- copy 32 cache lines (2 KiB) with temporal AVX-512
 * loads/stores, then flush each destination cache line.
 * All 32 zmm chunks are loaded before any store is issued.
 */
static force_inline void
memmove_mov32x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	__m512i zmm16 = mm512_loadu_si512(src, 16);
	__m512i zmm17 = mm512_loadu_si512(src, 17);
	__m512i zmm18 = mm512_loadu_si512(src, 18);
	__m512i zmm19 = mm512_loadu_si512(src, 19);
	__m512i zmm20 = mm512_loadu_si512(src, 20);
	__m512i zmm21 = mm512_loadu_si512(src, 21);
	__m512i zmm22 = mm512_loadu_si512(src, 22);
	__m512i zmm23 = mm512_loadu_si512(src, 23);
	__m512i zmm24 = mm512_loadu_si512(src, 24);
	__m512i zmm25 = mm512_loadu_si512(src, 25);
	__m512i zmm26 = mm512_loadu_si512(src, 26);
	__m512i zmm27 = mm512_loadu_si512(src, 27);
	__m512i zmm28 = mm512_loadu_si512(src, 28);
	__m512i zmm29 = mm512_loadu_si512(src, 29);
	__m512i zmm30 = mm512_loadu_si512(src, 30);
	__m512i zmm31 = mm512_loadu_si512(src, 31);
	/* aligned stores: one per cache line */
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	mm512_store_si512(dest, 8, zmm8);
	mm512_store_si512(dest, 9, zmm9);
	mm512_store_si512(dest, 10, zmm10);
	mm512_store_si512(dest, 11, zmm11);
	mm512_store_si512(dest, 12, zmm12);
	mm512_store_si512(dest, 13, zmm13);
	mm512_store_si512(dest, 14, zmm14);
	mm512_store_si512(dest, 15, zmm15);
	mm512_store_si512(dest, 16, zmm16);
	mm512_store_si512(dest, 17, zmm17);
	mm512_store_si512(dest, 18, zmm18);
	mm512_store_si512(dest, 19, zmm19);
	mm512_store_si512(dest, 20, zmm20);
	mm512_store_si512(dest, 21, zmm21);
	mm512_store_si512(dest, 22, zmm22);
	mm512_store_si512(dest, 23, zmm23);
	mm512_store_si512(dest, 24, zmm24);
	mm512_store_si512(dest, 25, zmm25);
	mm512_store_si512(dest, 26, zmm26);
	mm512_store_si512(dest, 27, zmm27);
	mm512_store_si512(dest, 28, zmm28);
	mm512_store_si512(dest, 29, zmm29);
	mm512_store_si512(dest, 30, zmm30);
	mm512_store_si512(dest, 31, zmm31);
	/* flush every written cache line */
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
	flush64b(dest + 16 * 64);
	flush64b(dest + 17 * 64);
	flush64b(dest + 18 * 64);
	flush64b(dest + 19 * 64);
	flush64b(dest + 20 * 64);
	flush64b(dest + 21 * 64);
	flush64b(dest + 22 * 64);
	flush64b(dest + 23 * 64);
	flush64b(dest + 24 * 64);
	flush64b(dest + 25 * 64);
	flush64b(dest + 26 * 64);
	flush64b(dest + 27 * 64);
	flush64b(dest + 28 * 64);
	flush64b(dest + 29 * 64);
	flush64b(dest + 30 * 64);
	flush64b(dest + 31 * 64);
}
/*
 * memmove_mov16x64b -- copy 16 cache lines (1 KiB) with temporal AVX-512
 * loads/stores, then flush each destination cache line.
 */
static force_inline void
memmove_mov16x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	mm512_store_si512(dest, 8, zmm8);
	mm512_store_si512(dest, 9, zmm9);
	mm512_store_si512(dest, 10, zmm10);
	mm512_store_si512(dest, 11, zmm11);
	mm512_store_si512(dest, 12, zmm12);
	mm512_store_si512(dest, 13, zmm13);
	mm512_store_si512(dest, 14, zmm14);
	mm512_store_si512(dest, 15, zmm15);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
}
/*
 * memmove_mov8x64b -- copy 8 cache lines (512 B) with temporal AVX-512
 * loads/stores, then flush each destination cache line.
 */
static force_inline void
memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B) with temporal AVX-512
 * loads/stores, then flush each destination cache line.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/*
 * memmove_mov2x64b -- copy 2 cache lines (128 B) with temporal AVX-512
 * loads/stores, then flush both destination cache lines.
 */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/*
 * memmove_mov1x64b -- copy one cache line (64 B) with a single temporal
 * AVX-512 load/store, then flush it.
 */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	mm512_store_si512(dest, 0, zmm0);
	flush64b(dest + 0 * 64);
}
/*
 * memmove_mov_avx512f_fw -- forward (ascending-address) temporal AVX-512
 * copy of len bytes; every destination cache line is flushed.
 */
static force_inline void
memmove_mov_avx512f_fw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* head: copy up to 63 B so dest becomes 64-byte aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx512f(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* bulk: progressively smaller aligned blocks (2K/1K/512/256/128/64 B) */
	while (len >= 32 * 64) {
		memmove_mov32x64b(dest, src, flush64b);
		dest += 32 * 64;
		src += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memmove_mov16x64b(dest, src, flush64b);
		dest += 16 * 64;
		src += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memmove_mov8x64b(dest, src, flush64b);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: remaining sub-cache-line bytes */
	if (len)
		memmove_small_avx512f(dest, src, len, flush);
}
/*
 * memmove_mov_avx512f_bw -- backward (descending-address) temporal
 * AVX-512 copy, used for overlapping regions where dest > src; pointers
 * start past the end and are decremented before each block copy.
 */
static force_inline void
memmove_mov_avx512f_bw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	dest += len;
	src += len;
	/* head (highest addresses): align dest down to a 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx512f(dest, src, cnt, flush);
	}
	while (len >= 32 * 64) {
		dest -= 32 * 64;
		src -= 32 * 64;
		len -= 32 * 64;
		memmove_mov32x64b(dest, src, flush64b);
	}
	if (len >= 16 * 64) {
		dest -= 16 * 64;
		src -= 16 * 64;
		len -= 16 * 64;
		memmove_mov16x64b(dest, src, flush64b);
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_mov8x64b(dest, src, flush64b);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* tail (lowest addresses) */
	if (len)
		memmove_small_avx512f(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_avx512f -- dispatch between forward and backward copy.
 * Unsigned wraparound makes the forward path the default unless dest
 * lies inside [src, src + len).
 */
static force_inline void
memmove_mov_avx512f(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_mov_avx512f_fw(dest, src, len, flush, flush64b);
	else
		memmove_mov_avx512f_bw(dest, src, len, flush, flush64b);
	avx_zeroupper();	/* project helper; clears upper ymm state */
}
/* Public variant: temporal AVX-512 copy with no flushing at all. */
void
memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, noflush, noflush64b);
}
/* Public variant: temporal AVX-512 copy with empty (no-op) flushes. */
void
memmove_mov_avx512f_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_empty_nolog, flush64b_empty);
}
/* Public variant: temporal AVX-512 copy flushed with CLFLUSH. */
void
memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
/* Public variant: temporal AVX-512 copy flushed with CLFLUSHOPT. */
void
memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* Public variant: temporal AVX-512 copy flushed with CLWB. */
void
memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
/* ==== end of memcpy_t_avx512f.c ==== */
/* ==== file: src/libpmem2/x86_64/memcpy/memcpy_sse2.h -- SPDX-License-Identifier: BSD-3-Clause ==== */
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMCPY_SSE2_H
#define PMEM2_MEMCPY_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "out.h"
/*
 * memmove_small_sse2_noflush -- copy at most 64 B with overlap-safe
 * SSE2/scalar loads and stores; no cache flushing.  In every size class
 * all loads complete before the first store, and the last load/store
 * pair is anchored at (len - chunk), so overlapping src/dest are handled
 * correctly in either direction.
 */
static force_inline void
memmove_small_sse2_noflush(char *dest, const char *src, size_t len)
{
	ASSERT(len <= 64);
	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;
	if (len > 48) {
		/* 49..64 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
		__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32));
		__m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16));
		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
		_mm_storeu_si128((__m128i *)(dest + 32), xmm2);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm3);
		return;
	}
	/* 33..48 */
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16));
	__m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16));
	_mm_storeu_si128((__m128i *)dest, xmm0);
	_mm_storeu_si128((__m128i *)(dest + 16), xmm1);
	_mm_storeu_si128((__m128i *)(dest + len - 16), xmm2);
	return;
le32:
	if (len > 16) {
		/* 17..32 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));
		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
		return;
	}
	/* 9..16 */
	uint64_t d80 = *(ua_uint64_t *)src;
	uint64_t d81 = *(ua_uint64_t *)(src + len - 8);
	*(ua_uint64_t *)dest = d80;
	*(ua_uint64_t *)(dest + len - 8) = d81;
	return;
le8:
	if (len <= 2)
		goto le2;
	if (len > 4) {
		/* 5..8 */
		uint32_t d40 = *(ua_uint32_t *)src;
		uint32_t d41 = *(ua_uint32_t *)(src + len - 4);
		*(ua_uint32_t *)dest = d40;
		*(ua_uint32_t *)(dest + len - 4) = d41;
		return;
	}
	/* 3..4 */
	uint16_t d20 = *(ua_uint16_t *)src;
	uint16_t d21 = *(ua_uint16_t *)(src + len - 2);
	*(ua_uint16_t *)dest = d20;
	*(ua_uint16_t *)(dest + len - 2) = d21;
	return;
le2:
	/* 0..2 */
	if (len == 2) {
		*(ua_uint16_t *)dest = *(ua_uint16_t *)src;
		return;
	}
	*(uint8_t *)dest = *(uint8_t *)src;
}
/*
 * memmove_small_sse2 -- copy at most 64 B, then flush the destination
 * range with the supplied flush_fn.
 */
static force_inline void
memmove_small_sse2(char *dest, const char *src, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memcpy also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH,
				NULL);
	} else {
		memmove_small_sse2_noflush(dest, src, len);
	}
	flush(dest, len);
}
#endif
/* ==== end of memcpy_sse2.h ==== */
/* ==== file: src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c -- SPDX-License-Identifier: BSD-3-Clause ==== */
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx.h"
#include "valgrind_internal.h"
/* Unaligned 32-byte load of the idx-th __m256i chunk starting at src. */
static force_inline __m256i
mm256_loadu_si256(const char *src, unsigned idx)
{
	const __m256i *chunk = (const __m256i *)src + idx;
	return _mm256_loadu_si256(chunk);
}
/*
 * Non-temporal 32-byte store into the idx-th slot at dest, followed by
 * barrier() -- a project macro; NOTE(review): presumably a compiler
 * barrier for instrumentation, confirm in memcpy_memset.h.
 */
static force_inline void
mm256_stream_si256(char *dest, unsigned idx, __m256i src)
{
	__m256i *slot = (__m256i *)dest + idx;
	_mm256_stream_si256(slot, src);
	barrier();
}
/*
 * memmove_movnt8x64b -- copy 8 cache lines (512 B) using non-temporal
 * (streaming) AVX stores; no flush needed, caller issues the fence.
 * All 16 ymm chunks are loaded before any store is issued.
 */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	__m256i ymm8 = mm256_loadu_si256(src, 8);
	__m256i ymm9 = mm256_loadu_si256(src, 9);
	__m256i ymm10 = mm256_loadu_si256(src, 10);
	__m256i ymm11 = mm256_loadu_si256(src, 11);
	__m256i ymm12 = mm256_loadu_si256(src, 12);
	__m256i ymm13 = mm256_loadu_si256(src, 13);
	__m256i ymm14 = mm256_loadu_si256(src, 14);
	__m256i ymm15 = mm256_loadu_si256(src, 15);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
	mm256_stream_si256(dest, 4, ymm4);
	mm256_stream_si256(dest, 5, ymm5);
	mm256_stream_si256(dest, 6, ymm6);
	mm256_stream_si256(dest, 7, ymm7);
	mm256_stream_si256(dest, 8, ymm8);
	mm256_stream_si256(dest, 9, ymm9);
	mm256_stream_si256(dest, 10, ymm10);
	mm256_stream_si256(dest, 11, ymm11);
	mm256_stream_si256(dest, 12, ymm12);
	mm256_stream_si256(dest, 13, ymm13);
	mm256_stream_si256(dest, 14, ymm14);
	mm256_stream_si256(dest, 15, ymm15);
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) using non-temporal
 * AVX stores.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
	mm256_stream_si256(dest, 4, ymm4);
	mm256_stream_si256(dest, 5, ymm5);
	mm256_stream_si256(dest, 6, ymm6);
	mm256_stream_si256(dest, 7, ymm7);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) using non-temporal
 * AVX stores.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
}
/*
 * memmove_movnt1x64b -- copy 1 cache line (64 B) using non-temporal
 * AVX stores.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
}
/* memmove_movnt1x32b -- copy 32 B with a single non-temporal AVX store. */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
	mm256_stream_si256(dest, 0, ymm0);
}
/* memmove_movnt1x16b -- copy 16 B with a single non-temporal SSE store. */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	_mm_stream_si128((__m128i *)dest, xmm0);
}
/* memmove_movnt1x8b -- copy 8 B with a non-temporal 64-bit store. */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
}
/* memmove_movnt1x4b -- copy 4 B with a non-temporal 32-bit store. */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
}
/*
 * memmove_movnt_avx_fw -- forward non-temporal AVX copy with software
 * prefetching; perf_barrier is invoked between PERF_BARRIER_SIZE chunks
 * when more data remains.  Sub-block tails that are an exact power of
 * two still use a single nt store; anything else falls back to the
 * small (temporal + flush) path.
 */
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	/* head: copy up to 63 B so dest becomes 64-byte aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	const char *srcend = src + len;
	prefetch_ini_fw(src, len);
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_fw(src, srcend);
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		if (len)
			perf_barrier();
	}
	if (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	memmove_small_avx(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx_bw -- backward non-temporal AVX copy for overlapping
 * regions where dest > src; pointers start past the end and are
 * decremented before each block copy.
 */
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	dest += len;
	src += len;
	/* head (highest addresses): align dest down to a 64-byte boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx(dest, src, cnt, flush);
	}
	const char *srcbegin = src - len;
	prefetch_ini_bw(src, len);
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_bw(src, srcbegin);
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		if (len)
			perf_barrier();
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	dest -= len;
	src -= len;
	memmove_small_avx(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx -- non-temporal copy: forward/backward dispatch
 * (unsigned wraparound overlap test), then barrier() orders the
 * streaming stores and Valgrind is told about the flushed range.
 */
static force_inline void
memmove_movnt_avx(char *dest, const char *src, size_t len, flush_fn flush,
		barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx_fw(dest, src, len, flush, perf_barrier);
	else
		memmove_movnt_avx_bw(dest, src, len, flush, perf_barrier);
	barrier();
	VALGRIND_DO_FLUSH(dest, len);
}
/* variants without perf_barrier */
/* Public variant: nt copy, no flush, no WC perf barrier. */
void
memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores,
			no_barrier);
}
/* Public variant: nt copy, empty flush, no WC perf barrier. */
void
memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
/* Public variant: nt copy, CLFLUSH for the small-copy path, no WC barrier. */
void
memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
/* Public variant: nt copy, CLFLUSHOPT for the small-copy path, no WC barrier. */
void
memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* Public variant: nt copy, CLWB for the small-copy path, no WC barrier. */
void
memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
/* Public variant: nt copy, no flush, WC perf barrier between chunks. */
void
memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
/* Public variant: nt copy, empty flush, WC perf barrier between chunks. */
void
memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
/* Public variant: nt copy, CLFLUSH small-copy path, WC perf barrier. */
void
memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
/* Public variant: nt copy, CLFLUSHOPT small-copy path, WC perf barrier. */
void
memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
/* Public variant: nt copy, CLWB small-copy path, WC perf barrier. */
void
memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
/* ==== end of memcpy_nt_avx.c ==== */
/* ==== file: src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c -- SPDX-License-Identifier: BSD-3-Clause ==== */
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "valgrind_internal.h"
/* Unaligned 16-byte load of the idx-th __m128i chunk starting at src. */
static force_inline __m128i
mm_loadu_si128(const char *src, unsigned idx)
{
	const __m128i *chunk = (const __m128i *)src + idx;
	return _mm_loadu_si128(chunk);
}
/*
 * Non-temporal 16-byte store into the idx-th slot at dest, followed by
 * barrier() -- a project macro; NOTE(review): presumably a compiler
 * barrier for instrumentation, confirm in memcpy_memset.h.
 */
static force_inline void
mm_stream_si128(char *dest, unsigned idx, __m128i src)
{
	__m128i *slot = (__m128i *)dest + idx;
	_mm_stream_si128(slot, src);
	barrier();
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) using non-temporal
 * SSE2 stores; all 16 xmm chunks are loaded before any store is issued.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	__m128i xmm8 = mm_loadu_si128(src, 8);
	__m128i xmm9 = mm_loadu_si128(src, 9);
	__m128i xmm10 = mm_loadu_si128(src, 10);
	__m128i xmm11 = mm_loadu_si128(src, 11);
	__m128i xmm12 = mm_loadu_si128(src, 12);
	__m128i xmm13 = mm_loadu_si128(src, 13);
	__m128i xmm14 = mm_loadu_si128(src, 14);
	__m128i xmm15 = mm_loadu_si128(src, 15);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
	mm_stream_si128(dest, 4, xmm4);
	mm_stream_si128(dest, 5, xmm5);
	mm_stream_si128(dest, 6, xmm6);
	mm_stream_si128(dest, 7, xmm7);
	mm_stream_si128(dest, 8, xmm8);
	mm_stream_si128(dest, 9, xmm9);
	mm_stream_si128(dest, 10, xmm10);
	mm_stream_si128(dest, 11, xmm11);
	mm_stream_si128(dest, 12, xmm12);
	mm_stream_si128(dest, 13, xmm13);
	mm_stream_si128(dest, 14, xmm14);
	mm_stream_si128(dest, 15, xmm15);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) using non-temporal
 * SSE2 stores.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
	mm_stream_si128(dest, 4, xmm4);
	mm_stream_si128(dest, 5, xmm5);
	mm_stream_si128(dest, 6, xmm6);
	mm_stream_si128(dest, 7, xmm7);
}
/*
 * memmove_movnt1x64b -- copy 1 cache line (64 B) using non-temporal
 * SSE2 stores.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
}
/* memmove_movnt1x32b -- copy 32 B with two non-temporal SSE2 stores. */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
}
/* memmove_movnt1x16b -- copy 16 B with a single non-temporal SSE2 store. */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	mm_stream_si128(dest, 0, xmm0);
}
/* memmove_movnt1x8b -- copy 8 B with a non-temporal 64-bit store. */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
}
/* memmove_movnt1x4b -- copy 4 B with a non-temporal 32-bit store. */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
}
/*
 * memmove_movnt_sse_fw -- forward non-temporal SSE2 copy with software
 * prefetching; perf_barrier is invoked between PERF_BARRIER_SIZE chunks
 * when more data remains.  Power-of-two tails (32/16/8/4 B) use a
 * single nt store; anything else falls back to the small (temporal +
 * flush) path.
 */
static force_inline void
memmove_movnt_sse_fw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	/* head: copy up to 63 B so dest becomes 64-byte aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_sse2(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	const char *srcend = src + len;
	prefetch_ini_fw(src, len);
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_fw(src, srcend);
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		if (len)
			perf_barrier();
	}
	while (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		return;
	}
nonnt:
	memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_movnt_sse_bw -- backward (descending-address) non-temporal
 * memmove, used when a forward copy could overwrite unread source bytes
 * (i.e. dest overlaps the tail of src).  Mirrors memmove_movnt_sse_fw but
 * starts from the end of both buffers and decrements the pointers before
 * each chunk copy.
 */
static force_inline void
memmove_movnt_sse_bw(char *dest, const char *src, size_t len, flush_fn flush,
	perf_barrier_fn perf_barrier)
{
	dest += len;
	src += len;
	/* head (at the high end): bring dest down to 64B alignment */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_sse2(dest, src, cnt, flush);
	}
	const char *srcbegin = src - len;
	prefetch_ini_bw(src, len);
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_bw(src, srcbegin);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		/* the loop body above must consume exactly one barrier unit */
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		/* no barrier after the very last chunk */
		if (len)
			perf_barrier();
	}
	while (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		return;
	}
nonnt:
	/* odd-sized tail: cached copy followed by an explicit flush */
	dest -= len;
	src -= len;
	memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_movnt_sse2 -- dispatch between the forward and backward copy.
 * The unsigned subtraction is a branch-free overlap test: it is >= len
 * both when dest >= src + len and (via wraparound) when dest < src, i.e.
 * exactly when a forward copy cannot clobber unread source bytes.
 * A final barrier orders the NT stores; VALGRIND_DO_FLUSH informs
 * pmemcheck about the flushed range.
 */
static force_inline void
memmove_movnt_sse2(char *dest, const char *src, size_t len, flush_fn flush,
	barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_sse_fw(dest, src, len, flush, perf_barrier);
	else
		memmove_movnt_sse_bw(dest, src, len, flush, perf_barrier);
	barrier();
	VALGRIND_DO_FLUSH(dest, len);
}
/* variants without perf_barrier */
/* NT memmove variant: no flush after tails, no WC perf barrier. */
void
memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores,
			no_barrier);
}
/* NT memmove variant: no-op flush (logged elsewhere), no WC perf barrier. */
void
memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
/* NT memmove variant: CLFLUSH for tails, no WC perf barrier. */
void
memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
/* NT memmove variant: CLFLUSHOPT for tails (caller fences), no WC barrier. */
void
memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* NT memmove variant: CLWB for tails (caller fences), no WC perf barrier. */
void
memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
/* NT memmove variant: no flush, WC perf barrier between large chunks. */
void
memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
/* NT memmove variant: no-op flush, WC perf barrier between large chunks. */
void
memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
/* NT memmove variant: CLFLUSH for tails, WC perf barrier between chunks. */
void
memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
/* NT memmove variant: CLFLUSHOPT for tails, WC perf barrier between chunks. */
void
memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
/* NT memmove variant: CLWB for tails, WC perf barrier between chunks. */
void
memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
| 9,636 | 21.463869 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx512f.h"
#include "valgrind_internal.h"
/* Unaligned load of the idx-th 64-byte __m512i starting at src. */
static force_inline __m512i
mm512_loadu_si512(const char *src, unsigned idx)
{
	return _mm512_loadu_si512((const __m512i *)src + idx);
}
/*
 * Non-temporal store of one 64-byte line at (dest + 64*idx).
 * NOTE(review): the barrier() after every single NT store is specific to
 * this (NearPM shadow) tree -- presumably required by the shadow-memory
 * instrumentation; upstream PMDK fences once after the whole copy.
 * Confirm before removing, as it dominates the cost of this path.
 */
static force_inline void
mm512_stream_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_stream_si512((__m512i *)dest + idx, src);
	barrier();
}
/*
 * memmove_movnt32x64b -- copy 32 cache lines (2 KiB) with NT stores.
 * Deliberately fully unrolled across all 32 zmm registers; every byte is
 * loaded before the first store so overlapping ranges copy correctly.
 */
static force_inline void
memmove_movnt32x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	__m512i zmm16 = mm512_loadu_si512(src, 16);
	__m512i zmm17 = mm512_loadu_si512(src, 17);
	__m512i zmm18 = mm512_loadu_si512(src, 18);
	__m512i zmm19 = mm512_loadu_si512(src, 19);
	__m512i zmm20 = mm512_loadu_si512(src, 20);
	__m512i zmm21 = mm512_loadu_si512(src, 21);
	__m512i zmm22 = mm512_loadu_si512(src, 22);
	__m512i zmm23 = mm512_loadu_si512(src, 23);
	__m512i zmm24 = mm512_loadu_si512(src, 24);
	__m512i zmm25 = mm512_loadu_si512(src, 25);
	__m512i zmm26 = mm512_loadu_si512(src, 26);
	__m512i zmm27 = mm512_loadu_si512(src, 27);
	__m512i zmm28 = mm512_loadu_si512(src, 28);
	__m512i zmm29 = mm512_loadu_si512(src, 29);
	__m512i zmm30 = mm512_loadu_si512(src, 30);
	__m512i zmm31 = mm512_loadu_si512(src, 31);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
	mm512_stream_si512(dest, 4, zmm4);
	mm512_stream_si512(dest, 5, zmm5);
	mm512_stream_si512(dest, 6, zmm6);
	mm512_stream_si512(dest, 7, zmm7);
	mm512_stream_si512(dest, 8, zmm8);
	mm512_stream_si512(dest, 9, zmm9);
	mm512_stream_si512(dest, 10, zmm10);
	mm512_stream_si512(dest, 11, zmm11);
	mm512_stream_si512(dest, 12, zmm12);
	mm512_stream_si512(dest, 13, zmm13);
	mm512_stream_si512(dest, 14, zmm14);
	mm512_stream_si512(dest, 15, zmm15);
	mm512_stream_si512(dest, 16, zmm16);
	mm512_stream_si512(dest, 17, zmm17);
	mm512_stream_si512(dest, 18, zmm18);
	mm512_stream_si512(dest, 19, zmm19);
	mm512_stream_si512(dest, 20, zmm20);
	mm512_stream_si512(dest, 21, zmm21);
	mm512_stream_si512(dest, 22, zmm22);
	mm512_stream_si512(dest, 23, zmm23);
	mm512_stream_si512(dest, 24, zmm24);
	mm512_stream_si512(dest, 25, zmm25);
	mm512_stream_si512(dest, 26, zmm26);
	mm512_stream_si512(dest, 27, zmm27);
	mm512_stream_si512(dest, 28, zmm28);
	mm512_stream_si512(dest, 29, zmm29);
	mm512_stream_si512(dest, 30, zmm30);
	mm512_stream_si512(dest, 31, zmm31);
}
/*
 * memmove_movnt16x64b -- copy 16 cache lines (1 KiB) with NT stores;
 * loads all data before storing (overlap-safe).
 */
static force_inline void
memmove_movnt16x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
	mm512_stream_si512(dest, 4, zmm4);
	mm512_stream_si512(dest, 5, zmm5);
	mm512_stream_si512(dest, 6, zmm6);
	mm512_stream_si512(dest, 7, zmm7);
	mm512_stream_si512(dest, 8, zmm8);
	mm512_stream_si512(dest, 9, zmm9);
	mm512_stream_si512(dest, 10, zmm10);
	mm512_stream_si512(dest, 11, zmm11);
	mm512_stream_si512(dest, 12, zmm12);
	mm512_stream_si512(dest, 13, zmm13);
	mm512_stream_si512(dest, 14, zmm14);
	mm512_stream_si512(dest, 15, zmm15);
}
/* memmove_movnt8x64b -- copy 8 cache lines with NT stores (loads first). */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
	mm512_stream_si512(dest, 4, zmm4);
	mm512_stream_si512(dest, 5, zmm5);
	mm512_stream_si512(dest, 6, zmm6);
	mm512_stream_si512(dest, 7, zmm7);
}
/* memmove_movnt4x64b -- copy 4 cache lines with NT stores (loads first). */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
}
/* memmove_movnt2x64b -- copy 2 cache lines with NT stores (loads first). */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
}
/* memmove_movnt1x64b -- copy one cache line with a single 64B NT store. */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	mm512_stream_si512(dest, 0, zmm0);
}
/*
 * memmove_movnt1x32b -- copy 32 bytes with one non-temporal 256-bit store.
 * (Local renamed: the value lives in a ymm register, not a zmm.)
 */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((const __m256i *)src);

	_mm256_stream_si256((__m256i *)dest, ymm0);
}
/*
 * memmove_movnt1x16b -- copy 16 bytes with one non-temporal 128-bit store.
 * (Local renamed: the value lives in an xmm register, not a ymm.)
 */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((const __m128i *)src);

	_mm_stream_si128((__m128i *)dest, xmm0);
}
/* memmove_movnt1x8b -- copy one 8-byte quadword with one NT store. */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	long long qword = *(const long long *)src;

	_mm_stream_si64((long long *)dest, qword);
}
/* memmove_movnt1x4b -- copy one 4-byte doubleword with one NT store. */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	int dword = *(const int *)src;

	_mm_stream_si32((int *)dest, dword);
}
/*
 * memmove_movnt_avx512f_fw -- forward non-temporal AVX512 memmove.
 * Head bytes align dest to 64B via the cached small-copy path, then data
 * moves in 32/16/8/4/2/1-cache-line NT chunks; power-of-two tails use a
 * single NT store, any other tail goes through the small-copy path.
 * Ends with avx_zeroupper() to avoid AVX-SSE transition penalties.
 */
static force_inline void
memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len,
		flush_fn flush)
{
	/* bytes needed to reach 64B alignment of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx512f(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	while (len >= 32 * 64) {
		memmove_movnt32x64b(dest, src);
		dest += 32 * 64;
		src += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memmove_movnt16x64b(dest, src);
		dest += 16 * 64;
		src += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* odd-sized tail: cached copy followed by an explicit flush */
	memmove_small_avx512f(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx512f_bw -- backward non-temporal AVX512 memmove, used
 * when a forward copy could overwrite unread source bytes.  Mirrors the
 * forward variant but walks from the end of both buffers, decrementing
 * the pointers before each chunk copy.
 */
static force_inline void
memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len,
		flush_fn flush)
{
	dest += len;
	src += len;
	/* head (at the high end): bring dest down to 64B alignment */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx512f(dest, src, cnt, flush);
	}
	while (len >= 32 * 64) {
		dest -= 32 * 64;
		src -= 32 * 64;
		len -= 32 * 64;
		memmove_movnt32x64b(dest, src);
	}
	if (len >= 16 * 64) {
		dest -= 16 * 64;
		src -= 16 * 64;
		len -= 16 * 64;
		memmove_movnt16x64b(dest, src);
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	/* odd-sized tail: cached copy followed by an explicit flush */
	dest -= len;
	src -= len;
	memmove_small_avx512f(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx512f -- dispatch between forward and backward copy.
 * The unsigned subtraction is a branch-free overlap test (true when
 * dest < src or dest >= src + len).  A final barrier orders the NT
 * stores; VALGRIND_DO_FLUSH informs pmemcheck about the flushed range.
 */
static force_inline void
memmove_movnt_avx512f(char *dest, const char *src, size_t len, flush_fn flush,
		barrier_fn barrier)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx512f_fw(dest, src, len, flush);
	else
		memmove_movnt_avx512f_bw(dest, src, len, flush);
	barrier();
	VALGRIND_DO_FLUSH(dest, len);
}
/* AVX512 NT memmove variant: no flush for cached tails. */
void
memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, noflush, barrier_after_ntstores);
}
/* AVX512 NT memmove variant: no-op flush (logged elsewhere). */
void
memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores);
}
/* AVX512 NT memmove variant: CLFLUSH for cached tails. */
void
memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores);
}
/* AVX512 NT memmove variant: CLFLUSHOPT for cached tails (caller fences). */
void
memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores);
}
/* AVX512 NT memmove variant: CLWB for cached tails (caller fences). */
void
memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores);
}
| 11,246 | 23.45 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/aarch64/arm_cacheops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* ARM inline assembly to flush and invalidate caches
* clwb => dc cvac
* clflushopt => dc civac
* fence => dmb ish
* sfence => dmb ishst
*/
/*
* Cache instructions on ARM:
* ARMv8.0-a DC CVAC - cache clean to Point of Coherency
* Meant for thread synchronization, usually implies
* real memory flush but may mean less.
* ARMv8.2-a DC CVAP - cache clean to Point of Persistency
* Meant exactly for our use.
* ARMv8.5-a DC CVADP - cache clean to Point of Deep Persistency
* As of mid-2019 not on any commercially available CPU.
* Any of the above may be disabled for EL0, but it's probably safe to consider
* that a system configuration error.
* Other flags include I (like "DC CIVAC") that invalidates the cache line, but
* we don't want that.
*
* Memory fences:
* * DMB [ISH] MFENCE
* * DMB [ISH]ST SFENCE
* * DMB [ISH]LD LFENCE
*
* Memory domains (cache coherency):
* * non-shareable - local to a single core
* * inner shareable (ISH) - a group of CPU clusters/sockets/other hardware
* Linux requires that anything within one operating system/hypervisor
* is within the same Inner Shareable domain.
* * outer shareable (OSH) - one or more separate ISH domains
* * full system (SY) - anything that can possibly access memory
* Docs: ARM DDI 0487E.a page B2-144.
*
* Exception (privilege) levels:
* * EL0 - userspace (ring 3)
* * EL1 - kernel (ring 0)
* * EL2 - hypervisor (ring -1)
* * EL3 - "secure world" (ring -3)
*/
#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H
#include <stdlib.h>
/*
 * Clean (write back) the cache line containing p to the Point of
 * Coherency ("dc cvac").  Per the table above, PoC usually implies a
 * real memory flush but may fall short of persistence on some parts.
 */
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}
/* Store fence ("dmb ishst"): orders stores within the Inner Shareable
 * domain -- the ARM equivalent of x86 SFENCE. */
static inline void
arm_store_memory_barrier(void)
{
	asm volatile("dmb ishst" : : : "memory");
}
#endif
| 1,988 | 30.571429 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmem2/ppc64/init.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, IBM Corporation */
/* Copyright 2019-2020, Intel Corporation */
#include <errno.h>
#include <sys/mman.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
* Older assemblers versions do not support the latest versions of L, e.g.
* Binutils 2.34.
* Workaround this by using longs.
*/
#define __SYNC(l) ".long (0x7c0004AC | ((" #l ") << 21))"
#define __DCBF(ra, rb, l) ".long (0x7c0000AC | ((" #l ") << 21)" \
" | ((" #ra ") << 16) | ((" #rb ") << 11))"
/*
 * ppc_fence -- full memory barrier used as the libpmem2 fence primitive.
 */
static void
ppc_fence(void)
{
	LOG(15, NULL);

	/*
	 * Force a memory barrier to flush out all cache lines.
	 * Uses a heavyweight sync in order to guarantee the memory ordering
	 * even with a data cache flush.
	 * According to the POWER ISA 3.1, phwsync (aka. sync (L=4)) is treated
	 * as a hwsync by processors compatible with previous versions of the
	 * POWER ISA.
	 */
	asm volatile(__SYNC(4) : : : "memory");
}
/*
 * ppc_flush -- write back every cache line in [addr, addr + size) using
 * dcbf; the start address is rounded down to a cache-line boundary so
 * partial lines at the front are covered.
 */
static void
ppc_flush(const void *addr, size_t size)
{
	LOG(15, "addr %p size %zu", addr, size);

	uintptr_t uptr = (uintptr_t)addr;
	uintptr_t end = uptr + size;

	/* round down the address */
	uptr &= ~(CACHELINE_SIZE - 1);
	while (uptr < end) {
		/*
		 * Flush the data cache block.
		 * According to the POWER ISA 3.1, dcbstps (aka. dcbf (L=6))
		 * behaves as dcbf (L=0) on previous processors.
		 */
		asm volatile(__DCBF(0, %0, 6) : :"r"(uptr) : "memory");

		uptr += CACHELINE_SIZE;
	}
}
/*
 * pmem2_arch_init -- install the PPC64 fence/flush primitives into the
 * libpmem2 architecture dispatch table.
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, "libpmem*: PPC64 support");

	info->fence = ppc_fence;
	info->flush = ppc_flush;
}
| 1,594 | 22.80597 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/getopt/getopt.c | /*
* *Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "getopt.h"
#include <stddef.h>
#include <string.h>
#include <stdio.h>
char* optarg;
int optopt;
/* The variable optind [...] shall be initialized to 1 by the system. */
int optind = 1;
int opterr;
static char* optcursor = NULL;
static char *first = NULL;
/* rotates argv array */
/*
 * Rotate the argv slice left by one: argv[0] moves to argv[argc-1] and
 * every other element shifts one slot toward the front.  Used to permute
 * non-option arguments to the end of the command line.  No-op for
 * zero/one-element slices.
 */
static void rotate(char **argv, int argc) {
  if (argc <= 1)
    return;
  char *head = argv[0];
  for (int i = 0; i < argc - 1; ++i)
    argv[i] = argv[i + 1];
  argv[argc - 1] = head;
}
/* Implemented based on [1] and [2] for optional arguments.
optopt is handled FreeBSD-style, per [3].
Other GNU and FreeBSD extensions are purely accidental.
[1] https://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html
[2] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
[3] https://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE
*/
/*
 * getopt -- POSIX-style short-option parser with a GNU-like extension:
 * non-option arguments are rotated to the end of argv (via rotate()) so
 * options may appear anywhere on the command line.
 *
 * NOTE(review): diagnostics are printed to stderr unconditionally and
 * opterr is reset to 0 on every call, so callers cannot suppress
 * messages by setting opterr = 0 as POSIX specifies -- confirm this is
 * acceptable for the tools built against this shim.
 */
int getopt(int argc, char* const argv[], const char* optstring) {
  int optchar = -1;
  const char* optdecl = NULL;

  optarg = NULL;
  opterr = 0;
  optopt = 0;

  /* Unspecified, but we need it to avoid overrunning the argv bounds. */
  if (optind >= argc)
    goto no_more_optchars;

  /* If, when getopt() is called argv[optind] is a null pointer, getopt()
     shall return -1 without changing optind. */
  if (argv[optind] == NULL)
    goto no_more_optchars;

  /* If, when getopt() is called *argv[optind] is not the character '-',
     permute argv to move non options to the end */
  if (*argv[optind] != '-') {
    if (argc - optind <= 1)
      goto no_more_optchars;

    if (!first)
      first = argv[optind];

    do {
      rotate((char **)(argv + optind), argc - optind);
    } while (*argv[optind] != '-' && argv[optind] != first);

    if (argv[optind] == first)
      goto no_more_optchars;
  }

  /* If, when getopt() is called argv[optind] points to the string "-",
     getopt() shall return -1 without changing optind. */
  if (strcmp(argv[optind], "-") == 0)
    goto no_more_optchars;

  /* If, when getopt() is called argv[optind] points to the string "--",
     getopt() shall return -1 after incrementing optind. */
  if (strcmp(argv[optind], "--") == 0) {
    ++optind;
    if (first) {
      /* keep rotating so previously-permuted non-options end up last */
      do {
        rotate((char **)(argv + optind), argc - optind);
      } while (argv[optind] != first);
    }
    goto no_more_optchars;
  }

  if (optcursor == NULL || *optcursor == '\0')
    optcursor = argv[optind] + 1;

  optchar = *optcursor;

  /* FreeBSD: The variable optopt saves the last known option character
     returned by getopt(). */
  optopt = optchar;

  /* The getopt() function shall return the next option character (if one is
     found) from argv that matches a character in optstring, if there is
     one that matches. */
  optdecl = strchr(optstring, optchar);
  if (optdecl) {
    /* [I]f a character is followed by a colon, the option takes an
       argument. */
    if (optdecl[1] == ':') {
      optarg = ++optcursor;
      if (*optarg == '\0') {
        /* GNU extension: Two colons mean an option takes an
           optional arg; if there is text in the current argv-element
           (i.e., in the same word as the option name itself, for example,
           "-oarg"), then it is returned in optarg, otherwise optarg is set
           to zero. */
        if (optdecl[2] != ':') {
          /* If the option was the last character in the string pointed to by
             an element of argv, then optarg shall contain the next element
             of argv, and optind shall be incremented by 2. If the resulting
             value of optind is greater than argc, this indicates a missing
             option-argument, and getopt() shall return an error indication.
             Otherwise, optarg shall point to the string following the
             option character in that element of argv, and optind shall be
             incremented by 1.
          */
          if (++optind < argc) {
            optarg = argv[optind];
          } else {
            /* If it detects a missing option-argument, it shall return the
               colon character ( ':' ) if the first character of optstring
               was a colon, or a question-mark character ( '?' ) otherwise.
            */
            optarg = NULL;
            fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar);
            optchar = (optstring[0] == ':') ? ':' : '?';
          }
        } else {
          optarg = NULL;
        }
      }

      optcursor = NULL;
    }
  } else {
    fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar);
    /* If getopt() encounters an option character that is not contained in
       optstring, it shall return the question-mark ( '?' ) character. */
    optchar = '?';
  }

  if (optcursor == NULL || *++optcursor == '\0')
    ++optind;

  return optchar;

no_more_optchars:
  optcursor = NULL;
  first = NULL;
  return -1;
}
/* Implementation based on [1].
[1] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
*/
/*
 * getopt_long -- long-option ("--name[=arg]") parser layered on top of
 * getopt().  Matches a unique (possibly abbreviated) prefix of a long
 * option name; exact matches win over prefix matches.  Short options and
 * single-character arguments fall through to getopt().  Like getopt()
 * here, it permutes non-option arguments to the end of argv.
 */
int getopt_long(int argc, char* const argv[], const char* optstring,
  const struct option* longopts, int* longindex) {
  const struct option* o = longopts;
  const struct option* match = NULL;
  int num_matches = 0;
  size_t argument_name_length = 0;
  const char* current_argument = NULL;
  int retval = -1;

  optarg = NULL;
  optopt = 0;

  if (optind >= argc)
    return -1;

  /* If, when getopt() is called argv[optind] is a null pointer, getopt_long()
     shall return -1 without changing optind. */
  if (argv[optind] == NULL)
    goto no_more_optchars;

  /* If, when getopt_long() is called *argv[optind] is not the character '-',
     permute argv to move non options to the end */
  if (*argv[optind] != '-') {
    if (argc - optind <= 1)
      goto no_more_optchars;

    if (!first)
      first = argv[optind];

    do {
      rotate((char **)(argv + optind), argc - optind);
    } while (*argv[optind] != '-' && argv[optind] != first);

    if (argv[optind] == first)
      goto no_more_optchars;
  }

  /* not a long option ("--x..."): delegate to the short-option parser */
  if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0)
    return getopt(argc, argv, optstring);

  /* It's an option; starts with -- and is longer than two chars. */
  current_argument = argv[optind] + 2;
  argument_name_length = strcspn(current_argument, "=");
  for (; o->name; ++o) {
    if (strncmp(o->name, current_argument, argument_name_length) == 0) {
      match = o;
      ++num_matches;
      if (strlen(o->name) == argument_name_length) {
        /* found match is exactly the one which we are looking for */
        num_matches = 1;
        break;
      }
    }
  }

  if (num_matches == 1) {
    /* If longindex is not NULL, it points to a variable which is set to the
       index of the long option relative to longopts. */
    if (longindex)
      *longindex = (int)(match - longopts);

    /* If flag is NULL, then getopt_long() shall return val.
       Otherwise, getopt_long() returns 0, and flag shall point to a variable
       which shall be set to val if the option is found, but left unchanged if
       the option is not found. */
    if (match->flag)
      *(match->flag) = match->val;

    retval = match->flag ? 0 : match->val;

    if (match->has_arg != no_argument) {
      optarg = strchr(argv[optind], '=');
      if (optarg != NULL)
        ++optarg;

      if (match->has_arg == required_argument) {
        /* Only scan the next argv for required arguments. Behavior is not
           specified, but has been observed with Ubuntu and Mac OSX. */
        if (optarg == NULL && ++optind < argc) {
          optarg = argv[optind];
        }

        if (optarg == NULL)
          retval = ':';
      }
    } else if (strchr(argv[optind], '=')) {
      /* An argument was provided to a non-argument option.
         I haven't seen this specified explicitly, but both GNU and BSD-based
         implementations show this behavior.
      */
      retval = '?';
    }
  } else {
    /* Unknown option or ambiguous match. */
    retval = '?';
    if (num_matches == 0) {
      fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]);
    } else {
      fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]);
    }
  }

  ++optind;
  return retval;

no_more_optchars:
  first = NULL;
  return -1;
}
| 9,866 | 32.561224 | 91 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/getopt/getopt.h | /*
* *Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H
#if defined(__cplusplus)
extern "C" {
#endif
#define no_argument 0
#define required_argument 1
#define optional_argument 2
extern char* optarg;
extern int optind, opterr, optopt;
struct option {
const char* name;
int has_arg;
int* flag;
int val;
};
int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
const char* optstring, const struct option* longopts, int* longindex);
#if defined(__cplusplus)
}
#endif
#endif // INCLUDED_GETOPT_PORT_H
| 2,137 | 35.237288 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/include/win_mmap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
/* round x up / down to the nearest multiple of y */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
/* initialize / tear down the mmap-tracking machinery */
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
PMDK_SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry; /* sorted-queue linkage */
HANDLE FileHandle; /* handle of the mapped file */
HANDLE FileMappingHandle; /* handle of the file-mapping object */
void *BaseAddress; /* start of the mapped region */
void *EndAddress; /* end of the mapped region */
DWORD Access; /* access/protection used for the mapping */
os_off_t Offset; /* file offset where the mapping starts */
size_t FileLen; /* length of the underlying file */
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
/* lock guarding the global mapping queue below */
extern SRWLOCK FileMappingQLock;
extern PMDK_SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
| 2,871 | 34.02439 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/include/platform.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
* This is included to each source file using "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non unicode characters */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
* The inline keyword is available only in VC++.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
 * missing definitions -- substitutes for POSIX pieces that VC++ lacks
 */
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
/*
 * No trailing semicolon in the expansion -- the caller supplies it.
 * A semicolon inside the macro breaks usage such as
 * "if (cond) setlinebuf(fp); else ..." (CERT PRE11-C).
 */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ)
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
/* the bitmask must be wide enough to hold every signal number */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
/*
 * Minimal stand-in for the POSIX struct sigaction; the sa_sigaction
 * member is intentionally left out (kept below as a commented reminder).
 */
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
/*
 * sigemptyset -- clear all signals from the set; always succeeds
 */
__inline int
sigemptyset(sigset_t *set)
{
	/* an empty set has no bits raised */
	*set = (sigset_t)0;
	return 0;
}
/*
 * sigfillset -- add every signal to the set; always succeeds
 */
__inline int
sigfillset(sigset_t *set)
{
	/* a full set has every bit raised */
	*set = ~(sigset_t)0;
	return 0;
}
/*
 * sigaddset -- raise the bit for signum in the set;
 * returns 0 on success, -1 with errno = EINVAL for an out-of-range signal
 */
__inline int
sigaddset(sigset_t *set, int signum)
{
	/* valid signal numbers are 1 .. NSIG-1 */
	if (signum > 0 && signum < NSIG) {
		*set |= 1ULL << (signum - 1);
		return 0;
	}
	errno = EINVAL;
	return -1;
}
/*
 * sigdelset -- clear the bit for signum in the set;
 * returns 0 on success, -1 with errno = EINVAL for an out-of-range signal
 */
__inline int
sigdelset(sigset_t *set, int signum)
{
	/* valid signal numbers are 1 .. NSIG-1 */
	if (signum > 0 && signum < NSIG) {
		*set &= ~(1ULL << (signum - 1));
		return 0;
	}
	errno = EINVAL;
	return -1;
}
/*
 * sigismember -- test whether signum is present in the set;
 * returns 1 if present, 0 if absent, -1 with errno = EINVAL if signum
 * is out of range
 */
__inline int
sigismember(const sigset_t *set, int signum)
{
	if (signum <= 0 || signum >= NSIG) {
		errno = EINVAL;
		return -1;
	}
	/* shift the tested bit down so the result is exactly 0 or 1 */
	return (int)((*set >> (signum - 1)) & 1ULL);
}
/* sched.h */
/*
* sched_yield -- yield the processor
*/
__inline int
sched_yield(void)
{
/*
 * SwitchToThread()'s result (whether another thread actually ran) is
 * ignored here; POSIX sched_yield() makes no such distinction.
 */
SwitchToThread();
return 0; /* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
| 5,431 | 22.929515 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/include/endian.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
/* host is little-endian, so host<->LE conversions are identity macros */
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
/* host<->big-endian conversions byte-swap via MSVC intrinsics */
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
#endif /* ENDIAN_H */
| 696 | 20.121212 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/include/sys/file.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
| 1,750 | 45.078947 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/windows/include/sys/param.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1
/* round x up to a multiple of y; count y-sized chunks needed to cover x */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8 /* bits per byte */
/* bit-array helpers: b is a byte array, i is a bit index */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)
/* NOTE: a and b may be evaluated more than once -- avoid side effects */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif /* SYS_PARAM_H */
| 612 | 24.541667 | 64 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/include/libpmemblk.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemblk.h -- definitions of libpmemblk entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmemblk provides support for arrays of atomically-writable blocks.
*
* See libpmemblk(7) for details.
*/
#ifndef LIBPMEMBLK_H
#define LIBPMEMBLK_H 1
#include <sys/types.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmemblk_open pmemblk_openW
#define pmemblk_create pmemblk_createW
#define pmemblk_check pmemblk_checkW
#define pmemblk_check_version pmemblk_check_versionW
#define pmemblk_errormsg pmemblk_errormsgW
#define pmemblk_ctl_get pmemblk_ctl_getW
#define pmemblk_ctl_set pmemblk_ctl_setW
#define pmemblk_ctl_exec pmemblk_ctl_execW
#else
#define pmemblk_open pmemblk_openU
#define pmemblk_create pmemblk_createU
#define pmemblk_check pmemblk_checkU
#define pmemblk_check_version pmemblk_check_versionU
#define pmemblk_errormsg pmemblk_errormsgU
#define pmemblk_ctl_get pmemblk_ctl_getU
#define pmemblk_ctl_set pmemblk_ctl_setU
#define pmemblk_ctl_exec pmemblk_ctl_execU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* opaque type, internal to libpmemblk
*/
typedef struct pmemblk PMEMblkpool;
/*
* PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version
* of the libpmemblk API as provided by this header file. Applications can
* verify that the version available at run-time is compatible with the version
* used at compile-time by passing these defines to pmemblk_check_version().
*/
#define PMEMBLK_MAJOR_VERSION 1
#define PMEMBLK_MINOR_VERSION 1
#ifndef _WIN32
const char *pmemblk_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmemblk_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmemblk_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
/* XXX - unify minimum pool size for both OS-es */
#ifndef _WIN32
#if defined(__x86_64__) || defined(__M_X64__) || defined(__aarch64__)
/* minimum pool size: 16MiB + 4KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8))
#elif defined(__PPC64__)
/* minimum pool size: 16MiB + 128KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 128))
#else
#error unable to recognize ISA at compile time
#endif
#else
/* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64))
#endif
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#define PMEMBLK_MIN_BLK ((size_t)512)
#ifndef _WIN32
PMEMblkpool *pmemblk_open(const char *path, size_t bsize);
#else
PMEMblkpool *pmemblk_openU(const char *path, size_t bsize);
PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize);
#endif
#ifndef _WIN32
PMEMblkpool *pmemblk_create(const char *path, size_t bsize,
size_t poolsize, mode_t mode);
#else
PMEMblkpool *pmemblk_createU(const char *path, size_t bsize,
size_t poolsize, mode_t mode);
PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize,
size_t poolsize, mode_t mode);
#endif
#ifndef _WIN32
int pmemblk_check(const char *path, size_t bsize);
#else
int pmemblk_checkU(const char *path, size_t bsize);
int pmemblk_checkW(const wchar_t *path, size_t bsize);
#endif
void pmemblk_close(PMEMblkpool *pbp);
size_t pmemblk_bsize(PMEMblkpool *pbp);
size_t pmemblk_nblock(PMEMblkpool *pbp);
int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno);
int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno);
int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno);
int pmemblk_set_error(PMEMblkpool *pbp, long long blockno);
/*
* Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the
* default for that function. The replacement functions must not make calls
* back into libpmemblk.
*/
void pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
#ifndef _WIN32
const char *pmemblk_errormsg(void);
#else
const char *pmemblk_errormsgU(void);
const wchar_t *pmemblk_errormsgW(void);
#endif
#ifndef _WIN32
/* EXPERIMENTAL */
int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg);
#else
int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmemblk.h */
| 5,183 | 30.418182 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/include/libpmempool.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* libpmempool.h -- definitions of libpmempool entry points
*
* See libpmempool(7) for details.
*/
#ifndef LIBPMEMPOOL_H
#define LIBPMEMPOOL_H 1
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmempool_check_status pmempool_check_statusW
#define pmempool_check_args pmempool_check_argsW
#define pmempool_check_init pmempool_check_initW
#define pmempool_check pmempool_checkW
#define pmempool_sync pmempool_syncW
#define pmempool_transform pmempool_transformW
#define pmempool_rm pmempool_rmW
#define pmempool_check_version pmempool_check_versionW
#define pmempool_errormsg pmempool_errormsgW
#define pmempool_feature_enable pmempool_feature_enableW
#define pmempool_feature_disable pmempool_feature_disableW
#define pmempool_feature_query pmempool_feature_queryW
#else
#define pmempool_check_status pmempool_check_statusU
#define pmempool_check_args pmempool_check_argsU
#define pmempool_check_init pmempool_check_initU
#define pmempool_check pmempool_checkU
#define pmempool_sync pmempool_syncU
#define pmempool_transform pmempool_transformU
#define pmempool_rm pmempool_rmU
#define pmempool_check_version pmempool_check_versionU
#define pmempool_errormsg pmempool_errormsgU
#define pmempool_feature_enable pmempool_feature_enableU
#define pmempool_feature_disable pmempool_feature_disableU
#define pmempool_feature_query pmempool_feature_queryU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* PMEMPOOL CHECK */
/*
* pool types -- kind of pool a check context operates on
*/
enum pmempool_pool_type {
PMEMPOOL_POOL_TYPE_DETECT, /* detect the pool type automatically */
PMEMPOOL_POOL_TYPE_LOG,
PMEMPOOL_POOL_TYPE_BLK,
PMEMPOOL_POOL_TYPE_OBJ,
PMEMPOOL_POOL_TYPE_BTT,
PMEMPOOL_POOL_TYPE_RESERVED1, /* used to be cto */
};
/*
* perform repairs
*/
#define PMEMPOOL_CHECK_REPAIR (1U << 0)
/*
* emulate repairs
*/
#define PMEMPOOL_CHECK_DRY_RUN (1U << 1)
/*
* perform hazardous repairs
*/
#define PMEMPOOL_CHECK_ADVANCED (1U << 2)
/*
* do not ask before repairs
*/
#define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3)
/*
* generate info statuses
*/
#define PMEMPOOL_CHECK_VERBOSE (1U << 4)
/*
* generate string format statuses
*/
#define PMEMPOOL_CHECK_FORMAT_STR (1U << 5)
/*
* types of check statuses
*/
enum pmempool_check_msg_type {
PMEMPOOL_CHECK_MSG_TYPE_INFO,
PMEMPOOL_CHECK_MSG_TYPE_ERROR,
PMEMPOOL_CHECK_MSG_TYPE_QUESTION,
};
/*
* check result types
*/
enum pmempool_check_result {
PMEMPOOL_CHECK_RESULT_CONSISTENT,
PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT,
PMEMPOOL_CHECK_RESULT_REPAIRED,
PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR,
PMEMPOOL_CHECK_RESULT_ERROR,
PMEMPOOL_CHECK_RESULT_SYNC_REQ,
};
/*
* check context
*/
typedef struct pmempool_check_ctx PMEMpoolcheck;
/*
* finalize the check and get the result
*/
enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc);
/* PMEMPOOL RM */
#define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */
#define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */
#define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */
/*
* LIBPMEMPOOL SYNC
*/
/*
* fix bad blocks - it requires creating or reading special recovery files
*/
#define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0)
/*
* do not apply changes, only check if operation is viable
*/
#define PMEMPOOL_SYNC_DRY_RUN (1U << 1)
/*
* LIBPMEMPOOL TRANSFORM
*/
/*
* do not apply changes, only check if operation is viable
*/
#define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1)
/*
* PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current version
* of the libpmempool API as provided by this header file. Applications can
* verify that the version available at run-time is compatible with the version
* used at compile-time by passing these defines to pmempool_check_version().
*/
#define PMEMPOOL_MAJOR_VERSION 1
#define PMEMPOOL_MINOR_VERSION 3
/*
* check status
*/
struct pmempool_check_statusU {
enum pmempool_check_msg_type type;
struct {
const char *msg;
const char *answer;
} str;
};
#ifndef _WIN32
#define pmempool_check_status pmempool_check_statusU
#else
struct pmempool_check_statusW {
enum pmempool_check_msg_type type;
struct {
const wchar_t *msg;
const wchar_t *answer;
} str;
};
#endif
/*
* check context arguments
*/
struct pmempool_check_argsU {
const char *path;
const char *backup_path;
enum pmempool_pool_type pool_type;
unsigned flags;
};
#ifndef _WIN32
#define pmempool_check_args pmempool_check_argsU
#else
struct pmempool_check_argsW {
const wchar_t *path;
const wchar_t *backup_path;
enum pmempool_pool_type pool_type;
unsigned flags;
};
#endif
/*
* initialize a check context
*/
#ifndef _WIN32
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size);
#else
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size);
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size);
#endif
/*
* start / resume the check
*/
#ifndef _WIN32
struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc);
#else
struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc);
struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc);
#endif
/*
* LIBPMEMPOOL SYNC & TRANSFORM
*/
/*
* Synchronize data between replicas within a poolset.
*
* EXPERIMENTAL
*/
#ifndef _WIN32
int pmempool_sync(const char *poolset_file, unsigned flags);
#else
int pmempool_syncU(const char *poolset_file, unsigned flags);
int pmempool_syncW(const wchar_t *poolset_file, unsigned flags);
#endif
/*
* Modify internal structure of a poolset.
*
* EXPERIMENTAL
*/
#ifndef _WIN32
int pmempool_transform(const char *poolset_file_src,
const char *poolset_file_dst, unsigned flags);
#else
int pmempool_transformU(const char *poolset_file_src,
const char *poolset_file_dst, unsigned flags);
int pmempool_transformW(const wchar_t *poolset_file_src,
const wchar_t *poolset_file_dst, unsigned flags);
#endif
/* PMEMPOOL feature enable, disable, query */
/*
* feature types -- toggleable pool-format features handled by the
* pmempool_feature_enable/disable/query entry points below
*/
enum pmempool_feature {
PMEMPOOL_FEAT_SINGLEHDR,
PMEMPOOL_FEAT_CKSUM_2K,
PMEMPOOL_FEAT_SHUTDOWN_STATE,
PMEMPOOL_FEAT_CHECK_BAD_BLOCKS,
};
/* PMEMPOOL FEATURE ENABLE */
#ifndef _WIN32
int pmempool_feature_enable(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_enableW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL FEATURE DISABLE */
#ifndef _WIN32
int pmempool_feature_disable(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_disableW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL FEATURE QUERY */
#ifndef _WIN32
int pmempool_feature_query(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_queryW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL RM */
#ifndef _WIN32
int pmempool_rm(const char *path, unsigned flags);
#else
int pmempool_rmU(const char *path, unsigned flags);
int pmempool_rmW(const wchar_t *path, unsigned flags);
#endif
#ifndef _WIN32
const char *pmempool_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmempool_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmempool_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
#ifndef _WIN32
const char *pmempool_errormsg(void);
#else
const char *pmempool_errormsgU(void);
const wchar_t *pmempool_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmempool.h */
| 8,009 | 22.910448 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/include/librpmem.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL)
*
* This library provides low-level support for remote access to persistent
* memory utilizing RDMA-capable RNICs.
*
* See librpmem(7) for details.
*/
#ifndef LIBRPMEM_H
#define LIBRPMEM_H 1
#include <sys/types.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct rpmem_pool RPMEMpool;
#define RPMEM_POOL_HDR_SIG_LEN 8
#define RPMEM_POOL_HDR_UUID_LEN 16 /* uuid byte length */
#define RPMEM_POOL_USER_FLAGS_LEN 16
struct rpmem_pool_attr {
char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
uint32_t compat_features; /* mask: compatible "may" features */
uint32_t incompat_features; /* mask: "must support" features */
uint32_t ro_compat_features; /* mask: force RO if unsupported */
unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
};
RPMEMpool *rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr);
RPMEMpool *rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr);
int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr);
int rpmem_close(RPMEMpool *rpp);
#define RPMEM_PERSIST_RELAXED (1U << 0)
#define RPMEM_FLUSH_RELAXED (1U << 0)
int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane,
unsigned flags);
int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags);
int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags);
int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length,
unsigned lane);
int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane);
#define RPMEM_REMOVE_FORCE 0x1
#define RPMEM_REMOVE_POOL_SET 0x2
int rpmem_remove(const char *target, const char *pool_set, int flags);
/*
* RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of
* the librpmem API as provided by this header file. Applications can verify
* that the version available at run-time is compatible with the version used
* at compile-time by passing these defines to rpmem_check_version().
*/
#define RPMEM_MAJOR_VERSION 1
#define RPMEM_MINOR_VERSION 3
const char *rpmem_check_version(unsigned major_required,
unsigned minor_required);
const char *rpmem_errormsg(void);
/* minimum size of a pool */
#define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KB */
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#ifdef __cplusplus
}
#endif
#endif /* librpmem.h */
| 3,197 | 31.30303 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/include/libpmemobj.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj.h -- definitions of libpmemobj entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmemobj provides a pmem-resident transactional object store.
*
* See libpmemobj(7) for details.
*/
#ifndef LIBPMEMOBJ_H
#define LIBPMEMOBJ_H 1
/* umbrella header -- pulls in every part of the public libpmemobj API */
#include <libpmemobj/action.h>
#include <libpmemobj/atomic.h>
#include <libpmemobj/ctl.h>
#include <libpmemobj/iterator.h>
#include <libpmemobj/lists_atomic.h>
#include <libpmemobj/pool.h>
#include <libpmemobj/thread.h>
#include <libpmemobj/tx.h>
#endif /* libpmemobj.h */
| 662 | 23.555556 | 79 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.