repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_user_data/obj_tx_user_data.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_tx_user_data.c -- unit test for pmemobj_tx_(get/set)_user_data
*/
#include "unittest.h"
#define LAYOUT_NAME "tx_user_data"
#define USER_DATA_V1 (void *) 123456789ULL
#define USER_DATA_V2 (void *) 987654321ULL
/*
 * do_tx_set_get_user_data_nested -- do set and verify user data in a tx
 *
 * Verifies that: a nested tx sees the user data of the enclosing tx, a
 * nested tx may overwrite it, the overwrite is visible in the outer tx's
 * on-commit handler, and the value does not leak into a later, unrelated tx.
 */
static void
do_tx_set_get_user_data_nested(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		pmemobj_tx_set_user_data(USER_DATA_V1);
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
		TX_BEGIN(pop) {
			/* nested tx inherits the outer tx's user data */
			UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
			pmemobj_tx_set_user_data(USER_DATA_V2);
			UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		/* value set in the nested tx is still visible here */
		UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* a brand new tx must start with NULL user data */
	TX_BEGIN(pop) {
		UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_set_get_user_data_abort -- do set and verify user data in a tx after
 * tx abort
 *
 * Verifies that user data set before an explicit abort is still readable
 * in the on-abort handler, and that it is cleared for the next tx.
 */
static void
do_tx_set_get_user_data_abort(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		pmemobj_tx_set_user_data(USER_DATA_V1);
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		/* user data survives until the aborted tx is finished */
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
	} TX_END
	/* a subsequent tx must start with NULL user data */
	TX_BEGIN(pop) {
		UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_user_data");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	/* create a fresh minimal-size pool on the given path */
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_set_get_user_data_nested(pop);
	do_tx_set_get_user_data_abort(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 1,948 | 20.655556 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_memset/pmem_memset.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memset.c -- unit test for doing a memset
*
* usage: pmem_memset file offset length
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memset_common.h"
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
/*
 * pmem_memset_persist_wrapper -- adapt pmem_memset_persist() to the
 * flag-taking pmem_memset_fn signature; the flags argument is ignored
 */
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags;	/* the persist variant takes no flags */
	void *ret = pmem_memset_persist(pmemdest, c, len);
	return ret;
}
/*
 * pmem_memset_nodrain_wrapper -- adapt pmem_memset_nodrain() to the
 * flag-taking pmem_memset_fn signature; the flags argument is ignored
 */
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags;	/* the nodrain variant takes no flags */
	void *ret = pmem_memset_nodrain(pmemdest, c, len);
	return ret;
}
/*
 * do_memset_variants -- exercise every memset entry point: the persist and
 * nodrain wrappers, then pmem_memset() once per entry of the Flags table.
 */
static void
do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off,
		size_t bytes, persist_fn p)
{
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_persist_wrapper, 0, p);
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_nodrain_wrapper, 0, p);
	for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memset(fd, dest, file_name, dest_off, bytes,
				pmem_memset, Flags[i], p);
		/*
		 * NOTE(review): this tests libpmem but checks the
		 * PMEMOBJ_F_MEM_NOFLUSH bit -- presumably the flag values
		 * coincide with PMEM_F_MEM_NOFLUSH; verify in Flags/
		 * memset_common.h.
		 */
		if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH)
			pmem_persist(dest, bytes);
	}
}
/*
 * do_persist_ddax -- persist a range assuming Device DAX backing storage
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
	const int is_dev_dax = 1;
	util_persist_auto(is_dev_dax, ptr, size);
}
/*
 * do_persist -- persist a range assuming regular (non Device DAX) storage
 */
static void
do_persist(const void *ptr, size_t size)
{
	const int is_dev_dax = 0;
	util_persist_auto(is_dev_dax, ptr, size);
}
int
main(int argc, char *argv[])
{
	int fd;
	size_t mapped_len;
	char *dest;
	if (argc != 4)
		UT_FATAL("usage: %s file offset length", argv[0]);
	/* record the env knobs that steer the memset implementation */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem_memset %s %s %s %savx %savx512f",
			argv[2], argv[3],
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	fd = OPEN(argv[1], O_RDWR);
	/* open a pmem file and memory map it */
	if ((dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL)) == NULL)
		UT_FATAL("!Could not mmap %s\n", argv[1]);
	size_t dest_off = strtoul(argv[2], NULL, 0);
	size_t bytes = strtoul(argv[3], NULL, 0);
	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file with fd %d", fd);
	/* pick the persist routine matching the backing file type */
	persist_fn p;
	p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
	do_memset_variants(fd, dest, argv[1], dest_off, bytes, p);
	UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);
	CLOSE(fd);
	DONE(NULL);
}
| 2,428 | 22.355769 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_fragmentation/obj_fragmentation.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_fragmentation.c -- measures average heap fragmentation
*
* A pretty simplistic test that measures internal fragmentation of the
* allocator for the given size.
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_fragmentation"
#define OBJECT_OVERHEAD 64 /* account for the header added to each object */
#define MAX_OVERALL_OVERHEAD 0.10f
/*
* For the best accuracy fragmentation should be measured for one full zone
* because the metadata is preallocated. For reasonable test duration a smaller
* size must be used.
*/
#define DEFAULT_FILE_SIZE ((size_t)(1ULL << 28)) /* 256 megabytes */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_fragmentation");
	if (argc < 3)
		UT_FATAL("usage: %s allocsize filename [filesize]", argv[0]);
	size_t file_size;
	if (argc == 4)
		file_size = ATOUL(argv[3]);
	else
		file_size = DEFAULT_FILE_SIZE;
	size_t alloc_size = ATOUL(argv[1]);
	const char *path = argv[2];
	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, file_size,
			S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/*
	 * Fill the pool with fixed-size allocations until it is exhausted,
	 * summing the usable size plus per-object header overhead.
	 */
	size_t allocated = 0;
	int err = 0;
	do {
		PMEMoid oid;
		err = pmemobj_alloc(pop, &oid, alloc_size, 0, NULL, NULL);
		if (err == 0)
			allocated += pmemobj_alloc_usable_size(oid) +
				OBJECT_OVERHEAD;
	} while (err == 0);
	/* fragmentation = share of the file that could not be handed out */
	float allocated_pct = ((float)allocated / file_size);
	float overhead_pct = 1.f - allocated_pct;
	UT_ASSERT(overhead_pct <= MAX_OVERALL_OVERHEAD);
	pmemobj_close(pop);
	DONE(NULL);
}
| 1,607 | 23.738462 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_recovery/obj_recovery.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_recovery.c -- unit test for pool recovery
*/
#include "unittest.h"
#include "valgrind_internal.h"
#if VG_PMEMCHECK_ENABLED
#define VALGRIND_PMEMCHECK_END_TX VALGRIND_PMC_END_TX
#else
#define VALGRIND_PMEMCHECK_END_TX
#endif
POBJ_LAYOUT_BEGIN(recovery);
POBJ_LAYOUT_ROOT(recovery, struct root);
POBJ_LAYOUT_TOID(recovery, struct foo);
POBJ_LAYOUT_END(recovery);
#define MB (1 << 20)
struct foo {
int bar;
};
struct root {
PMEMmutex lock;
TOID(struct foo) foo;
char large_data[MB];
};
#define BAR_VALUE 5
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_recovery");
	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recovery) != 1);
	if (argc != 5)
		UT_FATAL("usage: %s [file] [lock: y/n] "
			"[cmd: c/o] [type: n/f/s/l]",
			argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	/* 'o' means reopen an existing pool (second, post-crash run) */
	int exists = argv[3][0] == 'o';
	enum { TEST_NEW, TEST_FREE, TEST_SET, TEST_LARGE } type;
	if (argv[4][0] == 'n')
		type = TEST_NEW;
	else if (argv[4][0] == 'f')
		type = TEST_FREE;
	else if (argv[4][0] == 's')
		type = TEST_SET;
	else if (argv[4][0] == 'l')
		type = TEST_LARGE;
	else
		UT_FATAL("invalid type");
	if (!exists) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(recovery),
				0, S_IWUSR | S_IRUSR)) == NULL) {
			UT_FATAL("failed to create pool\n");
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(recovery)))
				== NULL) {
			UT_FATAL("failed to open pool\n");
		}
	}
	TOID(struct root) root = POBJ_ROOT(pop, struct root);
	/* optionally run every tx under the root's pmem mutex */
	int lock_type = TX_PARAM_NONE;
	void *lock = NULL;
	if (argv[2][0] == 'y') {
		lock_type = TX_PARAM_MUTEX;
		lock = &D_RW(root)->lock;
	}
	/*
	 * Each scenario below follows the same shape: the first run commits
	 * setup, starts a second tx and exit()s mid-tx to simulate a crash;
	 * the second run (exists) asserts that recovery rolled the
	 * interrupted tx back.
	 */
	if (type == TEST_SET) {
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD_FIELD(D_RW(root)->foo, bar);
				D_RW(D_RW(root)->foo)->bar = BAR_VALUE * 2;
				/*
				 * Even though flushes are not required inside
				 * of a transaction, this is done here to
				 * suppress irrelevant pmemcheck issues, because
				 * we exit the program before the data is
				 * flushed, while preserving any real ones.
				 */
				pmemobj_persist(pop,
					&D_RW(D_RW(root)->foo)->bar,
					sizeof(int));
				/*
				 * We also need to cleanup the transaction state
				 * of pmemcheck.
				 */
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			/* recovery must restore the pre-crash value */
			UT_ASSERT(D_RW(D_RW(root)->foo)->bar == BAR_VALUE);
		}
	} else if (type == TEST_LARGE) {
		if (!exists) {
			TX_BEGIN(pop) {
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				exit(0);
			} TX_END
		} else {
			/* the interrupted memset must have been undone */
			UT_ASSERT(util_is_zeroed(D_RW(root)->large_data, MB));
			TX_BEGIN(pop) { /* we should be able to start TX */
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				pmemobj_tx_abort(0);
			} TX_END
		}
	} else if (type == TEST_NEW) {
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TOID(struct foo) f = TX_NEW(struct foo);
				TX_SET(root, foo, f);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			/* the interrupted TX_NEW must have been rolled back */
			UT_ASSERT(TOID_IS_NULL(D_RW(root)->foo));
		}
	} else { /* TEST_FREE */
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TX_FREE(D_RW(root)->foo);
				D_RW(root)->foo = TOID_NULL(struct foo);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			/* the interrupted TX_FREE must have been rolled back */
			UT_ASSERT(!TOID_IS_NULL(D_RW(root)->foo));
		}
	}
	UT_ASSERT(pmemobj_check(path, POBJ_LAYOUT_NAME(recovery)));
	pmemobj_close(pop);
	DONE(NULL);
}
| 4,244 | 20.994819 | 61 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_perror/pmem2_perror.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_perror.c -- pmem2_perror unittests
*/
#include "libpmem2.h"
#include "unittest.h"
#include "out.h"
#include "config.h"
#include "source.h"
/*
 * test_fail_pmem2_func_simple - simply check print message when func
 * from pmem2 API fails
 */
static int
test_fail_pmem2_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* offset that cannot fit in a signed 64-bit value -> guaranteed fail */
	size_t offset = (size_t)INT64_MAX + 1;
	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTne(ret, 0);
	pmem2_perror("pmem2_config_set_offset");
	return 0;
}
/*
 * test_fail_pmem2_func_format - check print message when func
 * from pmem2 API fails and ellipsis operator is used
 */
static int
test_fail_pmem2_func_format(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* offset that cannot fit in a signed 64-bit value -> guaranteed fail */
	size_t offset = (size_t)INT64_MAX + 1;
	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTne(ret, 0);
	pmem2_perror("pmem2_config_set_offset %d", 123);
	return 0;
}
/*
 * test_fail_system_func_simple - check print message when directly called
 * system func fails
 */
static int
test_fail_system_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	/* "randomly" chosen function to be failed */
	int ret = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(ret, -1);
	/* the '!' prefix makes ERR append strerror(errno) */
	ERR("!open");
	pmem2_perror("test");
	return 0;
}
/*
 * test_fail_system_func_format - check print message when directly called
 * system func fails and ellipsis operator is used
 */
static int
test_fail_system_func_format(const struct test_case *tc, int argc, char *argv[])
{
	/* "randomly" chosen function to be failed */
	int ret = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(ret, -1);
	/* the '!' prefix makes ERR append strerror(errno) */
	ERR("!open");
	pmem2_perror("test %d", 123);
	return 0;
}
/*
 * test_fail_pmem2_syscall_simple - check print message when system func
 * fails through pmem2_source_size func
 */
static int
test_fail_pmem2_syscall_simple(const struct test_case *tc,
	int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;
	/* build a source backed by an invalid handle/descriptor */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif
	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/* NOTE(review): sibling tests use UT_ASSERTne here -- confirm */
	ASSERTne(ret, 0);
	pmem2_perror("test");
	return 0;
}
/*
 * test_fail_pmem2_syscall_format - check print message when system func
 * fails through pmem2_source_size func and ellipsis operator is used
 */
static int
test_fail_pmem2_syscall_format(const struct test_case *tc,
	int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;
	/* build a source backed by an invalid handle/descriptor */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif
	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/* NOTE(review): sibling tests use UT_ASSERTne here -- confirm */
	ASSERTne(ret, 0);
	pmem2_perror("test %d", 123);
	return 0;
}
/*
 * test_simple_err_to_errno_check -- check if conversion
 * from pmem2 err value to errno works fine
 */
static int
test_simple_err_to_errno_check(const struct test_case *tc,
	int argc, char *argv[])
{
	/* pairs of pmem2 error code and the errno it must map to */
	static const struct {
		int pmem2_err;
		int expected_errno;
	} conv[] = {
		{PMEM2_E_NOSUPP, ENOTSUP},
		{PMEM2_E_UNKNOWN, EINVAL},
		{-ENOTSUP, ENOTSUP},	/* negated errno passes through */
	};
	for (size_t i = 0; i < sizeof(conv) / sizeof(conv[0]); ++i) {
		int ret_errno = pmem2_err_to_errno(conv[i].pmem2_err);
		UT_ASSERTeq(ret_errno, conv[i].expected_errno);
	}
	return 0;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_fail_pmem2_func_simple),
	TEST_CASE(test_fail_pmem2_func_format),
	TEST_CASE(test_fail_system_func_simple),
	TEST_CASE(test_fail_system_func_format),
	TEST_CASE(test_fail_pmem2_syscall_simple),
	TEST_CASE(test_fail_pmem2_syscall_format),
	TEST_CASE(test_simple_err_to_errno_check),
};
/* number of entries in test_cases */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
	START(argc, argv, "pmem2_perror");
	util_init();
	/* pmem2_perror output goes through the out_* logging layer */
	out_init("pmem2_perror", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 4,205 | 21.253968 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_mem_ext/pmem2_mem_ext.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_mem_ext.c -- test for low level functions from libpmem2
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "valgrind_internal.h"
/* signatures of the flag-taking pmem2 memory operations under test */
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memset_fn)(void *pmemdest, int c, size_t len,
		unsigned flags);
/*
 * Flag combinations exercised by the test; indexed by the "flag" command
 * line argument. Includes deliberately contradictory combinations.
 */
static unsigned Flags[] = {
	0,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
/*
 * do_memcpy_with_flag -- pmem2 memcpy with specified flag and size
 */
static void
do_memcpy_with_flag(char *addr, size_t data_size, memcpy_fn cpy_fn, int flag)
{
	/* destination region starts right after the source region */
	char *addr2 = addr + data_size;
	cpy_fn(addr2, addr, data_size, Flags[flag]);
}
/*
 * do_memmove_with_flag -- pmem2 memmove with specified flag and size
 */
static void
do_memmove_with_flag(char *addr, size_t data_size, memmove_fn mov_fn, int flag)
{
	/* destination region starts right after the source region */
	char *dst = addr + data_size;
	mov_fn(dst, addr, data_size, Flags[flag]);
}
/*
 * do_memset_with_flag -- pmem2 memset with specified flag and size
 */
static void
do_memset_with_flag(char *addr, size_t data_size, memset_fn set_fn, int flag)
{
	set_fn(addr, 1, data_size, Flags[flag]);
	/* tell pmemcheck the unflushed range is consistent */
	if (Flags[flag] & PMEM2_F_MEM_NOFLUSH)
		VALGRIND_DO_PERSIST(addr, data_size);
}
int
main(int argc, char *argv[])
{
	int fd;
	char *addr;
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;
	if (argc != 5)
		UT_FATAL("usage: %s file type size flag", argv[0]);
	/* record the env knobs that steer the memory-op implementation */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_mem_ext %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	util_init();
	/* type selects the operation: C = memcpy, S = memset, M = memmove */
	char type = argv[2][0];
	size_t data_size = strtoul(argv[3], NULL, 0);
	/* flag is an index into the Flags table */
	int flag = atoi(argv[4]);
	UT_ASSERT(flag < ARRAY_SIZE(Flags));
	fd = OPEN(argv[1], O_RDWR);
	UT_ASSERT(fd != -1);
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	mapped_len = pmem2_map_get_size(map);
	/* the helpers need room for source + destination regions */
	UT_ASSERT(data_size * 2 < mapped_len);
	addr = pmem2_map_get_address(map);
	if (addr == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);
	switch (type) {
	case 'C':
	{
		pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
		do_memcpy_with_flag(addr, data_size, memcpy_fn, flag);
		break;
	}
	case 'S':
	{
		pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
		do_memset_with_flag(addr, data_size, memset_fn, flag);
		break;
	}
	case 'M':
	{
		pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
		do_memmove_with_flag(addr, data_size, memmove_fn, flag);
		break;
	}
	default:
		UT_FATAL("!wrong type of test %c", type);
		break;
	}
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	DONE(NULL);
}
| 3,349 | 22.426573 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_util/rpmemd_util_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util_test.c -- unit tests for rpmemd_util module
*/
#include "unittest.h"
#include "rpmem_common.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "util.h"
/* structure to store results */
struct result {
	int ret;	/* 0 = policy applied, 1 = rejected */
	enum rpmem_persist_method persist_method;
	int (*persist)(const void *addr, size_t len);
	void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
};
/* all values to test; MAX_RPMEM_PM is deliberately out of range */
static const enum rpmem_persist_method pms[] =
	{RPMEM_PM_GPSPM, RPMEM_PM_APM, MAX_RPMEM_PM};
static const int is_pmems[] = {0, 1};
enum mode {
	MODE_VALID,
	MODE_INVALID,
	MODE_MAX
};
/*
 * Index ranges [begin, end) into pms (row 0) and is_pmems (row 1) for each
 * mode; MODE_INVALID covers only the out-of-range persist method.
 */
static const int ranges[2][2][2] = {
	[MODE_VALID] = {
		{0, ARRAY_SIZE(pms) - 1},
		{0, ARRAY_SIZE(is_pmems)}
	},
	[MODE_INVALID] = {
		{ARRAY_SIZE(pms) - 1, ARRAY_SIZE(pms)},
		{0, ARRAY_SIZE(is_pmems)}
	}
};
/* expected results, indexed by [persist method][is_pmem] */
static const struct result exp_results[3][2] = {
	{
		/* GPSPM and is_pmem == false */
		{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
		/* GPSPM and is_pmem == true */
		{0, RPMEM_PM_GPSPM, rpmemd_pmem_persist,
			pmem_memcpy_persist}
	}, {
		/* APM and is_pmem == false */
		{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
		/* APM and is_pmem == true */
		{0, RPMEM_PM_APM, rpmemd_flush_fatal,
			pmem_memcpy_persist}
	}, {
		/* persistency method outside of the range */
		{1, 0, 0, 0},
		{1, 0, 0, 0}
	}
};
/*
 * test_apply_pm_policy -- run rpmemd_apply_pm_policy and record in
 * result->ret whether it succeeded (0) or failed (1)
 */
static void
test_apply_pm_policy(struct result *result, int is_pmem)
{
	int failed = rpmemd_apply_pm_policy(&result->persist_method,
			&result->persist, &result->memcpy_persist, is_pmem);
	result->ret = failed ? 1 : 0;
}
/* print the command-line usage string */
#define USAGE() do {\
	UT_ERR("usage: %s valid|invalid", argv[0]);\
} while (0)
/*
 * test -- iterate over the given persist-method and is_pmem index ranges,
 * apply the pm policy for each combination and compare against exp_results
 */
static void
test(const int pm_range[2], const int is_pmem_range[2])
{
	rpmemd_log_level = RPD_LOG_NOTICE;
	int ret = rpmemd_log_init("rpmemd_log", NULL, 0);
	UT_ASSERTeq(ret, 0);
	struct result result;
	const struct result *exp_result;
	for (int pm_ind = pm_range[0]; pm_ind < pm_range[1]; ++pm_ind) {
		for (int is_pmem_ind = is_pmem_range[0];
			is_pmem_ind < is_pmem_range[1]; ++is_pmem_ind) {
			result.persist_method = pms[pm_ind];
			exp_result = &exp_results[pm_ind][is_pmem_ind];
			test_apply_pm_policy(&result, is_pmems[is_pmem_ind]);
			UT_ASSERTeq(result.ret, exp_result->ret);
			/* function pointers matter only on success */
			if (exp_result->ret == 0) {
				UT_ASSERTeq(result.persist_method,
						exp_result->persist_method);
				UT_ASSERTeq(result.persist,
						exp_result->persist);
			}
		}
	}
	rpmemd_log_close();
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmemd_util");
	if (argc < 2) {
		USAGE();
		return 1;
	}
	/* mode selects which slice of the test matrix to run */
	const char *mode_str = argv[1];
	enum mode mode = MODE_MAX;
	if (strcmp(mode_str, "valid") == 0) {
		mode = MODE_VALID;
	} else if (strcmp(mode_str, "invalid") == 0) {
		mode = MODE_INVALID;
	} else {
		USAGE();
		return 1;
	}
	UT_ASSERTne(mode, MODE_MAX);
	test(ranges[mode][0], ranges[mode][1]);
	DONE(NULL);
}
| 3,027 | 20.027778 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_defrag/obj_defrag.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_defrag.c -- unit test for pmemobj_defrag
*/
#include "unittest.h"
#include <limits.h>
#define OBJECT_SIZE 100
/*
 * defrag_basic -- allocate three objects, free the lowest one and verify
 * that pmemobj_defrag relocates the surviving objects downward while
 * preserving their contents and updating every registered reference.
 */
static void
defrag_basic(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	/* fill the third object with a known pattern to check after defrag */
	char *buff = (char *)MALLOC(OBJECT_SIZE);
	memset(buff, 0xc, OBJECT_SIZE);
	char *foop = (char *)pmemobj_direct(oid3);
	pmemobj_memcpy_persist(pop, foop, buff, OBJECT_SIZE);
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);
	/* free the lowest object so a hole opens up below oid2/oid3 */
	pmemobj_free(&oid1);
	/* oid4 is a duplicate reference to oid3; both must be updated */
	PMEMoid oid4 = oid3;
	PMEMoid *oids[] = {&oid2, &oid3, &oid4};
	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, 3, &result);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(result.total, 2);
	UT_ASSERTeq(result.relocated, 2);
	/* the object at higher location should move into the freed oid1 pos */
	foop = (char *)pmemobj_direct(oid3);
	UT_ASSERT(oid3.off < oid2.off);
	UT_ASSERTeq(oid3.off, oid4.off);
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);
	FREE(buff);
}
/* persistent object holding three object references (for pointer rewiring) */
struct test_object
{
	PMEMoid a;
	PMEMoid b;
	PMEMoid c;
};
/*
 * defrag_nested_pointers -- verify that pmemobj_defrag rewrites PMEMoid
 * references stored both in volatile memory and inside the relocated
 * persistent objects themselves (a cyclic reference graph), including many
 * duplicate references to the same object.
 */
static void
defrag_nested_pointers(PMEMobjpool *pop)
{
	int ret;
	/*
	 * This is done so that the oids below aren't allocated literally in the
	 * ideal position in the heap (chunk 0, offset 0).
	 */
#define EXTRA_ALLOCS 100
	for (int i = 0; i < EXTRA_ALLOCS; ++i) {
		PMEMoid extra;
		ret = pmemobj_zalloc(pop, &extra, OBJECT_SIZE, 0);
		UT_ASSERTeq(ret, 0);
		pmemobj_free(&extra);
	}
#undef EXTRA_ALLOCS
	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	/* wire the three objects into a small cyclic reference graph */
	struct test_object *oid1p = (struct test_object *)pmemobj_direct(oid1);
	struct test_object *oid2p = (struct test_object *)pmemobj_direct(oid2);
	struct test_object *oid3p = (struct test_object *)pmemobj_direct(oid3);
	oid1p->a = OID_NULL;
	oid1p->b = oid2;
	oid1p->c = oid1;
	pmemobj_persist(pop, oid1p, sizeof(*oid1p));
	oid2p->a = oid1;
	oid2p->b = OID_NULL;
	oid2p->c = oid3;
	pmemobj_persist(pop, oid2p, sizeof(*oid2p));
	oid3p->a = oid2;
	oid3p->b = oid2;
	oid3p->c = oid1;
	pmemobj_persist(pop, oid3p, sizeof(*oid3p));
#define OID_PTRS 12
#define EXTRA_OID_PTRS 60
#define OIDS_ALL (EXTRA_OID_PTRS + OID_PTRS)
	PMEMoid **oids = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * OIDS_ALL);
	/* many duplicate references to oid3 -- all must be rewritten */
	PMEMoid *oid3pprs = (PMEMoid *)MALLOC(sizeof(PMEMoid) * EXTRA_OID_PTRS);
	int i;
	for (i = 0; i < EXTRA_OID_PTRS; ++i) {
		oid3pprs[i] = oid3;
		oids[i] = &oid3pprs[i];
	}
	/* register the volatile oids and the embedded persistent fields */
	oids[i + 0] = &oid1;
	oids[i + 1] = &oid2;
	oids[i + 2] = &oid3;
	oids[i + 3] = &oid1p->a;
	oids[i + 4] = &oid1p->b;
	oids[i + 5] = &oid1p->c;
	oids[i + 6] = &oid2p->a;
	oids[i + 7] = &oid2p->b;
	oids[i + 8] = &oid2p->c;
	oids[i + 9] = &oid3p->a;
	oids[i + 10] = &oid3p->b;
	oids[i + 11] = &oid3p->c;
	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, OIDS_ALL, &result);
	UT_ASSERTeq(result.total, 3);
	UT_ASSERTeq(result.relocated, 3);
	UT_ASSERTeq(ret, 0);
	/* re-resolve direct pointers after relocation */
	oid1p = (struct test_object *)pmemobj_direct(oid1);
	oid2p = (struct test_object *)pmemobj_direct(oid2);
	oid3p = (struct test_object *)pmemobj_direct(oid3);
	/* NOTE: this loop's `i` intentionally shadows the outer `i` */
	for (int i = 0; i < EXTRA_OID_PTRS; ++i) {
		UT_ASSERTeq(oid3pprs[i].off, oid3.off);
	}
	/* the reference graph must be intact after relocation */
	UT_ASSERTeq(oid1p->a.off, 0);
	UT_ASSERTeq(oid1p->b.off, oid2.off);
	UT_ASSERTeq(oid1p->c.off, oid1.off);
	UT_ASSERTeq(oid2p->a.off, oid1.off);
	UT_ASSERTeq(oid2p->b.off, 0);
	UT_ASSERTeq(oid2p->c.off, oid3.off);
	UT_ASSERTeq(oid3p->a.off, oid2.off);
	UT_ASSERTeq(oid3p->b.off, oid2.off);
	UT_ASSERTeq(oid3p->c.off, oid1.off);
	pmemobj_free(&oid1);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);
	FREE(oids);
	FREE(oid3pprs);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_defrag");
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	/* double the minimum size so both scenarios fit comfortably */
	pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
			PMEMOBJ_MIN_POOL * 2, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	defrag_basic(pop);
	defrag_nested_pointers(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 4,429 | 22.817204 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_deep_flush/pmem2_deep_flush.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_deep_flush.c -- unit test for pmem_deep_flush()
*
* usage: pmem2_deep_flush file deep_persist_size offset
*
* pmem2_deep_flush depending on the mapping granularity is performed using one
* of the following paths:
* - page: NOP
* - cache: pmem2_deep_flush_dax
* - byte: pmem2_persist_cpu_cache + pmem2_deep_flush_dax
*
* Where pmem2_deep_flush_dax:
* - pmem2_get_type_from_stat is used to determine a file type
* - for regular files performs pmem2_flush_file_buffers_os OR
* - for Device DAX:
* - is looking for Device DAX region (pmem2_get_region_id)
* - is constructing the region deep flush file paths
* - opens deep_flush file (os_open)
* - reads deep_flush file (read)
* - performs a write to it (write)
*
* Where pmem2_persist_cpu_cache performs:
* - flush (replaced by mock_flush) AND
* - drain (replaced by mock_drain)
*
* Additionally, for the sake of this test, the following functions are
* replaced:
* - pmem2_get_type_from_stat (to control perceived file type)
* - pmem2_flush_file_buffers_os (for counting calls)
* - pmem2_get_region_id (to prevent reading sysfs in search for non
* existing Device DAXes)
* or mocked:
* - os_open (to prevent opening non existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
* - write (for counting writes to non-existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
*
* NOTE: In normal usage the persist function precedes any call to
* pmem2_deep_flush. This test aims to validate the pmem2_deep_flush
* function and so the persist function is omitted.
*/
#include "source.h"
#ifndef _WIN32
#include <sys/sysmacros.h>
#endif
#include "mmap.h"
#include "persist.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "unittest.h"
/* call counters inspected by counters_check_n_reset() */
static int n_file_buffs_flushes = 0;	/* pmem2_flush_file_buffers_os calls */
static int n_fences = 0;		/* drain (fence) calls */
static int n_flushes = 0;		/* flush calls */
static int n_writes = 0;		/* writes to the deep_flush mock file */
static int n_reads = 0;			/* reads of the deep_flush mock file */
/* points at map->source.value.ftype so tests can switch the file type */
static enum pmem2_file_type *ftype_value;
static int read_invalid = 0;		/* make the mocked read() fail */
static int deep_flush_not_needed = 0;	/* mocked deep_flush reads '0' */
#ifndef _WIN32
#define MOCK_FD 999
#define MOCK_REG_ID 888
#define MOCK_BUS_DEVICE_PATH "/sys/bus/nd/devices/region888/deep_flush"
#define MOCK_DEV_ID 777UL
/*
 * pmem2_get_region_id -- redefine libpmem2 function; always reports the
 * mocked region id so no real sysfs lookup takes place
 */
int
pmem2_get_region_id(const struct pmem2_source *src,
	unsigned *region_id)
{
	(void) src;	/* the mock ignores the actual source */
	*region_id = MOCK_REG_ID;
	return 0;
}
/*
 * os_open -- os_open mock
 *
 * Intercepts opens of the mocked region's deep_flush sysfs path and hands
 * back MOCK_FD; every other path goes to the real os_open.
 */
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	if (strcmp(path, MOCK_BUS_DEVICE_PATH) == 0)
		return MOCK_FD;
	/* forward the optional mode argument to the real implementation */
	va_list ap;
	va_start(ap, flags);
	int mode = va_arg(ap, int);
	va_end(ap);
	return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END
/*
 * write -- write mock
 *
 * Accepts only the single-byte '1' deep-flush trigger on MOCK_FD and
 * counts it; anything else fails the assertions.
 */
FUNC_MOCK(write, int, int fd, const void *buffer, size_t count)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(*(char *)buffer, '1');
	UT_ASSERTeq(count, 1);
	UT_ASSERTeq(fd, MOCK_FD);
	++n_writes;
	return 1;
}
FUNC_MOCK_END
/*
 * read -- read mock
 *
 * Simulates reading the region's deep_flush file: returns "1\n" by
 * default, "0\n" when deep_flush_not_needed is set, and a short read (0)
 * when read_invalid is set. Always counts the call.
 */
FUNC_MOCK(read, int, int fd, void *buffer, size_t nbytes)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(nbytes, 2);
	UT_ASSERTeq(fd, MOCK_FD);
	UT_OUT("mocked read, fd %d", fd);
	char pattern[2] = {'1', '\n'};
	int ret = sizeof(pattern);
	if (deep_flush_not_needed)
		pattern[0] = '0';
	if (read_invalid) {
		ret = 0;	/* report failure without filling the buffer */
		goto end;
	}
	memcpy(buffer, pattern, sizeof(pattern));
end:
	++n_reads;
	return ret;
}
FUNC_MOCK_END
#endif /* not _WIN32 */
/*
 * mock_flush -- count flush calls in the test
 */
static void
mock_flush(const void *addr, size_t len)
{
	(void) addr;
	(void) len;
	n_flushes += 1;
}
/*
 * mock_drain -- count drain (fence) calls in the test
 */
static void
mock_drain(void)
{
	n_fences += 1;
}
/*
 * pmem2_arch_init -- attach flush and drain functions replacements
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	/* route all flush/fence activity through the counting mocks */
	info->flush = mock_flush;
	info->fence = mock_drain;
}
/*
 * pmem2_map_find -- redefine libpmem2 function, redefinition is needed
 * for a proper compilation of the test. NOTE: this function is not used
 * in the test.
 */
struct pmem2_map *
pmem2_map_find(const void *addr, size_t len)
{
	/* must never be reached in this test */
	UT_ASSERT(0);
	return NULL;
}
/*
 * pmem2_flush_file_buffers_os -- redefine libpmem2 function; the mock only
 * counts invocations and always reports success
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
	int autorestart)
{
	(void) map;
	(void) addr;
	(void) len;
	(void) autorestart;
	n_file_buffs_flushes += 1;
	return 0;
}
/*
 * map_init -- fill pmem2_map in minimal scope
 */
static void
map_init(struct pmem2_map *map)
{
	const size_t length = 8 * MEGABYTE;
	map->content_length = length;
	/*
	 * The test needs to allocate more memory because some test cases
	 * validate behavior with address beyond mapping.
	 */
	map->addr = MALLOC(2 * length);
#ifndef _WIN32
	map->source.type = PMEM2_SOURCE_FD;
	/* mocked device ID for device DAX */
	map->source.value.st_rdev = MOCK_DEV_ID;
#else
	map->source.type = PMEM2_SOURCE_HANDLE;
#endif
	/* remember where the file type lives so tests can override it */
	ftype_value = &map->source.value.ftype;
}
/*
 * counters_check_n_reset -- check numbers of uses of deep-flushing elements
 * and reset them
 *
 * Also resets the read_invalid / deep_flush_not_needed knobs so each check
 * starts from a clean slate.
 */
static void
counters_check_n_reset(int msynces, int flushes, int fences,
	int writes, int reads)
{
	UT_ASSERTeq(n_file_buffs_flushes, msynces);
	UT_ASSERTeq(n_flushes, flushes);
	UT_ASSERTeq(n_fences, fences);
	UT_ASSERTeq(n_writes, writes);
	UT_ASSERTeq(n_reads, reads);
	n_file_buffs_flushes = 0;
	n_flushes = 0;
	n_fences = 0;
	n_writes = 0;
	n_reads = 0;
	read_invalid = 0;
	deep_flush_not_needed = 0;
}
/*
 * test_deep_flush_func -- test pmem2_deep_flush for all granularity options
 *
 * For a regular file: page granularity is a NOP, cache-line and byte
 * granularity each fall back to one file-buffers flush.
 */
static int
test_deep_flush_func(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	*ftype_value = PMEM2_FTYPE_REG;
	void *addr = map.addr;
	size_t len = map.content_length;
	/* page granularity: deep flush is a NOP */
	map.effective_granularity = PMEM2_GRANULARITY_PAGE;
	pmem2_set_flush_fns(&map);
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 0, 0, 0, 0);
	/* cache-line granularity: one file-buffers flush expected */
	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);
	/* byte granularity: also one file-buffers flush expected */
	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);
	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_func_devdax -- test pmem2_deep_flush with mocked DAX devices
 *
 * For each granularity checks three cases: deep_flush file reports '1'
 * (one write issued), reports '0' (no write), and an invalid read (no
 * write, still success).
 */
static int
test_deep_flush_func_devdax(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	void *addr = map.addr;
	size_t len = map.content_length;
	*ftype_value = PMEM2_FTYPE_DEVDAX;
	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);
	/* deep_flush file says flushing is not needed -> no write */
	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	/* unreadable deep_flush file -> no write, still succeeds */
	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	/* repeat the same three cases with byte granularity */
	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);
	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);
	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_range_beyond_mapping -- test pmem2_deep_flush with
 * the address that goes beyond mapping
 */
static int
test_deep_flush_range_beyond_mapping(const struct test_case *tc, int argc,
	char *argv[])
{
	struct pmem2_map map;
	map_init(&map);

	/* set address completely beyond mapping */
	void *addr = (void *)((uintptr_t)map.addr + map.content_length);
	size_t len = map.content_length;
	int ret = pmem2_deep_flush(&map, addr, len);
	/* a range fully outside the mapping must be rejected */
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	/*
	 * set address in the middle of mapping, which makes range partially
	 * beyond mapping
	 */
	addr = (void *)((uintptr_t)map.addr + map.content_length / 2);
	ret = pmem2_deep_flush(&map, addr, len);
	/* a partially overlapping range is rejected as well */
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	FREE(map.addr);
	return 0;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_deep_flush_func),
	TEST_CASE(test_deep_flush_func_devdax),
	TEST_CASE(test_deep_flush_range_beyond_mapping),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))

/*
 * main -- dispatch the test case named on the command line
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_deep_flush");
	pmem2_persist_init();
	util_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
| 8,865 | 22.270341 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_direct/obj_direct.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_direct.c -- unit test for pmemobj_direct()
*/
#include "obj.h"
#include "obj_direct.h"
#include "sys_util.h"
#include "unittest.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock1;
static os_mutex_t lock2;
static os_cond_t sync_cond1;
static os_cond_t sync_cond2;
static int cond1;
static int cond2;
static PMEMoid thread_oid;
/*
 * obj_direct -- translate an oid both ways (inline and non-inline
 * pmemobj_direct) and require the two translations to agree
 */
static void *
obj_direct(PMEMoid oid)
{
	void *inlined = obj_direct_inline(oid);
	void *called = obj_direct_non_inline(oid);

	UT_ASSERTeq(inlined, called);

	return called;
}
/*
 * test_worker -- verifies an object is reachable while its pool is open,
 * then waits for main to free the object and close the pool and verifies
 * the translation now fails
 */
static void *
test_worker(void *arg)
{
	/* check before pool is closed, then let main continue */
	UT_ASSERTne(obj_direct(thread_oid), NULL);
	util_mutex_lock(&lock1);
	cond1 = 1;
	os_cond_signal(&sync_cond1);
	util_mutex_unlock(&lock1);

	/* wait for main thread to free & close, then check */
	util_mutex_lock(&lock2);
	while (!cond2)
		os_cond_wait(&sync_cond2, &lock2);
	util_mutex_unlock(&lock2);
	/* the pool is closed now, so the oid must no longer resolve */
	UT_ASSERTeq(obj_direct(thread_oid), NULL);
	return NULL;
}
/*
 * main -- create several pools, verify pmemobj_direct() translation for
 * valid, freed and closed-pool oids, coordinating with a worker thread
 * that re-checks one oid before and after the pool goes away.
 *
 * Fixes: the 'path' buffer was allocated with MALLOC and never freed
 * (memory leak); the snprintf result is now cast before the unsigned
 * comparison to avoid a sign-compare warning (behavior unchanged since
 * ret < 0 is checked first).
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct");

	if (argc != 3)
		UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);

	unsigned npools = ATOU(argv[2]);
	const char *dir = argv[1];
	int r;

	util_mutex_init(&lock1);
	util_mutex_init(&lock2);
	util_cond_init(&sync_cond1);
	util_cond_init(&sync_cond2);
	cond1 = cond2 = 0;

	PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
	UT_ASSERTne(pops, NULL);

	size_t length = strlen(dir) + MAX_PATH_LEN;
	char *path = MALLOC(length);

	for (unsigned i = 0; i < npools; ++i) {
		int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
			dir, i);
		if (ret < 0 || (size_t)ret >= length)
			UT_FATAL("snprintf: %d", ret);
		pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);

		if (pops[i] == NULL)
			UT_FATAL("!pmemobj_create");
	}

	/* the path buffer is only needed while creating the pools */
	FREE(path);

	PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(oids, NULL);
	PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(tmpoids, NULL);

	/* a null oid never resolves */
	oids[0] = OID_NULL;
	UT_ASSERTeq(obj_direct(oids[0]), NULL);

	for (unsigned i = 0; i < npools; ++i) {
		/* zero offset is not a valid object */
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, 0};
		UT_ASSERTeq(obj_direct(oids[i]), NULL);

		/* heap offset resolves to pool base + offset */
		uint64_t off = pops[i]->heap_offset;
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
		UT_ASSERTeq((char *)obj_direct(oids[i]) - off,
			(char *)pops[i]);

		r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
		UT_ASSERTeq(r, 0);
	}

	r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
	UT_ASSERTeq(r, 0);
	UT_ASSERTne(obj_direct(thread_oid), NULL);

	os_thread_t t;
	THREAD_CREATE(&t, NULL, test_worker, NULL);

	/* wait for the worker thread to perform the first check */
	util_mutex_lock(&lock1);
	while (!cond1)
		os_cond_wait(&sync_cond1, &lock1);
	util_mutex_unlock(&lock1);

	for (unsigned i = 0; i < npools; ++i) {
		/* a live allocation resolves; a freed one does not */
		UT_ASSERTne(obj_direct(tmpoids[i]), NULL);

		pmemobj_free(&tmpoids[i]);

		UT_ASSERTeq(obj_direct(tmpoids[i]), NULL);
		pmemobj_close(pops[i]);
		UT_ASSERTeq(obj_direct(oids[i]), NULL);
	}

	/* signal the worker that we're free and closed */
	util_mutex_lock(&lock2);
	cond2 = 1;
	os_cond_signal(&sync_cond2);
	util_mutex_unlock(&lock2);

	THREAD_JOIN(&t, NULL);
	util_cond_destroy(&sync_cond1);
	util_cond_destroy(&sync_cond2);
	util_mutex_destroy(&lock1);
	util_mutex_destroy(&lock2);
	FREE(pops);
	FREE(tmpoids);
	FREE(oids);

	DONE(NULL);
}
| 3,476 | 22.653061 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_memcheck/obj_memcheck.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(mc);
POBJ_LAYOUT_ROOT(mc, struct root);
POBJ_LAYOUT_TOID(mc, struct struct1);
POBJ_LAYOUT_END(mc);
struct struct1 {
int fld;
int dyn[];
};
struct root {
TOID(struct struct1) s1;
TOID(struct struct1) s2;
};
/*
 * test_memcheck_bug -- exercise a memcheck mempool corner case:
 * alloc/free/alloc at the same address inside a client mempool, then
 * mark a prefix NOACCESS and touch its last byte; the write is expected
 * to be flagged by memcheck (verified against the test's .err files,
 * not visible here).
 */
static void
test_memcheck_bug(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[100];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 8);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MAKE_MEM_NOACCESS(tmp, 8);
	/* deliberate write into the NOACCESS prefix */
	tmp[7] = 0x66;
#endif
}
/*
 * test_memcheck_bug2 -- exercise a known memcheck diagnostics quirk for
 * writes into a freed mempool chunk (see the NOTE inside)
 */
static void
test_memcheck_bug2(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[1000];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 128, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 128);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 256, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 256);
	/*
	 * This should produce warning:
	 * Address ... is 0 bytes inside a block of size 128 bytes freed.
	 * instead, it produces a warning:
	 * Address ... is 0 bytes after a block of size 128 freed
	 */
	int *data = (int *)(tmp + 256);
	/* deliberate write into the freed chunk */
	*data = 0x66;
#endif
}
/*
 * test_everything -- perform a series of DELIBERATELY invalid accesses
 * (uninitialized read, writes to freed objects, out-of-bounds writes) on
 * pmemobj allocations; each one is test fodder for memcheck and must not
 * be "fixed"
 */
static void
test_everything(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(mc),
		PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	struct root *rt = D_RW(POBJ_ROOT(pop, struct root));
	POBJ_ALLOC(pop, &rt->s1, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	struct struct1 *s1 = D_RW(rt->s1);
	struct struct1 *s2;

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	s2 = D_RW(rt->s2);
	/* s2 keeps pointing at the object after this free */
	POBJ_FREE(&rt->s2);

	/* read of uninitialized variable */
	if (s1->fld)
		UT_OUT("%d", 1);

	/* write to freed object */
	s2->fld = 7;
	pmemobj_persist(pop, s2, sizeof(*s2));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	s2 = D_RW(rt->s2);
	memset(s2, 0, pmemobj_alloc_usable_size(rt->s2.oid));
	s2->fld = 12; /* ok */
	/* invalid write (far out of bounds of the flexible array) */
	s2->dyn[100000] = 9;
	/* invalid write (just out of bounds) */
	s2->dyn[1000] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1));

	POBJ_REALLOC(pop, &rt->s2, struct struct1,
			sizeof(struct struct1) + 100 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 9; /* ok */
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* invalid write to REALLOCated and FREEd object */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	POBJ_REALLOC(pop, &rt->s2, struct struct1,
			sizeof(struct struct1) + 30 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 0;
	s2->dyn[29] = 29; /* both within the reallocated size: ok */
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* another write-after-free */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));

	pmemobj_close(pop);
}
/* usage -- print the invocation syntax and abort */
static void
usage(const char *progname)
{
	UT_FATAL("usage: %s [m|t] file-name", progname);
}
/*
 * main -- run either the standalone memcheck repro ("m") or the pmemobj
 * access-error scenario ("t", which needs a pool file); the second
 * memcheck repro runs in both modes
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memcheck");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(mc) != 1);

	if (argc < 2)
		usage(argv[0]);

	if (strcmp(argv[1], "m") == 0)
		test_memcheck_bug();
	else if (strcmp(argv[1], "t") == 0) {
		if (argc < 3)
			usage(argv[0]);
		test_everything(argv[2]);
	} else
		usage(argv[0]);

	test_memcheck_bug2();

	DONE(NULL);
}
| 3,591 | 20.769697 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_defrag_advanced/obj_defrag_advanced.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* obj_defrag_advanced.c -- test for libpmemobj defragmentation feature
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include "rand.h"
#include "vgraph.h"
#include "pgraph.h"
#include "os_thread.h"
#include "unittest.h"
/* parameters used by every graph create-and-defrag operation */
struct create_params_t {
	uint64_t seed;	/* base RNG seed (offset per thread in the MT test) */
	rng_t rng;	/* RNG state; seeded later via randomize_r() */
	struct vgraph_params vparams;	/* volatile graph shape limits */
	struct pgraph_params pparams;	/* persistent graph copy count */
};
/*
 * graph_create -- create a graph: build a volatile blueprint, materialize
 * it as a persistent graph under *oidp, then discard the blueprint
 */
static void
graph_create(struct create_params_t *task, PMEMobjpool *pop, PMEMoid *oidp,
	rng_t *rngp)
{
	struct vgraph_t *blueprint = vgraph_new(&task->vparams, rngp);

	pgraph_new(pop, oidp, blueprint, &task->pparams, rngp);

	vgraph_delete(blueprint);
}
/*
 * graph_defrag -- defragment the pool
 * - collect pointers to all PMEMoids (node handles + per-node edge oids)
 * - do a sanity checks (each oid valid, each pointer collected only once)
 * - call pmemobj_defrag
 * - return # of relocated objects
 */
static size_t
graph_defrag(PMEMobjpool *pop, PMEMoid oid)
{
	struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(oid);

	/* count number of oids */
	unsigned oidcnt = pgraph->nodes_num;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
				(pgraph->nodes[i]);
		oidcnt += pnode->edges_num;
	}

	/* create array of oid pointers */
	PMEMoid **oidv = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * oidcnt);
	unsigned oidi = 0;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		/* the node handle itself, then every edge oid it stores */
		oidv[oidi++] = &pgraph->nodes[i];

		struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
				(pgraph->nodes[i]);
		for (unsigned j = 0; j < pnode->edges_num; ++j) {
			oidv[oidi++] = &pnode->edges[j];
		}
	}
	UT_ASSERTeq(oidi, oidcnt);

	/* check if all oids are valid */
	for (unsigned i = 0; i < oidcnt; ++i) {
		void *ptr = pmemobj_direct(*oidv[i]);
		UT_ASSERTne(ptr, NULL);
	}

	/* check if all oids appear only once (O(n^2), intentional sanity) */
	for (unsigned i = 0; i < oidcnt - 1; ++i) {
		for (unsigned j = i + 1; j < oidcnt; ++j) {
			UT_ASSERTne(oidv[i], oidv[j]);
		}
	}

	struct pobj_defrag_result result;
	int ret = pmemobj_defrag(pop, oidv, oidcnt, &result);
	UT_ASSERTeq(ret, 0);
	/* every node must have been processed */
	UT_ASSERTeq(result.total, pgraph->nodes_num);

	FREE(oidv);

	return result.relocated;
}
/*
 * graph_defrag_ntimes -- defragment the graph repeatedly (at most
 * max_rounds times), stopping early once a round relocates nothing
 */
static void
graph_defrag_ntimes(PMEMobjpool *pop, PMEMoid oid, unsigned max_rounds)
{
	unsigned round = 0;
	size_t moved;

	do {
		moved = graph_defrag(pop, oid);
		++round;
	} while (moved != 0 && round < max_rounds);
}
#define HAS_TO_EXIST (1)

/*
 * graph_dump -- write a human-readable dump of the graph to a text file;
 * when has_exist is set, a missing graph is a test failure
 */
static void
graph_dump(PMEMoid oid, const char *path, int has_exist)
{
	struct pgraph_t *graph = (struct pgraph_t *)pmemobj_direct(oid);

	if (graph == NULL) {
		/* only acceptable when the caller did not require a graph */
		if (has_exist)
			UT_ASSERTne(graph, NULL);
		return;
	}

	pgraph_print(graph, path);
}
#define FGETS_BUFF_LEN 1024

/*
 * dump_compare -- compare two graph dumps line by line
 * Test fails if the files differ in content or length.
 */
static void
dump_compare(const char *path1, const char *path2)
{
	FILE *f1 = FOPEN(path1, "r");
	FILE *f2 = FOPEN(path2, "r");

	char line1[FGETS_BUFF_LEN];
	char line2[FGETS_BUFF_LEN];

	while (1) {
		char *r1 = fgets(line1, FGETS_BUFF_LEN, f1);
		char *r2 = fgets(line2, FGETS_BUFF_LEN, f2);

		/* both files have to end at the same time */
		if (r1 == NULL) {
			UT_ASSERTeq(r2, NULL);
			break;
		}

		UT_ASSERTeq(r1, line1);
		UT_ASSERTeq(r2, line2);
		UT_ASSERTeq(strcmp(line1, line2), 0);
	}

	FCLOSE(f1);
	FCLOSE(f2);
}
/*
 * create_params_init -- initialize create params with sane defaults;
 * note that params->rng is NOT seeded here (callers use randomize() or
 * randomize_r() before generating anything)
 */
static void
create_params_init(struct create_params_t *params)
{
	params->seed = 1;

	/* good enough defaults - no magic here */
	params->vparams.max_nodes = 50;
	params->vparams.max_edges = 10;
	params->vparams.range_nodes = 10;
	params->vparams.range_edges = 10;
	params->vparams.min_pattern_size = 8;
	params->vparams.max_pattern_size = 1024;
	params->pparams.graph_copies = 10;
}
/* global state */
static struct global_t {
	PMEMobjpool *pop;	/* pool shared by all ops until closed */
} global;

/*
 * PMEMobj root object structure
 */
struct root_t {
	unsigned graphs_num;	/* # of valid handles in graphs[] */
	PMEMoid graphs[];	/* one handle per graph (per thread in MT) */
};

/*
 * root_size -- calculate a root object size: large enough for graph_num
 * handles, but never smaller than min_root_size
 */
static inline size_t
root_size(unsigned graph_num, size_t min_root_size)
{
	size_t size = sizeof(struct root_t) + sizeof(PMEMoid) * graph_num;
	return MAX(size, min_root_size);
}
/* sentinel: read graphs_num from the existing root instead of the caller */
#define QUERY_GRAPHS_NUM UINT_MAX

/*
 * get_root -- (re)allocate the root object sized for graphs_num handles;
 * with QUERY_GRAPHS_NUM the count stored in the existing root is used
 */
static struct root_t *
get_root(unsigned graphs_num, size_t min_root_size)
{
	PMEMoid roid;
	struct root_t *root;

	if (graphs_num == QUERY_GRAPHS_NUM) {
		/* allocate a root object without graphs */
		roid = pmemobj_root(global.pop, root_size(0, 0));
		if (OID_IS_NULL(roid))
			UT_FATAL("!pmemobj_root:");
		root = (struct root_t *)pmemobj_direct(roid);
		UT_ASSERTne(root, NULL);
		graphs_num = root->graphs_num;
	}

	UT_ASSERT(graphs_num > 0);

	/* reallocate a root object with all known graphs */
	roid = pmemobj_root(global.pop, root_size(graphs_num, min_root_size));
	if (OID_IS_NULL(roid))
		UT_FATAL("!pmemobj_root:");
	root = (struct root_t *)pmemobj_direct(roid);
	UT_ASSERTne(root, NULL);

	return root;
}
/*
 * parse_nonzero -- parse a non-zero unsigned value from a CLI argument;
 * aborts the test on zero or out-of-range input
 */
static void
parse_nonzero(unsigned *var, const char *arg)
{
	unsigned long parsed = STRTOUL(arg, NULL, 10);

	UT_ASSERTne(parsed, 0);
	UT_ASSERT(parsed < UINT_MAX);

	*var = parsed;
}
#define GRAPH_LAYOUT POBJ_LAYOUT_NAME(graph)

/*
 * op_pool_create -- create a pool
 * Returns the number of consumed CLI arguments.
 */
static int
op_pool_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <path>", tc->name);

	/* parse arguments */
	const char *path = argv[0];

	/* open a pool (size 0 means: take the size of the existing file) */
	global.pop = pmemobj_create(path, GRAPH_LAYOUT, 0, S_IWUSR | S_IRUSR);
	if (global.pop == NULL) {
		UT_FATAL("!pmemobj_create: %s", path);
	}

	return 1;
}
/*
 * op_pool_close -- close the poll and drop the stale global handle
 */
static int
op_pool_close(const struct test_case *tc, int argc, char *argv[])
{
	pmemobj_close(global.pop);
	global.pop = NULL;

	return 0;
}
/*
 * op_graph_create -- create a single graph and store its handle in the
 * root object
 */
static int
op_graph_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
				" <min-root-size>", tc->name);

	/* parse arguments */
	struct create_params_t cparams;
	create_params_init(&cparams);
	parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
	parse_nonzero(&cparams.vparams.max_edges, argv[1]);
	parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
	size_t min_root_size = STRTOULL(argv[3], NULL, 10);

	struct root_t *root = get_root(1, min_root_size);

	/* seed the global RNG */
	randomize(cparams.seed);

	/* generate a single graph */
	graph_create(&cparams, global.pop, &root->graphs[0], NULL);
	root->graphs_num = 1;
	pmemobj_persist(global.pop, root, root_size(1, min_root_size));

	return 4;
}
/*
 * op_graph_dump -- dump the (single) graph stored in the root to a file
 */
static int
op_graph_dump(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <dump>", tc->name);

	/* parse arguments */
	const char *dump = argv[0];

	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
	UT_ASSERTeq(root->graphs_num, 1);

	/* dump the graph before defrag */
	graph_dump(root->graphs[0], dump, HAS_TO_EXIST);

	return 1;
}
/*
 * op_graph_defrag -- defrag the (single) graph stored in the root
 */
static int
op_graph_defrag(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <max-rounds>", tc->name);

	/* parse arguments */
	unsigned max_rounds;
	parse_nonzero(&max_rounds, argv[0]);

	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
	UT_ASSERTeq(root->graphs_num, 1);

	/* do the defrag */
	graph_defrag_ntimes(global.pop, root->graphs[0], max_rounds);

	return 1;
}
/*
 * op_dump_compare -- consume two dump paths from the CLI and require
 * their contents to match exactly
 */
static int
op_dump_compare(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: %s <dump1> <dump2>", tc->name);

	dump_compare(argv[0], argv[1]);

	return 2;
}
/* per-thread arguments for create_n_defrag_thread */
struct create_n_defrag_params_t {
	char dump1[PATH_MAX];	/* dump path: graph right after creation */
	char dump2[PATH_MAX];	/* dump path: graph after defrag */
	struct create_params_t cparams;	/* private copy, per-thread seed */
	PMEMobjpool *pop;	/* pool shared by all threads */
	PMEMoid *oidp;		/* this thread's slot in root->graphs[] */
	unsigned max_rounds;	/* defrag round cap per cycle */
	unsigned ncycles;	/* # of create/defrag/compare cycles */
};
/*
 * create_n_defrag_thread -- create and defrag graphs mutiple times;
 * each cycle dumps the graph before and after defrag and asserts the
 * dumps are identical (defrag must not alter graph content)
 */
static void *
create_n_defrag_thread(void *arg)
{
	struct create_n_defrag_params_t *params =
		(struct create_n_defrag_params_t *)arg;

	struct create_params_t *cparams = &params->cparams;

	for (unsigned i = 0; i < params->ncycles; ++i) {
		graph_create(cparams, global.pop, params->oidp, &cparams->rng);
		graph_dump(*params->oidp, params->dump1, HAS_TO_EXIST);
		graph_defrag_ntimes(params->pop, *params->oidp,
				params->max_rounds);
		graph_dump(*params->oidp, params->dump2, HAS_TO_EXIST);
		dump_compare(params->dump1, params->dump2);
		pgraph_delete(params->oidp);
	}

	return NULL;
}
/*
 * op_graph_create_n_defrag_mt -- multi-threaded graphs creation & defrag;
 * spawns n-threads workers, each owning one slot in the root's graph
 * array and running n-create-defrag-cycles independent cycles
 */
static int
op_graph_create_n_defrag_mt(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 8)
		UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
				" <min-root-size> <max-defrag-rounds> <n-threads>"
				"<n-create-defrag-cycles> <dump-suffix>",
				tc->name);

	/* parse arguments */
	struct create_params_t cparams;
	create_params_init(&cparams);
	parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
	parse_nonzero(&cparams.vparams.max_edges, argv[1]);
	parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
	size_t min_root_size = STRTOULL(argv[3], NULL, 10);
	unsigned max_rounds;
	parse_nonzero(&max_rounds, argv[4]);
	unsigned nthreads;
	parse_nonzero(&nthreads, argv[5]);
	unsigned ncycles;
	parse_nonzero(&ncycles, argv[6]);
	char *dump_suffix = argv[7];

	/* one graph slot per thread */
	struct root_t *root = get_root(nthreads, min_root_size);
	root->graphs_num = nthreads;
	pmemobj_persist(global.pop, root, sizeof(*root));

	/* prepare threads params */
	struct create_n_defrag_params_t *paramss =
			(struct create_n_defrag_params_t *)MALLOC(
					sizeof(*paramss) * nthreads);

	for (unsigned i = 0; i < nthreads; ++i) {
		struct create_n_defrag_params_t *params = &paramss[i];

		/* per-thread dump files so threads do not clobber each other */
		SNPRINTF(params->dump1, PATH_MAX, "dump_1_th%u_%s.log",
				i, dump_suffix);
		SNPRINTF(params->dump2, PATH_MAX, "dump_2_th%u_%s.log",
				i, dump_suffix);

		memcpy(&params->cparams, &cparams, sizeof(cparams));
		/* distinct seed per thread, each with its own RNG state */
		params->cparams.seed += i;
		randomize_r(&params->cparams.rng, params->cparams.seed);
		params->pop = global.pop;
		params->oidp = &root->graphs[i];
		params->max_rounds = max_rounds;
		params->ncycles = ncycles;
	}

	/* spawn threads */
	os_thread_t *threads = (os_thread_t *)MALLOC(
			sizeof(*threads) * nthreads);
	for (unsigned i = 0; i < nthreads; ++i)
		THREAD_CREATE(&threads[i], NULL, create_n_defrag_thread,
				&paramss[i]);

	/* join all threads */
	void *ret = NULL;
	for (unsigned i = 0; i < nthreads; ++i) {
		THREAD_JOIN(&threads[i], &ret);
		UT_ASSERTeq(ret, NULL);
	}

	FREE(threads);
	FREE(paramss);

	return 8;
}
/*
 * op_pool_open -- open an existing pool (used by the pool-validation
 * flow together with op_graph_dump_all)
 *
 * Fix: the fatal message wrongly referred to pmemobj_create although the
 * failing call is pmemobj_open, which would mislead anyone debugging a
 * failed open.
 */
static int
op_pool_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <path>", tc->name);

	/* parse arguments */
	const char *path = argv[0];

	/* open a pool */
	global.pop = pmemobj_open(path, GRAPH_LAYOUT);
	if (global.pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	return 1;
}
/*
 * op_graph_dump_all -- dump all graphs recorded in the root object, one
 * file per graph, named <dump-prefix>_<index>.log
 */
static int
op_graph_dump_all(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <dump-prefix>", tc->name);

	/* parse arguments */
	const char *dump_prefix = argv[0];

	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);

	char dump[PATH_MAX];
	for (unsigned i = 0; i < root->graphs_num; ++i) {
		SNPRINTF(dump, PATH_MAX, "%s_%u.log", dump_prefix, i);
		graph_dump(root->graphs[i], dump, HAS_TO_EXIST);
	}

	return 1;
}
/*
 * ops -- available test cases
 */
static struct test_case ops[] = {
	TEST_CASE(op_pool_create),
	TEST_CASE(op_pool_close),
	TEST_CASE(op_graph_create),
	TEST_CASE(op_graph_dump),
	TEST_CASE(op_graph_defrag),
	TEST_CASE(op_dump_compare),
	TEST_CASE(op_graph_create_n_defrag_mt),
	/* for pool validation only */
	TEST_CASE(op_pool_open),
	TEST_CASE(op_graph_dump_all),
};

#define NOPS ARRAY_SIZE(ops)

#define TEST_NAME "obj_defrag_advanced"

/*
 * main -- dispatch the ops named on the command line
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, TEST_NAME);
	TEST_CASE_PROCESS(argc, argv, ops, NOPS);
	DONE(NULL);
}
| 12,707 | 21.452297 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_defrag_advanced/pgraph.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pgraph.c -- persistent graph representation
*/
#include <inttypes.h>
#include "unittest.h"
#include "vgraph.h"
#include "pgraph.h"
#define PATTERN 'g'
/*
 * pnode_size -- compute the total on-media size of a node: the fixed
 * header, the edge oid array, and the trailing data pattern
 */
static size_t
pnode_size(unsigned edges_num, size_t pattern_size)
{
	return sizeof(struct pnode_t) +
		sizeof(PMEMoid) * edges_num +
		pattern_size;
}
/*
 * pnode_init -- initialize a persistent node from its volatile blueprint
 * and persist the whole node in one go
 */
static void
pnode_init(PMEMobjpool *pop, PMEMoid pnode_oid, struct vnode_t *vnode,
		PMEMoid pnodes[])
{
	struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(pnode_oid);
	pnode->node_id = vnode->node_id;
	pnode->size = vnode->psize;

	/* set edges: translate blueprint node ids into allocated oids */
	pnode->edges_num = vnode->edges_num;
	for (unsigned i = 0; i < vnode->edges_num; ++i)
		pnode->edges[i] = pnodes[vnode->edges[i]];

	/* initialize pattern, located right after the edge array */
	pnode->pattern_size = vnode->pattern_size;
	void *pattern = (void *)&pnode->edges[pnode->edges_num];
	/* NOFLUSH is fine: the persist below covers the whole node */
	pmemobj_memset(pop, pattern, PATTERN, pnode->pattern_size,
			PMEMOBJ_F_MEM_NOFLUSH);

	/* persist the whole node state */
	pmemobj_persist(pop, (const void *)pnode, pnode->size);
}
/*
 * order_shuffle -- shuffle the nodes in graph
 *
 * NOTE(review): each slot is swapped with a uniformly random slot, which
 * is not a Fisher-Yates shuffle, so the permutation distribution is
 * slightly biased -- acceptable here, where only allocation-order
 * randomness is needed.
 */
static void
order_shuffle(unsigned *order, unsigned num, rng_t *rngp)
{
	for (unsigned i = 0; i < num; ++i) {
		unsigned j = rand_range(0, num, rngp);
		unsigned temp = order[j];
		order[j] = order[i];
		order[i] = temp;
	}
}
/*
 * order_new -- build a random permutation of node ids; this is the order
 * in which the persistent nodes get allocated
 */
static unsigned *
order_new(struct vgraph_t *vgraph, rng_t *rngp)
{
	unsigned count = vgraph->nodes_num;
	unsigned *order = (unsigned *)MALLOC(sizeof(unsigned) * count);

	/* start from the identity permutation... */
	for (unsigned id = 0; id < count; ++id)
		order[id] = id;

	/* ...and randomize it in place */
	order_shuffle(order, count, rngp);

	return order;
}
/*
 * pgraph_copy_new -- allocate a persistent copy of the volatile graph
 * (raw allocations only, in randomized order -- content is filled in
 * later by pnode_init)
 */
static PMEMoid *
pgraph_copy_new(PMEMobjpool *pop, struct vgraph_t *vgraph, rng_t *rngp)
{
	/* to be returned array of PMEMoids to raw nodes allocations */
	PMEMoid *nodes = (PMEMoid *)MALLOC(sizeof(PMEMoid) * vgraph->nodes_num);

	/* generates random order of nodes allocation */
	unsigned *order = order_new(vgraph, rngp);

	/* allocate the nodes in the random order */
	int ret;
	for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
		struct vnode_t vnode = vgraph->node[order[i]];
		PMEMoid *node = &nodes[order[i]];
		ret = pmemobj_alloc(pop, node, vnode.psize, 0, NULL, NULL);
		UT_ASSERTeq(ret, 0);
	}

	FREE(order);

	return nodes;
}
/*
 * pgraph_copy_delete -- free the leftover node allocations of one copy;
 * slots already claimed by the final graph were set to OID_NULL
 */
static void
pgraph_copy_delete(PMEMoid *nodes, unsigned num)
{
	for (unsigned idx = 0; idx < num; ++idx) {
		if (!OID_IS_NULL(nodes[idx]))
			pmemobj_free(&nodes[idx]);
	}

	FREE(nodes);
}
/*
 * pgraph_size -- size of the graph header plus one oid slot per node
 */
static size_t
pgraph_size(unsigned nodes_num)
{
	return sizeof(struct pgraph_t) + nodes_num * sizeof(PMEMoid);
}
/*
 * pgraph_new -- allocate a new persistent graph in such a way
 * that the fragmentation is as large as possible: several full copies of
 * every node are allocated, one copy of each node is kept at random and
 * the rest is freed, leaving holes across the heap
 */
void
pgraph_new(PMEMobjpool *pop, PMEMoid *oidp, struct vgraph_t *vgraph,
		struct pgraph_params *params, rng_t *rngp)
{
	int ret = pmemobj_alloc(pop, oidp, pgraph_size(vgraph->nodes_num),
			0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp);
	pgraph->nodes_num = vgraph->nodes_num;
	pmemobj_persist(pop, pgraph, sizeof(*pgraph));

	/* calculate size of pnodes */
	for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
		struct vnode_t *vnode = &vgraph->node[i];
		vnode->psize = pnode_size(vnode->edges_num,
				vnode->pattern_size);
	}

	/* prepare multiple copies of the nodes */
	unsigned copies_num = rand_range(1, params->graph_copies, rngp);
	PMEMoid **copies = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * copies_num);
	for (unsigned i = 0; i < copies_num; ++i)
		copies[i] = pgraph_copy_new(pop, vgraph, rngp);

	/* peek exactly the one copy of each node */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		unsigned copy_id = rand_range(0, copies_num, rngp);
		pgraph->nodes[i] = copies[copy_id][i];
		/* mark the slot as consumed so it is not freed below */
		copies[copy_id][i] = OID_NULL;
	}
	pmemobj_persist(pop, pgraph->nodes,
			sizeof(PMEMoid) * pgraph->nodes_num);

	/* free unused copies of the nodes */
	for (unsigned i = 0; i < copies_num; ++i)
		pgraph_copy_delete(copies[i], vgraph->nodes_num);
	FREE(copies);

	/* initialize pnodes */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i)
		pnode_init(pop, pgraph->nodes[i], &vgraph->node[i],
				pgraph->nodes);
}
/*
 * pgraph_delete -- free the persistent graph: every node first, then the
 * graph header itself
 */
void
pgraph_delete(PMEMoid *oidp)
{
	struct pgraph_t *graph = (struct pgraph_t *)pmemobj_direct(*oidp);

	for (unsigned idx = 0; idx < graph->nodes_num; ++idx)
		pmemobj_free(&graph->nodes[idx]);

	pmemobj_free(oidp);
}
/*
 * pgraph_print -- print graph in human readable format: a header with
 * node/edge counts followed by one "id: edge, edge, ..." line per node
 */
void
pgraph_print(struct pgraph_t *pgraph, const char *dump)
{
	UT_ASSERTne(dump, NULL);

	FILE *out = FOPEN(dump, "w");

	/* print the graph statistics */
	fprintf(out, "# of nodes: %u\n", pgraph->nodes_num);

	uint64_t total_edges_num = 0;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		PMEMoid node_oid = pgraph->nodes[i];
		struct pnode_t *pnode =
				(struct pnode_t *)pmemobj_direct(node_oid);
		total_edges_num += pnode->edges_num;
	}
	fprintf(out, "Total # of edges: %" PRIu64 "\n\n", total_edges_num);

	/* print the graph itself */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		PMEMoid node_oid = pgraph->nodes[i];
		struct pnode_t *pnode =
				(struct pnode_t *)pmemobj_direct(node_oid);

		fprintf(out, "%u:", pnode->node_id);
		for (unsigned j = 0; j < pnode->edges_num; ++j) {
			PMEMoid edge_oid = pnode->edges[j];
			struct pnode_t *edge =
				(struct pnode_t *)pmemobj_direct(edge_oid);
			/* edges must point inside the graph */
			UT_ASSERT(edge->node_id < pgraph->nodes_num);
			fprintf(out, "%u, ", edge->node_id);
		}
		fprintf(out, "\n");
	}

	FCLOSE(out);
}
| 6,058 | 23.934156 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_defrag_advanced/vgraph.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.c -- volatile graph representation
*/
#include <stdlib.h>
#include <stdio.h>
#include "rand.h"
#include "unittest.h"
#include "vgraph.h"
/*
 * rand_range -- generate pseudo-random number from given interval [min, max]
 * (the modulo introduces a slight bias -- fine for test data)
 */
unsigned
rand_range(unsigned min, unsigned max, rng_t *rngp)
{
	if (min == max)
		return min;

	if (min > max)
		UT_FATAL("!rand_range");

	/* truncate to 32 bits first, exactly like the original draw */
	unsigned raw = rngp ? (unsigned)rnd64_r(rngp) : (unsigned)rnd64();

	return raw % (max - min) + min;
}
/*
 * vnode_new -- allocate a new volatile node: picks a random edge count
 * within [max_edges - range_edges, max_edges] (at least 1) and a random
 * trailing-pattern size.
 *
 * Fix: the edge array was allocated with sizeof(int) although its
 * element type is unsigned; use the sizeof(*ptr) idiom so the size stays
 * correct even if the field type changes.
 */
static void
vnode_new(struct vnode_t *node, unsigned v, struct vgraph_params *params,
	rng_t *rngp)
{
	unsigned min_edges = 1;
	if (params->max_edges > params->range_edges)
		min_edges = params->max_edges - params->range_edges;
	unsigned edges_num = rand_range(min_edges, params->max_edges, rngp);

	node->node_id = v;
	node->edges_num = edges_num;
	/* allocate by element type of the array, not a lookalike type */
	node->edges = (unsigned *)MALLOC(sizeof(*node->edges) * edges_num);
	node->pattern_size = rand_range(params->min_pattern_size,
		params->max_pattern_size, rngp);
}
/*
 * vnode_delete -- free a volatile node
 * (only the edge array is dynamically owned; the node itself lives
 * inside the flexible array of struct vgraph_t)
 */
static void
vnode_delete(struct vnode_t *node)
{
	FREE(node->edges);
}
/*
 * vgraph_get_node -- return node in graph based on given id_node;
 * nodes are stored inline and indexed directly by id
 */
static struct vnode_t *
vgraph_get_node(struct vgraph_t *graph, unsigned id_node)
{
	return &graph->node[id_node];
}
/*
 * vgraph_add_edges -- assign a uniformly random destination node to
 * every outgoing edge of every node (self-edges allowed)
 */
static void
vgraph_add_edges(struct vgraph_t *graph, rng_t *rngp)
{
	for (unsigned src = 0; src < graph->nodes_num; ++src) {
		struct vnode_t *node = vgraph_get_node(graph, src);

		for (unsigned e = 0; e < node->edges_num; ++e)
			node->edges[e] =
				rand_range(0, graph->nodes_num, rngp);
	}
}
/*
 * vgraph_new -- allocate a new volatile graph with a random node count in
 * [max_nodes - range_nodes, max_nodes] (at least 1) and randomized edges
 */
struct vgraph_t *
vgraph_new(struct vgraph_params *params, rng_t *rngp)
{
	unsigned min_nodes = 1;
	if (params->max_nodes > params->range_nodes)
		min_nodes = params->max_nodes - params->range_nodes;
	unsigned nodes_num = rand_range(min_nodes, params->max_nodes, rngp);

	/* nodes live in the flexible array at the end of the struct */
	struct vgraph_t *graph =
			(struct vgraph_t *)MALLOC(sizeof(struct vgraph_t) +
					sizeof(struct vnode_t) * nodes_num);
	graph->nodes_num = nodes_num;

	for (unsigned i = 0; i < nodes_num; i++) {
		vnode_new(&graph->node[i], i, params, rngp);
	}

	vgraph_add_edges(graph, rngp);

	return graph;
}
/*
 * vgraph_delete -- free the volatile graph; nodes own their edge arrays,
 * so release those before the graph itself
 */
void
vgraph_delete(struct vgraph_t *graph)
{
	for (unsigned idx = 0; idx < graph->nodes_num; idx++)
		vnode_delete(&graph->node[idx]);

	FREE(graph);
}
| 2,894 | 21.099237 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_defrag_advanced/vgraph.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.h -- volatile graph representation
*/
#ifndef OBJ_DEFRAG_ADV_VGRAPH
#define OBJ_DEFRAG_ADV_VGRAPH
#include "rand.h"
/* knobs controlling the shape of a randomly generated volatile graph */
struct vgraph_params
{
	unsigned max_nodes; /* max # of nodes per graph */
	unsigned max_edges; /* max # of edges per node */
	/* # of nodes is between [max_nodes - range_nodes, max_nodes] */
	unsigned range_nodes;
	/* # of edges is between [max_edges - range_edges, max_edges] */
	unsigned range_edges;
	/* bounds for the per-node data-pattern size (bytes) */
	unsigned min_pattern_size;
	unsigned max_pattern_size;
};

/* a single node of the volatile (blueprint) graph */
struct vnode_t
{
	unsigned node_id;
	unsigned edges_num; /* # of edges starting from this node */
	unsigned *edges; /* ids of nodes the edges are pointing to */

	/* the persistent node attributes */
	size_t pattern_size; /* size of the pattern allocated after the node */
	size_t psize; /* the total size of the node */
};

/* the volatile graph: a node count plus the nodes stored inline */
struct vgraph_t
{
	unsigned nodes_num;
	struct vnode_t node[];
};
unsigned rand_range(unsigned min, unsigned max, rng_t *rngp);
struct vgraph_t *vgraph_new(struct vgraph_params *params, rng_t *rngp);
void vgraph_delete(struct vgraph_t *graph);
#endif
| 1,158 | 23.145833 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_mem/obj_mem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_mem.c -- simple test for pmemobj_memcpy, pmemobj_memmove and
* pmemobj_memset that verifies nothing blows up on pmemobj side.
* Real consistency tests are for libpmem.
*/
#include "unittest.h"
/* combinations of PMEMOBJ_F_MEM_* flags passed to each memory op below */
static unsigned Flags[] = {
	0,
	PMEMOBJ_F_MEM_NODRAIN,
	PMEMOBJ_F_MEM_NONTEMPORAL,
	PMEMOBJ_F_MEM_TEMPORAL,
	PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL,
	PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_NODRAIN,
	PMEMOBJ_F_MEM_WC,
	PMEMOBJ_F_MEM_WB,
	PMEMOBJ_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NOFLUSH |
		PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL |
		PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_WB,
};
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_mem");
if (argc != 2)
UT_FATAL("usage: %s [directory]", argv[0]);
PMEMobjpool *pop = pmemobj_create(argv[1], "obj_mem", 0,
S_IWUSR | S_IRUSR);
if (!pop)
UT_FATAL("!pmemobj_create");
struct root {
char c[4096];
};
struct root *r = pmemobj_direct(pmemobj_root(pop, sizeof(struct root)));
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
unsigned f = Flags[i];
pmemobj_memset(pop, &r->c[0], 0x77, 2048, f);
pmemobj_memset(pop, &r->c[2048], 0xff, 2048, f);
pmemobj_memcpy(pop, &r->c[2048 + 7], &r->c[0], 100, f);
pmemobj_memcpy(pop, &r->c[2048 + 1024], &r->c[0] + 17, 128, f);
pmemobj_memmove(pop, &r->c[125], &r->c[150], 100, f);
pmemobj_memmove(pop, &r->c[350], &r->c[325], 100, f);
if (f & PMEMOBJ_F_MEM_NOFLUSH)
pmemobj_persist(pop, r, sizeof(*r));
}
pmemobj_close(pop);
DONE(NULL);
}
| 1,644 | 22.84058 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_valgr_simple/pmem_valgr_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2016, Intel Corporation */
/*
* pmem_valgr_simple.c -- simple unit test using pmemcheck
*
* usage: pmem_valgr_simple file
*/
#include "unittest.h"
/*
 * main -- map a file and perform a mix of flushed and unflushed stores
 * so the pmemcheck/valgrind log can distinguish persisted from
 * non-persisted data; the store/flush ordering below is the test itself
 */
int
main(int argc, char *argv[])
{
	size_t mapped_len;
	char *dest;
	int is_pmem;
	START(argc, argv, "pmem_valgr_simple");
	if (argc != 4)
		UT_FATAL("usage: %s file offset length", argv[0]);
	int dest_off = atoi(argv[2]);
	size_t bytes = strtoul(argv[3], NULL, 0);
	/* len=0 maps the whole existing file; actual size via mapped_len */
	dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, &is_pmem);
	if (dest == NULL)
		UT_FATAL("!Could not mmap %s\n", argv[1]);
	/* these will not be made persistent */
	*(int *)dest = 4;
	/* this will be made persistent */
	uint64_t *tmp64dst = (void *)((uintptr_t)dest + 4096);
	*tmp64dst = 50;
	if (is_pmem) {
		pmem_persist(tmp64dst, sizeof(*tmp64dst));
	} else {
		UT_ASSERTeq(pmem_msync(tmp64dst, sizeof(*tmp64dst)), 0);
	}
	uint16_t *tmp16dst = (void *)((uintptr_t)dest + 1024);
	*tmp16dst = 21;
	/* will appear as flushed/fenced in valgrind log */
	pmem_flush(tmp16dst, sizeof(*tmp16dst));
	/* shows strange behavior of memset in some cases */
	memset(dest + dest_off, 0, bytes);
	UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);
	DONE(NULL);
}
| 1,240 | 21.160714 | 63 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/libpmempool_check_version/libpmempool_check_version.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* libpmempool_check_version -- a unittest for libpmempool_check_version.
*
*/
#include "unittest.h"
#include "libpmempool.h"
/*
 * main -- probe pmempool_check_version() around the library's actual
 * version: it returns NULL when the requested version is compatible and
 * a non-NULL error string otherwise
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_check_version");
	/* nonsense version (0.0) must be rejected */
	UT_ASSERTne(pmempool_check_version(0, 0), NULL);
	/* older major is incompatible */
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION - 1,
		PMEMPOOL_MINOR_VERSION));
	/* same major, older minor is compatible */
	if (PMEMPOOL_MINOR_VERSION > 0) {
		UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION - 1));
	}
	/* exact match is compatible */
	UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION));
	/* anything newer than the library is incompatible */
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION + 1,
		PMEMPOOL_MINOR_VERSION));
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION + 1));
	DONE(NULL);
}
| 897 | 22.631579 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/win_mmap_dtor/win_mmap_dtor.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_mmap_dtor.c -- unit test for windows mmap destructor
*/
#include "unittest.h"
#include "os.h"
#include "win_mmap.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
unsigned long long Mmap_align;
/*
 * main -- map a 3MB view of a 2MB file and verify the resulting layout:
 * 2MB committed + 1MB reserved; after win_mmap_fini() the reserved tail
 * must have been released back to the OS
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_mmap_dtor");
	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	/* set pagesize for mmap */
	Mmap_align = si.dwAllocationGranularity;
	const char *path = argv[1];
	int fd = os_open(path, O_RDWR);
	UT_ASSERTne(fd, -1);
	/*
	 * Input file has size equal to 2MB, but the mapping is 3MB.
	 * In this case mmap should map whole file and reserve 1MB
	 * of virtual address space for remaining part of the mapping.
	 */
	void *addr = mmap(NULL, 3 * MEGABYTE, PROT_READ, MAP_SHARED, fd, 0);
	UT_ASSERTne(addr, MAP_FAILED);
	MEMORY_BASIC_INFORMATION basic_info;
	SIZE_T bytes_returned;
	/* first region: the file-backed part, committed */
	bytes_returned = VirtualQuery(addr, &basic_info,
		sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	UT_ASSERTeq(basic_info.RegionSize, 2 * MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	/* second region: address space only, reserved */
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
		&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	UT_ASSERTeq(basic_info.RegionSize, MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_RESERVE);
	/* the destructor must free the reserved tail */
	win_mmap_fini();
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
		&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/*
	 * region size can be bigger than 1MB because there was probably
	 * free space after this mapping
	 */
	UT_ASSERTeq(basic_info.State, MEM_FREE);
	DONE(NULL);
}
| 1,778 | 22.72 | 69 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_map_file_win/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
/* redirect libpmem's internal calls to the mocked versions below */
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_map_file_win/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Logs every call and simulates an out-of-space condition (ENOSPC) for
 * any request larger than MAX_LEN; smaller requests are forwarded to
 * the real implementation.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Same policy as the posix_fallocate mock, but reports failure the
 * ftruncate way: return -1 with errno set to ENOSPC.
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_locks/obj_tx_locks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_locks.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "direct"
#define NUM_LOCKS 2
#define NUM_THREADS 10
#define TEST_VALUE_A 5
#define TEST_VALUE_B 10
#define TEST_VALUE_C 15
#define BEGIN_TX(pop, mutexes, rwlocks)\
TX_BEGIN_PARAM((pop), TX_PARAM_MUTEX,\
&(mutexes)[0], TX_PARAM_MUTEX, &(mutexes)[1], TX_PARAM_RWLOCK,\
&(rwlocks)[0], TX_PARAM_RWLOCK, &(rwlocks)[1], TX_PARAM_NONE)
#define BEGIN_TX_OLD(pop, mutexes, rwlocks)\
TX_BEGIN_LOCK((pop), TX_LOCK_MUTEX,\
&(mutexes)[0], TX_LOCK_MUTEX, &(mutexes)[1], TX_LOCK_RWLOCK,\
&(rwlocks)[0], TX_LOCK_RWLOCK, &(rwlocks)[1], TX_LOCK_NONE)
/* shared state for every scenario, kept in the pool's root object */
struct transaction_data {
	PMEMmutex mutexes[NUM_LOCKS];	/* locks passed to BEGIN_TX */
	PMEMrwlock rwlocks[NUM_LOCKS];	/* locks passed to BEGIN_TX */
	/* scratch values written/checked by the tx stage handlers */
	int a;
	int b;
	int c;
};
/* pool handle shared by all worker threads; set once in main() */
static PMEMobjpool *Pop;
/*
 * do_tx -- (internal) thread-friendly transaction
 *
 * Commits one transaction and checks the stage order: TX_ONCOMMIT and
 * TX_FINALLY must run, TX_ONABORT must not.
 */
static void *
do_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_tx_old -- (internal) thread-friendly transaction, tests deprecated
 * macros
 *
 * Same scenario as do_tx(), entered through the deprecated TX_LOCK_*
 * based macro to make sure it still behaves like TX_PARAM_*.
 */
static void *
do_tx_old(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX_OLD(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_tx -- (internal) thread-friendly aborted transaction
 *
 * pmemobj_tx_abort() must transfer control to TX_ONABORT, so the store
 * following the abort must never execute (verified by the assert in
 * the abort handler).
 */
static void *
do_aborted_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
		pmemobj_tx_abort(-1);
		data->a = TEST_VALUE_B; /* skipped -- tx aborted above */
	} TX_ONCOMMIT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_nested_tx -- (internal) thread-friendly nested transaction
 *
 * The inner transaction requests the same locks the outer one already
 * holds; both must commit and their ONCOMMIT handlers must fire.
 */
static void *
do_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
		} TX_ONCOMMIT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_END
	} TX_ONCOMMIT {
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_nested_tx -- (internal) thread-friendly aborted nested
 * transaction
 *
 * Aborting the inner transaction must abort the outer one as well: the
 * outer TX_ONCOMMIT is never called and the outer TX_ONABORT observes
 * the values set by the inner abort/finally handlers.
 */
static void *
do_aborted_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_C;
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
			pmemobj_tx_abort(EINVAL);
			data->a = TEST_VALUE_B; /* skipped */
		} TX_ONCOMMIT { /* not called */
			data->a = TEST_VALUE_C;
		} TX_ONABORT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_FINALLY {
			UT_ASSERT(data->b == TEST_VALUE_B);
			data->c = TEST_VALUE_C;
		} TX_END
		data->a = TEST_VALUE_B; /* skipped -- outer tx is aborted too */
	} TX_ONCOMMIT { /* not called */
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->c = TEST_VALUE_C;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		UT_ASSERT(data->b == TEST_VALUE_B);
		UT_ASSERT(data->c == TEST_VALUE_C);
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->a == TEST_VALUE_B);
		data->b = TEST_VALUE_A;
	} TX_END
	return NULL;
}
/*
 * run_mt_test -- start NUM_THREADS copies of worker(arg) and wait for
 * all of them to finish
 */
static void
run_mt_test(void *(*worker)(void *), void *arg)
{
	os_thread_t workers[NUM_THREADS];
	int t;

	for (t = 0; t < NUM_THREADS; ++t)
		THREAD_CREATE(&workers[t], NULL, worker, arg);

	for (t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&workers[t], NULL);
}
/*
 * main -- run every transaction-lock scenario either twice in a single
 * thread, or concurrently in NUM_THREADS threads (argv[2] == "m"), and
 * verify the shared object state after each phase
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_locks");

	if (argc > 3)
		UT_FATAL("usage: %s <file> [m]", argv[0]);

	if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	int multithread = 0;
	if (argc == 3) {
		multithread = (argv[2][0] == 'm');
		if (!multithread)
			/*
			 * report the test-type argument that was actually
			 * supplied (argv[2]) -- previously this printed the
			 * first character of the pool path (argv[1])
			 */
			UT_FATAL("wrong test type supplied %c", argv[2][0]);
	}

	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *test_obj =
		(struct transaction_data *)pmemobj_direct(root);

	/* committed transaction */
	if (multithread) {
		run_mt_test(do_tx, test_obj);
	} else {
		do_tx(test_obj);
		do_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);

	/* aborted transaction */
	if (multithread) {
		run_mt_test(do_aborted_tx, test_obj);
	} else {
		do_aborted_tx(test_obj);
		do_aborted_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);

	/* nested committed transaction */
	if (multithread) {
		run_mt_test(do_nested_tx, test_obj);
	} else {
		do_nested_tx(test_obj);
		do_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);

	/* nested transaction aborted from the inner scope */
	if (multithread) {
		run_mt_test(do_aborted_nested_tx, test_obj);
	} else {
		do_aborted_nested_tx(test_obj);
		do_aborted_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_B);
	UT_ASSERT(test_obj->b == TEST_VALUE_A);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);

	/* test that deprecated macros still work */
	UT_COMPILE_ERROR_ON((int)TX_LOCK_NONE != (int)TX_PARAM_NONE);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_MUTEX != (int)TX_PARAM_MUTEX);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_RWLOCK != (int)TX_PARAM_RWLOCK);
	if (multithread) {
		run_mt_test(do_tx_old, test_obj);
	} else {
		do_tx_old(test_obj);
		do_tx_old(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);

	pmemobj_close(Pop);

	DONE(NULL);
}
| 6,164 | 21.918216 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/blk_recovery/blk_recovery.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_recovery.c -- unit test for pmemblk recovery
*
* usage: blk_recovery bsize file first_lba lba
*
*/
#include "unittest.h"
#include <sys/param.h>
#include "blk.h"
#include "btt_layout.h"
#include <endian.h>
static size_t Bsize;
/*
* construct -- build a buffer for writing
*/
static void
construct(unsigned char *buf)
{
static int ord = 1;
for (int i = 0; i < Bsize; i++)
buf[i] = ord;
ord++;
if (ord > 255)
ord = 1;
}
/*
 * ident -- describe the content of a block buffer: "{val}" when every
 * byte equals the first one, or a TORN diagnostic with the offset of
 * the first mismatching byte
 *
 * Returns a pointer to a static buffer -- not reentrant.
 */
static char *
ident(unsigned char *buf)
{
	static char descr[100];
	unsigned val = *buf;

	/* size_t index to match Bsize; %zu keeps the format in sync */
	for (size_t i = 1; i < Bsize; i++)
		if (buf[i] != val) {
			/* bounded write instead of plain sprintf */
			snprintf(descr, sizeof(descr),
				"{%u} TORN at byte %zu", val, i);
			return descr;
		}

	snprintf(descr, sizeof(descr), "{%u}", val);
	return descr;
}
/*
 * main -- two modes: with 5 args, create a pool, write one block, then
 * write-protect the BTT map and attempt a second write that is expected
 * to fault (both outcomes of that write are fatal on purpose); with 3
 * args, just run pmemblk_check() on the resulting pool
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_recovery");
	if (argc != 5 && argc != 3)
		UT_FATAL("usage: %s bsize file [first_lba lba]", argv[0]);
	Bsize = strtoul(argv[1], NULL, 0);
	const char *path = argv[2];
	if (argc > 3) {
		PMEMblkpool *handle;
		if ((handle = pmemblk_create(path, Bsize, 0,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!%s: pmemblk_create", path);
		UT_OUT("%s block size %zu usable blocks %zu",
				argv[1], Bsize, pmemblk_nblock(handle));
		/* write the first lba */
		os_off_t lba = STRTOL(argv[3], NULL, 0);
		unsigned char *buf = MALLOC(Bsize);
		construct(buf);
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		UT_OUT("write lba %zu: %s", lba, ident(buf));
		/* reach into the layout and write-protect the map */
		struct btt_info *infop = (void *)((char *)handle +
			roundup(sizeof(struct pmemblk), BLK_FORMAT_DATA_ALIGN));
		char *mapaddr = (char *)infop + le32toh(infop->mapoff);
		char *flogaddr = (char *)infop + le32toh(infop->flogoff);
		UT_OUT("write-protecting map, length %zu",
				(size_t)(flogaddr - mapaddr));
		MPROTECT(mapaddr, (size_t)(flogaddr - mapaddr), PROT_READ);
		/*
		 * this write should die on the read-only map; both branches
		 * below are fatal because reaching either means the process
		 * survived when it should not have
		 */
		lba = STRTOL(argv[4], NULL, 0);
		construct(buf);
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		else
			UT_FATAL("write lba %zu: %s", lba, ident(buf));
	} else {
		int result = pmemblk_check(path, Bsize);
		if (result < 0)
			UT_OUT("!%s: pmemblk_check", path);
		else if (result == 0)
			UT_OUT("%s: pmemblk_check: not consistent", path);
		else
			UT_OUT("%s: consistent", path);
	}
	DONE(NULL);
}
| 4,164 | 26.766667 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/out_err_mt/out_err_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* out_err_mt.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
/*
 * print_errors -- dump the current error message of every library,
 * prefixed with a caller-provided label
 */
static void
print_errors(const char *msg)
{
	UT_OUT("%s", msg);
	UT_OUT("PMEM: %s", pmem_errormsg());
	UT_OUT("PMEMOBJ: %s", pmemobj_errormsg());
	UT_OUT("PMEMLOG: %s", pmemlog_errormsg());
	UT_OUT("PMEMBLK: %s", pmemblk_errormsg());
	UT_OUT("PMEMPOOL: %s", pmempool_errormsg());
}
/*
 * check_errors -- parse each library's version-mismatch error message
 * and verify it reports both the requested major version (ver) and the
 * library's actual major version
 */
static void
check_errors(unsigned ver)
{
	int ret;
	int err_need;
	int err_found;
	ret = sscanf(pmem_errormsg(),
		"libpmem major version mismatch (need %d, found %d)",
		&err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, PMEM_MAJOR_VERSION);
	ret = sscanf(pmemobj_errormsg(),
		"libpmemobj major version mismatch (need %d, found %d)",
		&err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, PMEMOBJ_MAJOR_VERSION);
	ret = sscanf(pmemlog_errormsg(),
		"libpmemlog major version mismatch (need %d, found %d)",
		&err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, PMEMLOG_MAJOR_VERSION);
	ret = sscanf(pmemblk_errormsg(),
		"libpmemblk major version mismatch (need %d, found %d)",
		&err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, PMEMBLK_MAJOR_VERSION);
	ret = sscanf(pmempool_errormsg(),
		"libpmempool major version mismatch (need %d, found %d)",
		&err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, PMEMPOOL_MAJOR_VERSION);
}
/*
 * do_test -- (worker) trigger a version mismatch in every library using
 * this thread's own bogus major version, then verify the error messages
 * read back contain exactly that version
 */
static void *
do_test(void *arg)
{
	unsigned ver = *(unsigned *)arg;
	pmem_check_version(ver, 0);
	pmemobj_check_version(ver, 0);
	pmemlog_check_version(ver, 0);
	pmemblk_check_version(ver, 0);
	pmempool_check_version(ver, 0);
	check_errors(ver);
	return NULL;
}
/*
 * run_mt_test -- spawn NUM_THREADS workers, each given its own distinct
 * (bogus) major version number, then wait for all of them
 */
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t workers[NUM_THREADS];
	unsigned versions[NUM_THREADS];
	unsigned t;

	for (t = 0; t < NUM_THREADS; ++t) {
		versions[t] = 10000 + t;
		THREAD_CREATE(&workers[t], NULL, worker, &versions[t]);
	}

	for (t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&workers[t], NULL);
}
/*
 * main -- provoke a failure in each library on purpose and print the
 * resulting error messages after every step; every call below that is
 * followed by print_errors() is expected to fail
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "out_err_mt");
	if (argc != 6)
		UT_FATAL("usage: %s file1 file2 file3 file4 dir",
			argv[0]);
	print_errors("start");
	PMEMobjpool *pop = pmemobj_create(argv[1], "test",
		PMEMOBJ_MIN_POOL, 0666);
	PMEMlogpool *plp = pmemlog_create(argv[2],
		PMEMLOG_MIN_POOL, 0666);
	PMEMblkpool *pbp = pmemblk_create(argv[3],
		128, PMEMBLK_MIN_POOL, 0666);
	util_init();
	/* bogus major versions -- each check must fail */
	pmem_check_version(10000, 0);
	pmemobj_check_version(10001, 0);
	pmemlog_check_version(10002, 0);
	pmemblk_check_version(10003, 0);
	pmempool_check_version(10006, 0);
	print_errors("version check");
	void *ptr = NULL;
	/*
	 * We are testing library error reporting and we don't want this test
	 * to fail under memcheck.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	pmem_msync(ptr, 1);
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	print_errors("pmem_msync");
	int ret;
	PMEMoid oid;
	/* zero-sized allocation -- must fail */
	ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
	UT_ASSERTeq(ret, -1);
	print_errors("pmemobj_alloc");
	/* append larger than the log -- must fail */
	pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
	print_errors("pmemlog_append");
	/* block number out of range -- must fail */
	size_t nblock = pmemblk_nblock(pbp);
	pmemblk_set_error(pbp, (long long)nblock + 1);
	print_errors("pmemblk_set_error");
	run_mt_test(do_test);
	pmemobj_close(pop);
	pmemlog_close(plp);
	pmemblk_close(pbp);
	PMEMpoolcheck *ppc;
	struct pmempool_check_args args = {NULL, };
	/* wrong args size -- init must fail and return NULL */
	ppc = pmempool_check_init(&args, sizeof(args) / 2);
	UT_ASSERTeq(ppc, NULL);
	print_errors("pmempool_check_init");
	DONE(NULL);
}
| 3,840 | 22.278788 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/libpmempool_api/libpmempool_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* libpmempool_test -- test of libpmempool.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "unittest.h"
/*
 * Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
 * test libpmempool against various pmempool_check_args structure versions.
 */
struct pmempool_check_args_1_0 {
	const char *path;		/* pool to check */
	const char *backup_path;	/* optional backup destination */
	enum pmempool_pool_type pool_type;
	int flags;			/* PMEMPOOL_CHECK_* bits */
};
/*
 * check_pool -- check given pool
 *
 * Drives the libpmempool check loop: prints every status message,
 * answers every question with "yes", and prints the final result.
 * Unknown status types terminate the process.
 */
static void
check_pool(struct pmempool_check_args *args, size_t args_size)
{
	const char *status2str[] = {
		[PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent",
		[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent",
		[PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired",
		[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair",
		[PMEMPOOL_CHECK_RESULT_ERROR] = "fatal",
	};
	PMEMpoolcheck *ppc = pmempool_check_init(args, args_size);
	if (!ppc) {
		/* init failure is reported via errno */
		char buff[UT_MAX_ERR_MSG];
		ut_strerror(errno, buff, UT_MAX_ERR_MSG);
		UT_OUT("Error: %s", buff);
		return;
	}
	struct pmempool_check_status *status = NULL;
	while ((status = pmempool_check(ppc)) != NULL) {
		switch (status->type) {
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			UT_OUT("%s", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
			UT_OUT("%s", status->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			UT_OUT("%s", status->str.msg);
			/* accept every repair proposed by the library */
			status->str.answer = "yes";
			break;
		default:
			pmempool_check_end(ppc);
			exit(EXIT_FAILURE);
		}
	}
	enum pmempool_check_result ret = pmempool_check_end(ppc);
	UT_OUT("status = %s", status2str[ret]);
}
/*
 * print_usage -- print usage of program
 */
static void
print_usage(char *name)
{
	UT_OUT("Usage: %s [-t <pool_type>] [-r <repair>] [-d <dry_run>] "
		"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
		"[-b <backup_path>] <pool_path>", name);
}
/*
 * set_flag -- set or clear a single flag bit in *flags: a value that
 * parses to a positive integer sets the bit, anything else clears it
 */
static void
set_flag(const char *value, int *flags, int flag)
{
	int enable = atoi(value) > 0;

	if (enable)
		*flags |= flag;
	else
		*flags &= ~flag;
}
/*
 * main -- build a pmempool_check_args (1.0 layout) from the command
 * line and run the check; -s allows passing a wrong structure size on
 * purpose to exercise libpmempool's version handling
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_test");
	int opt;
	struct pmempool_check_args_1_0 args = {
		.path = NULL,
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_LOG,
		.flags = PMEMPOOL_CHECK_FORMAT_STR |
			PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
	};
	size_t args_size = sizeof(struct pmempool_check_args_1_0);
	while ((opt = getopt(argc, argv, "t:r:d:a:y:s:b:")) != -1) {
		switch (opt) {
		case 't':
			/* known type names, or a raw numeric type value */
			if (strcmp(optarg, "blk") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
			} else if (strcmp(optarg, "log") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
			} else if (strcmp(optarg, "obj") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
			} else if (strcmp(optarg, "btt") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
			} else {
				args.pool_type =
					(uint32_t)strtoul(optarg, NULL, 0);
			}
			break;
		case 'r':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
			break;
		case 'd':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
			break;
		case 'a':
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
			break;
		case 'y':
			set_flag(optarg, &args.flags,
				PMEMPOOL_CHECK_ALWAYS_YES);
			break;
		case 's':
			/* override the args size passed to check_init */
			args_size = strtoul(optarg, NULL, 0);
			break;
		case 'b':
			args.backup_path = optarg;
			break;
		default:
			print_usage(argv[0]);
			UT_FATAL("unknown option: %c", opt);
		}
	}
	if (optind < argc) {
		args.path = argv[optind];
	}
	check_pool((struct pmempool_check_args *)&args, args_size);
	DONE(NULL);
}
| 3,753 | 22.31677 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/blk_nblock/blk_nblock.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* blk_nblock.c -- unit test for pmemblk_nblock()
*
* usage: blk_nblock bsize:file...
*
*/
#include "unittest.h"
/*
 * main -- for each "bsize:file" argument: create a pool, report its
 * usable block count, then verify pmemblk_check() accepts the right
 * block size (or 0 = "any") and rejects a wrong one
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_nblock");
	if (argc < 2)
		UT_FATAL("usage: %s bsize:file...", argv[0]);
	/* map each file argument with the given map type */
	for (int arg = 1; arg < argc; arg++) {
		char *fname;
		size_t bsize = strtoul(argv[arg], &fname, 0);
		if (*fname != ':')
			UT_FATAL("usage: %s bsize:file...", argv[0]);
		fname++;
		PMEMblkpool *handle;
		handle = pmemblk_create(fname, bsize, 0, S_IWUSR | S_IRUSR);
		if (handle == NULL) {
			UT_OUT("!%s: pmemblk_create", fname);
		} else {
			UT_OUT("%s: block size %zu usable blocks: %zu",
					fname, bsize, pmemblk_nblock(handle));
			UT_ASSERTeq(pmemblk_bsize(handle), bsize);
			pmemblk_close(handle);
			int result = pmemblk_check(fname, bsize);
			if (result < 0)
				UT_OUT("!%s: pmemblk_check", fname);
			else if (result == 0)
				UT_OUT("%s: pmemblk_check: not consistent",
						fname);
			else {
				/* wrong bsize must be rejected ... */
				UT_ASSERTeq(pmemblk_check(fname, bsize + 1),
					-1);
				/* ... and bsize 0 means "any size" */
				UT_ASSERTeq(pmemblk_check(fname, 0), 1);
				handle = pmemblk_open(fname, 0);
				UT_ASSERTeq(pmemblk_bsize(handle), bsize);
				pmemblk_close(handle);
			}
		}
	}
	DONE(NULL);
}
| 1,358 | 22.431034 | 62 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_invalid/obj_tx_invalid.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_tx_invalid.c -- tests which transactional functions are available in
* which transaction stages
*/
#include <stddef.h>
#include "file.h"
#include "unittest.h"
/*
 * Layout definition -- a root object holding a handle to a single
 * dummy node (the only non-root type, so its type number is 1)
 */
POBJ_LAYOUT_BEGIN(tx_invalid);
POBJ_LAYOUT_ROOT(tx_invalid, struct dummy_root);
POBJ_LAYOUT_TOID(tx_invalid, struct dummy_node);
POBJ_LAYOUT_END(tx_invalid);
struct dummy_node {
	int value;
};
struct dummy_root {
	TOID(struct dummy_node) node;	/* typed handle to a dummy_node */
};
int
main(int argc, char *argv[])
{
if (argc != 3)
UT_FATAL("usage: %s file-name op", argv[0]);
START(argc, argv, "obj_tx_invalid %s", argv[2]);
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(tx_invalid) != 1);
PMEMobjpool *pop;
const char *path = argv[1];
int exists = util_file_exists(path);
if (exists < 0)
UT_FATAL("!util_file_exists");
if (!exists) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(tx_invalid),
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) {
UT_FATAL("!pmemobj_create %s", path);
}
} else {
if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(tx_invalid)))
== NULL) {
UT_FATAL("!pmemobj_open %s", path);
}
}
PMEMoid oid = pmemobj_first(pop);
if (OID_IS_NULL(oid)) {
if (pmemobj_alloc(pop, &oid, 10, 1, NULL, NULL))
UT_FATAL("!pmemobj_alloc");
} else {
UT_ASSERTeq(pmemobj_type_num(oid), 1);
}
if (strcmp(argv[2], "alloc") == 0)
pmemobj_tx_alloc(10, 1);
else if (strcmp(argv[2], "alloc-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_alloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "alloc-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_alloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "alloc-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_alloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "alloc-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_alloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "alloc-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_alloc(10, 1);
}
else if (strcmp(argv[2], "zalloc") == 0)
pmemobj_tx_zalloc(10, 1);
else if (strcmp(argv[2], "zalloc-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_zalloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "zalloc-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_zalloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "zalloc-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_zalloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "zalloc-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_zalloc(10, 1);
} TX_END
} else if (strcmp(argv[2], "zalloc-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_zalloc(10, 1);
}
else if (strcmp(argv[2], "strdup") == 0)
pmemobj_tx_strdup("aaa", 1);
else if (strcmp(argv[2], "strdup-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_strdup("aaa", 1);
} TX_END
} else if (strcmp(argv[2], "strdup-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_strdup("aaa", 1);
} TX_END
} else if (strcmp(argv[2], "strdup-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_strdup("aaa", 1);
} TX_END
} else if (strcmp(argv[2], "strdup-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_strdup("aaa", 1);
} TX_END
} else if (strcmp(argv[2], "strdup-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_strdup("aaa", 1);
}
else if (strcmp(argv[2], "realloc") == 0)
pmemobj_tx_realloc(oid, 10, 1);
else if (strcmp(argv[2], "realloc-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_realloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "realloc-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_realloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "realloc-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_realloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "realloc-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_realloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "realloc-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_realloc(oid, 10, 1);
}
else if (strcmp(argv[2], "zrealloc") == 0)
pmemobj_tx_zrealloc(oid, 10, 1);
else if (strcmp(argv[2], "zrealloc-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_zrealloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "zrealloc-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_zrealloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "zrealloc-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_zrealloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "zrealloc-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_zrealloc(oid, 10, 1);
} TX_END
} else if (strcmp(argv[2], "zrealloc-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_zrealloc(oid, 10, 1);
}
else if (strcmp(argv[2], "free") == 0)
pmemobj_tx_free(oid);
else if (strcmp(argv[2], "free-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
} TX_END
} else if (strcmp(argv[2], "free-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_free(oid);
} TX_END
} else if (strcmp(argv[2], "free-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_free(oid);
} TX_END
} else if (strcmp(argv[2], "free-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_free(oid);
} TX_END
} else if (strcmp(argv[2], "free-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_free(oid);
}
else if (strcmp(argv[2], "add_range") == 0)
pmemobj_tx_add_range(oid, 0, 10);
else if (strcmp(argv[2], "add_range-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
} else if (strcmp(argv[2], "add_range-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
} else if (strcmp(argv[2], "add_range-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
} else if (strcmp(argv[2], "add_range-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
} else if (strcmp(argv[2], "add_range-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_add_range(oid, 0, 10);
}
else if (strcmp(argv[2], "add_range_direct") == 0)
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
else if (strcmp(argv[2], "add_range_direct-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
} TX_END
} else if (strcmp(argv[2], "add_range_direct-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
} TX_END
} else if (strcmp(argv[2], "add_range_direct-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
} TX_END
} else if (strcmp(argv[2], "add_range_direct-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
} TX_END
} else if (strcmp(argv[2], "add_range_direct-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10);
}
else if (strcmp(argv[2], "abort") == 0)
pmemobj_tx_abort(ENOMEM);
else if (strcmp(argv[2], "abort-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_END
} else if (strcmp(argv[2], "abort-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_abort(ENOMEM);
} TX_END
} else if (strcmp(argv[2], "abort-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_abort(ENOMEM);
} TX_END
} else if (strcmp(argv[2], "abort-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_abort(ENOMEM);
} TX_END
} else if (strcmp(argv[2], "abort-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_abort(ENOMEM);
}
else if (strcmp(argv[2], "commit") == 0)
pmemobj_tx_commit();
else if (strcmp(argv[2], "commit-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_commit();
} TX_END
} else if (strcmp(argv[2], "commit-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_commit();
} TX_END
} else if (strcmp(argv[2], "commit-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_commit();
} TX_END
} else if (strcmp(argv[2], "commit-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_commit();
} TX_END
} else if (strcmp(argv[2], "commit-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_commit();
}
else if (strcmp(argv[2], "end") == 0)
pmemobj_tx_end();
else if (strcmp(argv[2], "end-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_end();
} TX_END
} else if (strcmp(argv[2], "end-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_end();
pmemobj_close(pop);
exit(0);
} TX_END
} else if (strcmp(argv[2], "end-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_end();
pmemobj_close(pop);
exit(0);
} TX_END
} else if (strcmp(argv[2], "end-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_end();
pmemobj_close(pop);
exit(0);
} TX_END
} else if (strcmp(argv[2], "end-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_end();
}
else if (strcmp(argv[2], "process") == 0)
pmemobj_tx_process();
else if (strcmp(argv[2], "process-in-work") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_process();
} TX_END
} else if (strcmp(argv[2], "process-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
pmemobj_tx_process();
} TX_END
} else if (strcmp(argv[2], "process-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
pmemobj_tx_process();
} TX_END
} else if (strcmp(argv[2], "process-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
pmemobj_tx_process();
pmemobj_tx_end();
pmemobj_close(pop);
exit(0);
} TX_END
} else if (strcmp(argv[2], "process-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
pmemobj_tx_process();
}
else if (strcmp(argv[2], "begin") == 0) {
TX_BEGIN(pop) {
} TX_END
} else if (strcmp(argv[2], "begin-in-work") == 0) {
TX_BEGIN(pop) {
TX_BEGIN(pop) {
} TX_END
} TX_END
} else if (strcmp(argv[2], "begin-in-abort") == 0) {
TX_BEGIN(pop) {
pmemobj_tx_abort(ENOMEM);
} TX_ONABORT {
TX_BEGIN(pop) {
} TX_END
} TX_END
} else if (strcmp(argv[2], "begin-in-commit") == 0) {
TX_BEGIN(pop) {
} TX_ONCOMMIT {
TX_BEGIN(pop) {
} TX_END
} TX_END
} else if (strcmp(argv[2], "begin-in-finally") == 0) {
TX_BEGIN(pop) {
} TX_FINALLY {
TX_BEGIN(pop) {
} TX_END
} TX_END
} else if (strcmp(argv[2], "begin-after-tx") == 0) {
TX_BEGIN(pop) {
} TX_END
TX_BEGIN(pop) {
} TX_END
}
pmemobj_close(pop);
DONE(NULL);
}
| 11,213 | 23.809735 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_has_auto_flush/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used in pmem_has_auto_flush.c
*/
#include <stdio.h>
#include <string.h>
#include <fts.h>
#include "fs.h"
#include "unittest.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
/*
 * open -- open mock
 *
 * Redirects any path under the sysfs bus-device tree to a test-controlled
 * directory given by the BUS_DEVICE_PATH environment variable; every other
 * path goes straight to the real open().
 */
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	va_list ap;
	va_start(ap, flags);
	/* mode is read unconditionally - the callers always pass it */
	int mode = va_arg(ap, int);
	va_end(ap);
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(open)(path, flags, mode);
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	UT_ASSERTne(prefix, NULL); /* the test must set the redirection root */
	char path2[PATH_MAX];
	/* bounds-checked rewrite - the original strcat could overflow path2 */
	int n = snprintf(path2, sizeof(path2), "%s%s",
		prefix, path + strlen(BUS_DEVICE_PATH));
	UT_ASSERT(n > 0 && (size_t)n < sizeof(path2));
	return _FUNC_REAL(open)(path2, flags, mode);
}
FUNC_MOCK_END
/* private state of an fs traversal - must mirror the definition in fs.c */
struct fs {
	FTS *ft;		/* underlying fts(3) traversal handle */
	struct fs_entry entry;	/* entry handed back to the caller */
};
/*
 * fs_new -- creates fs traversal instance
 *
 * Same sysfs redirection as the open() mock: paths under BUS_DEVICE_PATH
 * are rewritten into the test directory before calling the real fs_new().
 */
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(fs_new)(path);
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	UT_ASSERTne(prefix, NULL); /* the test must set the redirection root */
	char path2[PATH_MAX];
	/* bounds-checked rewrite - the original strcat could overflow path2 */
	int n = snprintf(path2, sizeof(path2), "%s%s",
		prefix, path + strlen(BUS_DEVICE_PATH));
	UT_ASSERT(n > 0 && (size_t)n < sizeof(path2));
	return _FUNC_REAL(fs_new)(path2);
}
FUNC_MOCK_END
/*
 * os_stat -- os_stat mock to handle sysfs path
 *
 * Rewrites paths under BUS_DEVICE_PATH into the test directory, like the
 * open() and fs_new() mocks above.
 */
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
	if (!strstr(path, BUS_DEVICE_PATH))
		return _FUNC_REAL(os_stat)(path, buf);
	const char *prefix = os_getenv("BUS_DEVICE_PATH");
	UT_ASSERTne(prefix, NULL); /* the test must set the redirection root */
	char path2[PATH_MAX];
	/* bounds-checked rewrite - the original strcat could overflow path2 */
	int n = snprintf(path2, sizeof(path2), "%s%s",
		prefix, path + strlen(BUS_DEVICE_PATH));
	UT_ASSERT(n > 0 && (size_t)n < sizeof(path2));
	return _FUNC_REAL(os_stat)(path2, buf);
}
FUNC_MOCK_END
| 1,627 | 22.257143 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_badblock_mocks/mocks_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_ndctl.c -- mocked ndctl functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <sys/stat.h>
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "pmem2_badblock_mocks.h"
#define RESOURCE_ADDRESS 0x1000 /* any non-zero value */
#define UINT(ptr) (unsigned)((uintptr_t)ptr)
/*
 * index of bad blocks - iterator state shared by the
 * *_get_first_badblock()/*_get_next_badblock() mocks below
 * (global, so only one iteration at a time - fine for this test)
 */
static unsigned i_bb;
/*
 * NOTE: the ndctl object pointers handled by these mocks are not real
 * objects - the test encodes its configuration (device type / mode / test
 * number bit-fields, see pmem2_badblock_mocks.h) in the pointer value,
 * which the mocks decode with the IS_MODE_* and UINT macros.
 */
/*
 * ndctl_namespace_get_mode - mock ndctl_namespace_get_mode
 */
FUNC_MOCK(ndctl_namespace_get_mode, enum ndctl_namespace_mode,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode */
		return NDCTL_NS_MODE_FSDAX;
	/* raw mode */
	return NDCTL_NS_MODE_RAW;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_pfn - mock ndctl_namespace_get_pfn
 */
FUNC_MOCK(ndctl_namespace_get_pfn, struct ndctl_pfn *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode - reuse the encoded handle as the pfn */
		return (struct ndctl_pfn *)ndns;
	return NULL;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_dax - mock ndctl_namespace_get_dax
 */
FUNC_MOCK(ndctl_namespace_get_dax, struct ndctl_dax *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_REGION((uintptr_t)ndns))
		/* region mode - reuse the encoded handle as the dax */
		return (struct ndctl_dax *)ndns;
	return NULL;
}
FUNC_MOCK_END
/*
 * All mocked devices report the same base address (RESOURCE_ADDRESS) and a
 * fixed 1 GiB size, regardless of the pfn/dax/namespace handle passed in.
 */
/*
 * ndctl_pfn_get_resource - mock ndctl_pfn_get_resource
 */
FUNC_MOCK(ndctl_pfn_get_resource, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_pfn_get_size - mock ndctl_pfn_get_size
 */
FUNC_MOCK(ndctl_pfn_get_size, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_dax_get_resource - mock ndctl_dax_get_resource
 */
FUNC_MOCK(ndctl_dax_get_resource, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_dax_get_size - mock ndctl_dax_get_size
 */
FUNC_MOCK(ndctl_dax_get_size, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_resource - mock ndctl_namespace_get_resource
 */
FUNC_MOCK(ndctl_namespace_get_resource, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_size - mock ndctl_namespace_get_size
 */
FUNC_MOCK(ndctl_namespace_get_size, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_region_get_resource - mock ndctl_region_get_resource
 */
FUNC_MOCK(ndctl_region_get_resource, unsigned long long,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_region_get_bus - mock ndctl_region_get_bus
 *
 * The fake region handle doubles as the bus handle.
 */
FUNC_MOCK(ndctl_region_get_bus, struct ndctl_bus *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return (struct ndctl_bus *)region;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_first_badblock - mock ndctl_namespace_get_first_badblock
 *
 * Resets the shared iterator (i_bb) and returns the first entry of the
 * bad-block table selected by the test number encoded in the handle.
 */
FUNC_MOCK(ndctl_namespace_get_first_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_next_badblock - mock ndctl_namespace_get_next_badblock
 */
FUNC_MOCK(ndctl_namespace_get_next_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_region_get_first_badblock - mock ndctl_region_get_first_badblock
 */
FUNC_MOCK(ndctl_region_get_first_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_region_get_next_badblock - mock ndctl_region_get_next_badblock
 */
FUNC_MOCK(ndctl_region_get_next_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END
/* arguments of the last ndctl_bus_cmd_new_ars_cap() call, replayed later */
static struct ndctl_data {
	uintptr_t bus;
	unsigned long long address;
	unsigned long long length;
} data;
/*
 * ndctl_bus_cmd_new_ars_cap - mock ndctl_bus_cmd_new_ars_cap
 *
 * Records the requested range and returns the record itself as the fake
 * command handle; ndctl_cmd_clear_error_get_cleared() reads it back.
 */
FUNC_MOCK(ndctl_bus_cmd_new_ars_cap, struct ndctl_cmd *,
	struct ndctl_bus *bus, unsigned long long address,
	unsigned long long len)
FUNC_MOCK_RUN_DEFAULT {
	data.bus = (uintptr_t)bus;
	data.address = address;
	data.length = len;
	return (struct ndctl_cmd *)&data;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_submit - mock ndctl_cmd_submit - always succeeds
 */
FUNC_MOCK(ndctl_cmd_submit, int, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_ars_cap_get_range - mock ndctl_cmd_ars_cap_get_range
 *
 * NOTE(review): 'range' is left untouched - presumably the caller only
 * checks the return value; confirm against the libpmem2 badblock code.
 */
FUNC_MOCK(ndctl_cmd_ars_cap_get_range, int,
	struct ndctl_cmd *ars_cap, struct ndctl_range *range)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END
/*
 * ndctl_bus_cmd_new_clear_error - mock ndctl_bus_cmd_new_clear_error
 *
 * Passes the ars_cap handle through; address/len are ignored here because
 * the range was already captured by the ars_cap mock above.
 */
FUNC_MOCK(ndctl_bus_cmd_new_clear_error, struct ndctl_cmd *,
	unsigned long long address,
	unsigned long long len,
	struct ndctl_cmd *ars_cap)
FUNC_MOCK_RUN_DEFAULT {
	return ars_cap;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_clear_error_get_cleared - mock ndctl_cmd_clear_error_get_cleared
 *
 * Logs the recorded range and reports the whole of it as cleared.
 * NOTE(review): the "%lu" conversion for the uintptr_t field relies on an
 * LP64 ABI - verify if this test is ever built elsewhere.
 */
FUNC_MOCK(ndctl_cmd_clear_error_get_cleared, unsigned long long,
	struct ndctl_cmd *clear_err)
FUNC_MOCK_RUN_DEFAULT {
	struct ndctl_data *pdata = (struct ndctl_data *)clear_err;
	UT_OUT("ndctl_clear_error(%lu, %llu, %llu)",
		pdata->bus, pdata->address, pdata->length);
	return pdata->length;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_unref - mock ndctl_cmd_unref - nothing to release
 */
FUNC_MOCK(ndctl_cmd_unref, void, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
}
FUNC_MOCK_END
| 5,900 | 22.050781 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.h -- definitions for pmem2_badblock_mocks test
*/
#include "extent.h"
/*
 * The tests never open real files - the fake "fd" value itself encodes the
 * whole test configuration in three bit-fields decoded with the masks
 * below: bits 6-8 device type, bits 4-5 ndctl mode, bits 0-3 test number.
 */
/* fd bits 6-8: type of device */
#define FD_REG_FILE (1 << 6) /* regular file */
#define FD_CHR_DEV (2 << 6) /* character device */
#define FD_DIRECTORY (3 << 6) /* directory */
#define FD_BLK_DEV (4 << 6) /* block device */
/* fd bits 4-5: ndctl mode */
#define MODE_NO_DEVICE (1 << 4) /* no matching device was found */
#define MODE_NAMESPACE (2 << 4) /* namespace mode */
#define MODE_REGION (3 << 4) /* region mode */
/* fd bits 0-3: number of test */
/* masks */
#define MASK_DEVICE 0b0111000000 /* bits 6-8: device mask */
#define MASK_MODE 0b0000110000 /* bits 4-5: mode mask */
#define MASK_TEST 0b0000001111 /* bits 0-3: test mask */
/* checks */
#define IS_MODE_NO_DEVICE(x) ((x & MASK_MODE) == MODE_NO_DEVICE)
#define IS_MODE_NAMESPACE(x) ((x & MASK_MODE) == MODE_NAMESPACE)
#define IS_MODE_REGION(x) ((x & MASK_MODE) == MODE_REGION)
/* default block size: 1kB */
#define BLK_SIZE_1KB 1024
/* default size of device: 1 GiB */
#define DEV_SIZE_1GB (1024 * 1024 * 1024)
struct badblock *get_nth_hw_badblock(unsigned test, unsigned *i_bb);
int get_extents(int fd, struct extents **exts);
| 1,290 | 31.275 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_badblock_mocks/mocks_pmem2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_pmem2.c -- mocked pmem2 functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "extent.h"
#include "source.h"
#include "pmem2_utils.h"
#include "pmem2_badblock_mocks.h"
/*
 * pmem2_region_namespace - mock pmem2_region_namespace
 *
 * The "region"/"namespace" handles returned here are not real ndctl
 * objects - the fake st_rdev value (which encodes the test configuration)
 * is smuggled through the pointer so the ndctl mocks can decode it.
 */
FUNC_MOCK(pmem2_region_namespace, int,
	struct ndctl_ctx *ctx,
	const struct pmem2_source *src,
	struct ndctl_region **pregion,
	struct ndctl_namespace **pndns)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTne(pregion, NULL);
	dev_t st_rdev = src->value.st_rdev;
	*pregion = (void *)st_rdev;
	if (pndns == NULL)
		return 0;
	UT_ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
		src->value.ftype == PMEM2_FTYPE_DEVDAX);
	if (IS_MODE_NO_DEVICE(st_rdev)) {
		/* did not find any matching device */
		*pndns = NULL;
		return 0;
	}
	*pndns = (void *)st_rdev;
	return 0;
}
FUNC_MOCK_END
/*
 * pmem2_extents_create_get -- allocate extents structure and get extents
 * of the given file
 *
 * Delegates to the local get_extents() table lookup instead of a real
 * filesystem query.
 */
FUNC_MOCK(pmem2_extents_create_get, int,
	int fd, struct extents **exts)
FUNC_MOCK_RUN_DEFAULT {
	return get_extents(fd, exts);
}
FUNC_MOCK_END
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.c -- unit test for pmem2_badblock_*()
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "source.h"
#include "badblocks.h"
#include "pmem2_badblock_mocks.h"
/* capacity of each bad-block table row (including the {0, 0} sentinel) */
#define BAD_BLOCKS_NUMBER 10
#define EXTENTS_NUMBER 8
/* MAX_BB_SET and its string form must stay in sync - checked in main() */
#define MAX_BB_SET_STR "4"
#define MAX_BB_SET 4
#define DEFAULT_BB_SET 1
#define USAGE_MSG \
"Usage: pmem2_badblock_mocks <test_case> <file_type> <mode> [bad_blocks_set]\n"\
"Possible values of arguments:\n"\
" test_case : test_basic, test_read_clear_bb \n"\
" file_type : reg_file, chr_dev\n"\
" mode : no_device, namespace, region\n"\
" bad_blocks_set : 1-"MAX_BB_SET_STR"\n\n"
/* indexes of arguments */
enum args_t {
	ARG_TEST_CASE = 1,
	ARG_FILE_TYPE,
	ARG_MODE,
	ARG_BB_SET,
	/* it always has to be the last one */
	ARG_NUMBER, /* number of arguments */
};
/* a test case - returns the status of the last pmem2_badblock_* call */
typedef int test_fn(struct pmem2_source *src);
typedef struct badblock bad_blocks_array[BAD_BLOCKS_NUMBER];
/*
 * Expected-result tables: one row per bad-blocks set (selected by the test
 * number in the encoded fd), each row terminated by a {0, 0} sentinel.
 */
/* HW bad blocks expressed in 512b sectors */
static bad_blocks_array hw_bad_blocks[] =
{
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 HW bad block */
	{ {1, 1}, {0, 0} },
	/* test #3 - 6 HW bad blocks */
	{ {4, 10}, {16, 10}, {28, 2}, {32, 4}, {40, 4}, {50, 2}, {0, 0} },
	/* test #4 - 7 HW bad blocks */
	{ {2, 4}, {8, 2}, {12, 6}, {20, 2}, {24, 10}, {38, 4}, {46, 2}, \
	{0, 0} },
};
/* file's bad blocks expressed in 512b sectors */
static bad_blocks_array file_bad_blocks[] =
{
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 file bad block */
	{ {0, 2}, {0, 0} },
	/* test #3 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
	{32, 2}, {40, 2}, {0, 0} },
	/* test #4 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
	{32, 2}, {40, 2}, {0, 0} },
};
/*
 * file's extents expressed in 512b sectors; fields are
 * {offset_physical, offset_logical, length} (see get_extents() below)
 */
static struct extent files_extents[][EXTENTS_NUMBER] =
{
	/* test #1 - no extents */
	{ {0, 0, 0} },
	/* test #2 - 1 extent */
	{ {0, 0, 2}, {0, 0, 0} },
	/* test #3 - 7 extents */
	{ {2, 2, 4}, {8, 8, 2}, {12, 12, 6}, {20, 20, 2}, {24, 24, 10}, \
	{38, 38, 4}, {46, 46, 2}, {0, 0, 0} },
	/* test #4 - 6 extents */
	{ {4, 4, 10}, {16, 16, 10}, {28, 28, 2}, {32, 32, 4}, {40, 40, 4}, \
	{50, 50, 2}, {0, 0, 0} },
};
/*
 * map_test_to_set -- map number of a test to an index of bad blocks' set
 * (the test number occupies the low 4 bits of the encoded fd value)
 */
static inline unsigned
map_test_to_set(unsigned test)
{
	return test & MASK_TEST;
}
/*
 * get_nth_typed_badblock -- fetch the next bad block of the given kind
 * (HW or file) for the test encoded in 'test'; returns NULL when the
 * {0, 0} terminator is reached, otherwise advances *i_bb past the hit.
 */
static struct badblock *
get_nth_typed_badblock(unsigned test, unsigned *i_bb,
	bad_blocks_array bad_blocks[])
{
	struct badblock *entry = &bad_blocks[map_test_to_set(test)][*i_bb];
	if (entry->offset == 0 && entry->len == 0)
		return NULL; /* {0, 0} terminates each table row */
	(*i_bb)++; /* advance the caller's iterator only on a hit */
	return entry;
}
/*
 * get_nth_hw_badblock -- get next HW badblock
 * (non-static - also called by the ndctl mocks in mocks_ndctl.c)
 */
struct badblock *
get_nth_hw_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, hw_bad_blocks);
}
/*
 * get_nth_file_badblock -- get next file's badblock
 */
static struct badblock *
get_nth_file_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, file_bad_blocks);
}
/*
 * get_nth_badblock -- get next badblock expected for the given fake fd:
 * file bad blocks for regular files, HW bad blocks for character devices,
 * none otherwise
 */
static struct badblock *
get_nth_badblock(int fd, unsigned *i_bb)
{
	UT_ASSERT(fd >= 0);
	/* no device was matched for this descriptor - nothing to report */
	if ((fd & MASK_MODE) == MODE_NO_DEVICE)
		return NULL;
	int device = fd & MASK_DEVICE;
	if (device == FD_REG_FILE) /* regular file */
		return get_nth_file_badblock((unsigned)fd, i_bb);
	if (device == FD_CHR_DEV) /* character device */
		return get_nth_hw_badblock((unsigned)fd, i_bb);
	/* directories and block devices expose no bad blocks */
	return NULL;
}
/*
 * get_extents -- get file's extents
 *
 * Fills in a freshly ZALLOC'ed 'struct extents' for the fake descriptor:
 * block size is always BLK_SIZE_1KB and only regular files get extents,
 * taken from the files_extents[] table and converted from 512b sectors
 * to bytes.
 */
int
get_extents(int fd, struct extents **exts)
{
	unsigned set = map_test_to_set((unsigned)fd);
	*exts = ZALLOC(sizeof(struct extents));
	struct extents *pexts = *exts;
	/* set block size */
	pexts->blksize = BLK_SIZE_1KB;
	if ((fd & MASK_DEVICE) != FD_REG_FILE) {
		/* not a regular file - no extents */
		return 0;
	}
	/* count extents (length > 0) */
	while (files_extents[set][pexts->extents_count].length)
		pexts->extents_count++;
	/*
	 * It will be freed internally by libpmem2
	 * (pmem2_badblock_context_delete)
	 */
	pexts->extents = MALLOC(pexts->extents_count * sizeof(struct extent));
	/* unsigned index matches extents_count - avoids a sign-compare */
	for (unsigned i = 0; i < pexts->extents_count; i++) {
		struct extent ext = files_extents[set][i];
		uint64_t off_phy = ext.offset_physical;
		uint64_t off_log = ext.offset_logical;
		uint64_t len = ext.length;
		/* every extent must be aligned to the block size */
		UT_ASSERTeq(SEC2B(off_phy) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(off_log) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(len) % pexts->blksize, 0);
		pexts->extents[i].offset_physical = SEC2B(off_phy);
		pexts->extents[i].offset_logical = SEC2B(off_log);
		pexts->extents[i].length = SEC2B(len);
	}
	return 0;
}
/*
 * test_basic -- basic test
 *
 * Creates a bad-block context and asks for the first bad block only;
 * returns the status of the failing pmem2 call (or of
 * pmem2_badblock_next()).
 */
static int
test_basic(struct pmem2_source *src)
{
	UT_OUT("TEST: test_basic: 0x%x", src->value.fd);
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	int ret;
	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;
	ret = pmem2_badblock_next(bbctx, &bb);
	pmem2_badblock_context_delete(&bbctx);
	return ret;
}
/*
 * test_read_clear_bb -- test reading and clearing bad blocks
 *
 * Iterates over every bad block reported by libpmem2, checks each one
 * against the mock tables (converted from 512b sectors to bytes) and
 * clears it; finally verifies that the expected table is exhausted too.
 */
static int
test_read_clear_bb(struct pmem2_source *src)
{
	UT_OUT("TEST: test_read_clear_bb: 0x%x", src->value.fd);
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	struct badblock *bb2;
	unsigned i_bb;
	int ret;
	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;
	i_bb = 0;
	while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
		/* each reported bad block must match the expected one */
		bb2 = get_nth_badblock(src->value.fd, &i_bb);
		UT_ASSERTne(bb2, NULL);
		UT_ASSERTeq(bb.offset, SEC2B(bb2->offset));
		UT_ASSERTeq(bb.length, SEC2B(bb2->len));
		ret = pmem2_badblock_clear(bbctx, &bb);
		if (ret)
			goto exit_free;
	}
	/* libpmem2 ran out of bad blocks - so must the expected table */
	bb2 = get_nth_badblock(src->value.fd, &i_bb);
	UT_ASSERTeq(bb2, NULL);
exit_free:
	pmem2_badblock_context_delete(&bbctx);
	return ret;
}
/*
 * parse_arguments -- translate the command-line arguments into the encoded
 * test value (*test), the pmem2 file type (*ftype) and the test function
 * (*test_func); any invalid argument is fatal
 */
static void
parse_arguments(int argc, char *argv[], int *test, enum pmem2_file_type *ftype,
	test_fn **test_func)
{
	if (argc < (ARG_NUMBER - 1) || argc > ARG_NUMBER) {
		UT_OUT(USAGE_MSG);
		if (argc > ARG_NUMBER)
			UT_FATAL("too many arguments");
		else
			UT_FATAL("missing required argument(s)");
	}
	char *test_case = argv[ARG_TEST_CASE];
	char *file_type = argv[ARG_FILE_TYPE];
	char *mode = argv[ARG_MODE];
	*test = 0;
	*test_func = NULL;
	if (strcmp(test_case, "test_basic") == 0) {
		*test_func = test_basic;
	} else if (strcmp(test_case, "test_read_clear_bb") == 0) {
		*test_func = test_read_clear_bb;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong test case: %s", test_case);
	}
	/* device-type bits (6-8) of the encoded value */
	if (strcmp(file_type, "reg_file") == 0) {
		*test |= FD_REG_FILE;
		*ftype = PMEM2_FTYPE_REG;
	} else if (strcmp(file_type, "chr_dev") == 0) {
		*test |= FD_CHR_DEV;
		*ftype = PMEM2_FTYPE_DEVDAX;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong file type: %s", file_type);
	}
	/* ndctl-mode bits (4-5) of the encoded value */
	if (strcmp(mode, "no_device") == 0) {
		*test |= MODE_NO_DEVICE;
	} else if (strcmp(mode, "namespace") == 0) {
		*test |= MODE_NAMESPACE;
	} else if (strcmp(mode, "region") == 0) {
		*test |= MODE_REGION;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong mode: %s", mode);
	}
	/* optional 4th argument selects the bad-block data set (1-based) */
	int bad_blocks_set =
		(argc == 5) ? atoi(argv[ARG_BB_SET]) : DEFAULT_BB_SET;
	if (bad_blocks_set >= 1 && bad_blocks_set <= MAX_BB_SET) {
		/* stored 0-based in the low 4 bits (test-number field) */
		*test |= (bad_blocks_set - 1);
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong bad_blocks_set: %i", bad_blocks_set);
	}
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_badblock_mocks");
	/* sanity check of defines */
	UT_ASSERTeq(atoi(MAX_BB_SET_STR), MAX_BB_SET);
	struct pmem2_source src;
	test_fn *test_func;
	src.type = PMEM2_SOURCE_FD;
	parse_arguments(argc, argv, &src.value.fd, &src.value.ftype,
		&test_func);
	/* the mocks use the encoded fd value as the device's st_rdev too */
	src.value.st_rdev = (dev_t)src.value.fd;
	int result = test_func(&src);
	/* every configuration must end with all bad blocks consumed/cleared */
	UT_ASSERTeq(result, PMEM2_E_NO_BAD_BLOCK_FOUND);
	DONE(NULL);
}
| 8,239 | 22.815029 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_api/pmem2_api.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_api.c -- PMEM2_API_[START|END] unittests
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"
/*
 * map_valid -- return valid mapped pmem2_map and validate mapped memory length
 */
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
	struct pmem2_map *map = NULL;
	PMEM2_MAP(cfg, src, &map);
	/* the whole source must have been mapped */
	UT_ASSERTeq(pmem2_map_get_size(map), size);
	return map;
}
/*
 * test_pmem2_api_logs -- map O_RDWR file and do pmem2_[cpy|set|move]_fns
 *
 * Exercises the memcpy/memmove/memset functions obtained from the mapping
 * and verifies the written contents after each call.
 */
static int
test_pmem2_api_logs(const struct test_case *tc, int argc,
	char *argv[])
{
	if (argc < 1)
		UT_FATAL(
			"usage: test_mem_move_cpy_set_with_map_private <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	/* all three words are exactly 20 characters long */
	const char *word1 = "Persistent memory...";
	const char *word2 = "Nonpersistent memory";
	const char *word3 = "XXXXXXXXXXXXXXXXXXXX";
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
		PMEM2_GRANULARITY_PAGE);
	size_t size = 0;
	PMEM2_SOURCE_SIZE(src, &size);
	struct pmem2_map *map = map_valid(cfg, src, size);
	char *addr = pmem2_map_get_address(map);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	/*
	 * NOTE(review): only strlen(word) bytes are written - no terminating
	 * NUL; the strcmp()s below presumably rely on the mapped file being
	 * zero-filled past offset 20 - confirm against the test setup.
	 */
	memcpy_fn(addr, word1, strlen(word1), 0);
	UT_ASSERTeq(strcmp(addr, word1), 0);
	memmove_fn(addr, word2, strlen(word2), 0);
	UT_ASSERTeq(strcmp(addr, word2), 0);
	memset_fn(addr, 'X', strlen(word3), 0);
	UT_ASSERTeq(strcmp(addr, word3), 0);
	/* cleanup after the test */
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_pmem2_api_logs),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_api");
	/* dispatch the test case(s) named on the command line */
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
| 2,130 | 22.94382 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_obc/rpmemd_obc_test_open.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_open.c -- test cases for open request message
*/
#include "rpmemd_obc_test_common.h"
/*
 * Number of cases for checking open request message. Must be kept in sync
 * with client_bad_msg_open function.
 */
#define BAD_MSG_OPEN_COUNT 11
/*
 * client_bad_msg_open -- check if server detects invalid open request
 * messages
 *
 * For every case one field of an otherwise valid open request is corrupted
 * and the server is expected to drop the connection without responding.
 */
static void
client_bad_msg_open(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	for (int i = 0; i < BAD_MSG_OPEN_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);
		/* start from a valid message, then corrupt a single field */
		*msg = OPEN_MSG;
		msg->hdr.size = msg_size;
		memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
		switch (i) {
		case 0:
			msg->c.provider = 0; /* provider below valid range */
			break;
		case 1:
			msg->c.provider = MAX_RPMEM_PROV; /* above range */
			break;
		case 2:
			msg->pool_desc.size -= 1; /* desc/hdr size mismatch */
			break;
		case 3:
			msg->pool_desc.size += 1; /* desc/hdr size mismatch */
			break;
		case 4:
			/* empty pool descriptor */
			msg->pool_desc.size = 0;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 5:
			/* descriptor too short to hold the terminator */
			msg->pool_desc.size = 1;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 6:
			msg->pool_desc.desc[0] = '\0'; /* empty string */
			break;
		case 7:
			/* NUL in the middle of the descriptor */
			msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
			break;
		case 8:
			/* missing terminating NUL */
			msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
			break;
		case 9:
			/* unsupported protocol version (major) */
			msg->c.major = RPMEM_PROTO_MAJOR + 1;
			break;
		case 10:
			/* unsupported protocol version (minor) */
			msg->c.minor = RPMEM_PROTO_MINOR + 1;
			break;
		default:
			UT_ASSERT(0);
		}
		rpmem_hton_msg_open(msg);
		clnt_send(ssh, msg, msg_size);
		/* the server must drop the connection without a response */
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_open_noresp -- send open request message and don't expect a
 * response
 *
 * The server drops the connection without answering, so the client just
 * waits for the disconnect and cleans up.
 */
static void
client_msg_open_noresp(const char *ctarget)
{
	/* build a valid open request: header followed by the descriptor */
	size_t req_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *req = MALLOC(req_size);
	*req = OPEN_MSG;
	req->hdr.size = req_size;
	memcpy(req->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	char *target = STRDUP(ctarget);
	struct rpmem_ssh *ssh = clnt_connect(target);
	rpmem_hton_msg_open(req);
	clnt_send(ssh, req, req_size);
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);
	FREE(req);
	FREE(target);
}
/*
 * client_msg_open_resp -- send open request message and expect a response
 * with specified status. If status is 0, validate open request response
 * message
 */
static void
client_msg_open_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	struct rpmem_msg_open_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = OPEN_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_open(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_open_resp(&resp);
	if (status) {
		/* on error only the status field is checked */
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		/* on success the whole response must match the test config */
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_OPEN_RESP);
		UT_ASSERTeq(resp.hdr.size,
			sizeof(struct rpmem_msg_open_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_open -- test case for open request message - client side
 *
 * Runs four scenarios against matching server commands: malformed
 * messages, a dropped response, a success response and an error response.
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_open(target);
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_OPEN);
	client_msg_open_noresp(target);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 0);
	client_msg_open_resp(target, 0);
	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 1);
	client_msg_open_resp(target, 1);
	return 1;
}
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_obc/rpmemd_obc_test_create.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_create.c -- test cases for create request message
*/
#include "rpmemd_obc_test_common.h"
/*
 * Number of cases for checking create request message. Must be kept in sync
 * with client_bad_msg_create function.
 */
#define BAD_MSG_CREATE_COUNT 11
/*
 * client_bad_msg_create -- check if server detects invalid create request
 * messages
 *
 * For every case one field of an otherwise valid create request is
 * corrupted and the server is expected to drop the connection without
 * responding.
 */
static void
client_bad_msg_create(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	for (int i = 0; i < BAD_MSG_CREATE_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);
		/* start from a valid message, then corrupt a single field */
		*msg = CREATE_MSG;
		msg->hdr.size = msg_size;
		memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
		switch (i) {
		case 0:
			msg->c.provider = 0; /* provider below valid range */
			break;
		case 1:
			msg->c.provider = MAX_RPMEM_PROV; /* above range */
			break;
		case 2:
			msg->pool_desc.size -= 1; /* desc/hdr size mismatch */
			break;
		case 3:
			msg->pool_desc.size += 1; /* desc/hdr size mismatch */
			break;
		case 4:
			/* empty pool descriptor */
			msg->pool_desc.size = 0;
			msg->hdr.size = sizeof(CREATE_MSG) +
				msg->pool_desc.size;
			break;
		case 5:
			/* descriptor too short to hold the terminator */
			msg->pool_desc.size = 1;
			msg->hdr.size = sizeof(CREATE_MSG) +
				msg->pool_desc.size;
			break;
		case 6:
			msg->pool_desc.desc[0] = '\0'; /* empty string */
			break;
		case 7:
			/* NUL in the middle of the descriptor */
			msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
			break;
		case 8:
			/* missing terminating NUL */
			msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
			break;
		case 9:
			/* unsupported protocol version (major) */
			msg->c.major = RPMEM_PROTO_MAJOR + 1;
			break;
		case 10:
			/* unsupported protocol version (minor) */
			msg->c.minor = RPMEM_PROTO_MINOR + 1;
			break;
		default:
			UT_ASSERT(0);
		}
		rpmem_hton_msg_create(msg);
		clnt_send(ssh, msg, msg_size);
		/* the server must drop the connection without a response */
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_create_noresp -- send create request message and don't expect
 * a response
 *
 * The server side (server_msg_noresp) drops the connection instead of
 * responding, so - consistently with client_msg_open_noresp() in the open
 * test - wait for the disconnect before tearing the connection down.
 */
static void
client_msg_create_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_create(msg);
	clnt_send(ssh, msg, msg_size);
	/* wait until the server drops the connection (no response expected) */
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_msg_create_resp -- send create request message and expect a response
 * with specified status. If status is 0, validate create request response
 * message
 */
static void
client_msg_create_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_msg_create_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);
	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
	rpmem_hton_msg_create(msg);
	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_create_resp(&resp);
	if (status) {
		/* on error only the status field is checked */
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		/* on success the whole response must match the test config */
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_CREATE_RESP);
		UT_ASSERTeq(resp.hdr.size,
			sizeof(struct rpmem_msg_create_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}
	clnt_close(ssh);
	FREE(msg);
	FREE(target);
}
/*
 * client_create -- test case for create request message - client side
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	/* exercise all malformed create messages */
	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_create(target);

	/* server drops the connection without responding */
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CREATE);
	client_msg_create_noresp(target);

	/* server responds with success (0) and with an error status (1) */
	for (int st = 0; st <= 1; ++st) {
		set_rpmem_cmd("server_msg_resp %d %d",
				RPMEM_MSG_TYPE_CREATE, st);
		client_msg_create_resp(target, st);
	}

	return 1;
}
| 4,165 | 22.016575 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_obc/rpmemd_obc_test_set_attr.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* rpmemd_obc_test_set_attr.c -- test cases for set attributes request message
*/
#include "rpmemd_obc_test_common.h"
/*
 * client_msg_set_attr_noresp -- send a set-attributes request message
 * without waiting for any response from the server
 */
static void
client_msg_set_attr_noresp(const char *ctarget)
{
	char *tgt = STRDUP(ctarget);
	size_t req_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *req = MALLOC(req_size);

	/* prepare the request and convert it to network byte order */
	*req = SET_ATTR_MSG;
	rpmem_hton_msg_set_attr(req);

	struct rpmem_ssh *ssh = clnt_connect(tgt);
	clnt_send(ssh, req, req_size);
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);

	FREE(req);
	FREE(tgt);
}
/*
 * client_msg_set_attr_resp -- send a set-attributes request message and
 * receive the response; the status field is always checked, and when the
 * expected status is 0 the response header is validated as well
 */
static void
client_msg_set_attr_resp(const char *ctarget, int status)
{
	char *tgt = STRDUP(ctarget);
	size_t req_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *req = MALLOC(req_size);
	struct rpmem_msg_set_attr_resp resp;

	*req = SET_ATTR_MSG;
	rpmem_hton_msg_set_attr(req);

	struct rpmem_ssh *ssh = clnt_connect(tgt);
	clnt_send(ssh, req, req_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_set_attr_resp(&resp);

	/* the status is verified for both success and failure cases */
	UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	if (!status) {
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_SET_ATTR_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_set_attr_resp));
	}

	clnt_close(ssh);
	FREE(req);
	FREE(tgt);
}
/*
 * client_set_attr -- test case for set attributes request message - client
 * side
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	/* server drops the connection without responding */
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_SET_ATTR);
	client_msg_set_attr_noresp(target);

	/* server responds with success (0) and with an error status (1) */
	for (int st = 0; st <= 1; ++st) {
		set_rpmem_cmd("server_msg_resp %d %d",
				RPMEM_MSG_TYPE_SET_ATTR, st);
		client_msg_set_attr_resp(target, st);
	}

	return 1;
}
| 2,255 | 22.5 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_obc/rpmemd_obc_test_close.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd_obc_test_close.c -- test cases for close request message
*/
#include "rpmemd_obc_test_common.h"
/*
* client_msg_close_noresp -- send close request message and don't expect a
* response
*/
static void
client_msg_close_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
struct rpmem_msg_close msg = CLOSE_MSG;
rpmem_hton_msg_close(&msg);
struct rpmem_ssh *ssh = clnt_connect(target);
clnt_send(ssh, &msg, sizeof(msg));
clnt_wait_disconnect(ssh);
clnt_close(ssh);
FREE(target);
}
/*
* client_msg_close_resp -- send close request message and expect a response
* with specified status. If status is 0, validate close request response
* message
*/
static void
client_msg_close_resp(const char *ctarget, int status)
{
char *target = STRDUP(ctarget);
struct rpmem_msg_close msg = CLOSE_MSG;
rpmem_hton_msg_close(&msg);
struct rpmem_msg_close_resp resp;
struct rpmem_ssh *ssh = clnt_connect(target);
clnt_send(ssh, &msg, sizeof(msg));
clnt_recv(ssh, &resp, sizeof(resp));
rpmem_ntoh_msg_close_resp(&resp);
if (status)
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
clnt_close(ssh);
FREE(target);
}
/*
 * client_close -- test case for close request message - client side
 */
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	/* server drops the connection without responding */
	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CLOSE);
	client_msg_close_noresp(target);

	/* server responds with success (0) and with an error status (1) */
	for (int st = 0; st <= 1; ++st) {
		set_rpmem_cmd("server_msg_resp %d %d",
				RPMEM_MSG_TYPE_CLOSE, st);
		client_msg_close_resp(target, st);
	}

	return 1;
}
| 1,791 | 21.683544 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_obc/rpmemd_obc_test_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_common.h -- common declarations for rpmemd_obc test
*/
#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#include "rpmemd_log.h"
#include "rpmemd_obc.h"
/* connection parameters the mocked server is expected to respond with */
#define PORT 1234
#define RKEY 0x0123456789abcdef
#define RADDR 0xfedcba9876543210
#define PERSIST_METHOD RPMEM_PM_APM
/* pool attributes carried by the create request template (CREATE_MSG) */
#define POOL_ATTR_INIT {\
	.signature = "<RPMEM>",\
	.major = 1,\
	.compat_features = 2,\
	.incompat_features = 3,\
	.ro_compat_features = 4,\
	.poolset_uuid = "POOLSET_UUID0123",\
	.uuid = "UUID0123456789AB",\
	.next_uuid = "NEXT_UUID0123456",\
	.prev_uuid = "PREV_UUID0123456",\
	.user_flags = "USER_FLAGS012345",\
}
/* alternative attributes carried by the set-attributes template */
#define POOL_ATTR_ALT {\
	.signature = "<ALT>",\
	.major = 5,\
	.compat_features = 6,\
	.incompat_features = 7,\
	.ro_compat_features = 8,\
	.poolset_uuid = "UUID_POOLSET_ALT",\
	.uuid = "ALT_UUIDCDEFFEDC",\
	.next_uuid = "456UUID_NEXT_ALT",\
	.prev_uuid = "UUID012_ALT_PREV",\
	.user_flags = "012345USER_FLAGS",\
}
/* parameters of the mocked pool and connection */
#define POOL_SIZE 0x0001234567abcdef
#define NLANES 0x123
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool.set"
#define BUFF_SIZE 8192
static const char pool_desc[] = POOL_DESC;
/* size of the pool descriptor including the terminating NUL */
#define POOL_DESC_SIZE (sizeof(pool_desc) / sizeof(pool_desc[0]))
/* ssh-based client helpers shared by the rpmemd_obc test cases */
struct rpmem_ssh *clnt_connect(char *target);
void clnt_wait_disconnect(struct rpmem_ssh *ssh);
void clnt_send(struct rpmem_ssh *ssh, const void *buff, size_t len);
void clnt_recv(struct rpmem_ssh *ssh, void *buff, size_t len);
void clnt_close(struct rpmem_ssh *ssh);
/*
 * conn_wait_close -- presumably selects whether the server closes the
 * connection itself or waits for the client to close it
 * NOTE(review): confirm against the common test module implementation.
 */
enum conn_wait_close {
	CONN_CLOSE,
	CONN_WAIT_CLOSE,
};
/* sets the command used by the rpmem ssh layer (printf-style format) */
void set_rpmem_cmd(const char *fmt, ...);
extern struct rpmemd_obc_requests REQ_CB;
/*
 * req_cb_arg -- parameters passed to the mocked request callbacks
 * NOTE(review): field semantics are defined in the common test module,
 * which is outside this header -- confirm before relying on them.
 */
struct req_cb_arg {
	int resp;
	unsigned long long types;
	int force_ret;
	int ret;
	int status;
};
/* a bare message header template (close type) */
static const struct rpmem_msg_hdr MSG_HDR = {
	.type = RPMEM_MSG_TYPE_CLOSE,
	.size = sizeof(struct rpmem_msg_hdr),
};
/* a valid create request template; pool descriptor is appended by tests */
static const struct rpmem_msg_create CREATE_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CREATE,
		.size = sizeof(struct rpmem_msg_create),
	},
	.c = {
		.major = RPMEM_PROTO_MAJOR,
		.minor = RPMEM_PROTO_MINOR,
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.buff_size = BUFF_SIZE,
	},
	.pool_attr = POOL_ATTR_INIT,
	.pool_desc = {
		.size = POOL_DESC_SIZE,
	},
};
/* a valid open request template; pool descriptor is appended by tests */
static const struct rpmem_msg_open OPEN_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_OPEN,
		.size = sizeof(struct rpmem_msg_open),
	},
	.c = {
		.major = RPMEM_PROTO_MAJOR,
		.minor = RPMEM_PROTO_MINOR,
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.buff_size = BUFF_SIZE,
	},
	.pool_desc = {
		.size = POOL_DESC_SIZE,
	},
};
/* a valid close request template */
static const struct rpmem_msg_close CLOSE_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CLOSE,
		.size = sizeof(struct rpmem_msg_close),
	},
};
/* a valid set-attributes request template (uses the ALT attributes) */
static const struct rpmem_msg_set_attr SET_ATTR_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_SET_ATTR,
		.size = sizeof(struct rpmem_msg_set_attr),
	},
	.pool_attr = POOL_ATTR_ALT,
};
TEST_CASE_DECLARE(server_accept_sim);
TEST_CASE_DECLARE(server_accept_sim_fork);
TEST_CASE_DECLARE(client_accept_sim);
TEST_CASE_DECLARE(server_accept_seq);
TEST_CASE_DECLARE(server_accept_seq_fork);
TEST_CASE_DECLARE(client_accept_seq);
TEST_CASE_DECLARE(client_bad_msg_hdr);
TEST_CASE_DECLARE(server_bad_msg);
TEST_CASE_DECLARE(server_msg_noresp);
TEST_CASE_DECLARE(server_msg_resp);
TEST_CASE_DECLARE(client_econnreset);
TEST_CASE_DECLARE(server_econnreset);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
| 3,791 | 23 | 70 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/ctl_prefault/ctl_prefault.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* ctl_prefault.c -- tests for the ctl entry points: prefault
*/
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "unittest.h"
#define OBJ_STR "obj"
#define BLK_STR "blk"
#define LOG_STR "log"
#define BSIZE 20
#define LAYOUT "obj_ctl_prefault"
#ifdef __FreeBSD__
typedef char vec_t;
#else
typedef unsigned char vec_t;
#endif
typedef int (*fun)(void *, const char *, void *);
/*
 * prefault_fun -- exercise the prefault.at_open/at_create ctl entry points
 *
 * prefault == 1 tests "prefault.at_open", prefault == 2 tests
 * "prefault.at_create"; any other value is a no-op (same as the original
 * behavior). For the selected query this verifies the knob defaults to 0,
 * can be set to 1, and then reads back as 1.
 *
 * Fix: the two branches were duplicated verbatim except for the query
 * string -- the common sequence is factored out.
 */
static void
prefault_fun(int prefault, fun get_func, fun set_func)
{
	const char *query;

	if (prefault == 1)
		query = "prefault.at_open";
	else if (prefault == 2)
		query = "prefault.at_create";
	else
		return;

	int ret;
	int arg;
	int arg_read;

	/* the knob must be off by default */
	arg_read = -1;
	ret = get_func(NULL, query, &arg_read);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg_read, 0);

	/* turn the knob on... */
	arg = 1;
	ret = set_func(NULL, query, &arg);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg, 1);

	/* ...and verify it reads back as on */
	arg_read = -1;
	ret = get_func(NULL, query, &arg_read);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arg_read, 1);
}
/*
 * count_resident_pages -- return how many pages of the given mapping are
 * resident in memory, according to mincore(2)
 */
static size_t
count_resident_pages(void *pool, size_t length)
{
	/* round the length up to a whole number of pages */
	size_t npages = (length + Ut_pagesize - 1) / Ut_pagesize;
	vec_t *incore = MALLOC(sizeof(*incore) * npages);

	int ret = mincore(pool, length, incore);
	UT_ASSERTeq(ret, 0);

	/* only the lowest bit of each vector entry is meaningful */
	size_t resident = 0;
	for (size_t n = 0; n < npages; ++n)
		resident += incore[n] & 0x1;

	FREE(incore);
	return resident;
}
/*
 * test_obj -- create or open a PMEMobjpool and print the number of its
 * resident pages
 */
static void
test_obj(const char *path, int open)
{
	PMEMobjpool *pop;
	if (open) {
		if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
			UT_FATAL("!pmemobj_open: %s", path);
	} else {
		if ((pop = pmemobj_create(path, LAYOUT,
				PMEMOBJ_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemobj_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(pop, PMEMOBJ_MIN_POOL);

	pmemobj_close(pop);

	/* fix: %zu is the conversion for size_t (%ld expects long -- UB) */
	UT_OUT("%zu", resident_pages);
}
/*
 * test_blk -- create or open a PMEMblkpool and print the number of its
 * resident pages
 */
static void
test_blk(const char *path, int open)
{
	PMEMblkpool *pbp;
	if (open) {
		if ((pbp = pmemblk_open(path, BSIZE)) == NULL)
			UT_FATAL("!pmemblk_open: %s", path);
	} else {
		if ((pbp = pmemblk_create(path, BSIZE, PMEMBLK_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemblk_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(pbp, PMEMBLK_MIN_POOL);

	pmemblk_close(pbp);

	/* fix: %zu is the conversion for size_t (%ld expects long -- UB) */
	UT_OUT("%zu", resident_pages);
}
/*
 * test_log -- create or open a PMEMlogpool and print the number of its
 * resident pages
 */
static void
test_log(const char *path, int open)
{
	PMEMlogpool *plp;

	/*
	 * To test prefaulting, pool must have size at least equal to 2 pages.
	 * If 2MB huge pages are used this is at least 4MB.
	 */
	size_t pool_size = 2 * PMEMLOG_MIN_POOL;

	if (open) {
		if ((plp = pmemlog_open(path)) == NULL)
			UT_FATAL("!pmemlog_open: %s", path);
	} else {
		if ((plp = pmemlog_create(path, pool_size,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemlog_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(plp, pool_size);

	pmemlog_close(plp);

	/* fix: %zu is the conversion for size_t (%ld expects long -- UB) */
	UT_OUT("%zu", resident_pages);
}
#define USAGE() do {\
UT_FATAL("usage: %s file-name type(obj/blk/log) prefault(0/1/2) "\
"open(0/1)", argv[0]);\
} while (0)
/*
 * main -- dispatch on pool type: verify the prefault ctl knob for the
 * matching libpmem* library, then create/open a pool of that type and
 * print its resident page count
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "ctl_prefault");
	if (argc != 5)
		USAGE();
	/* argv: file-name type(obj/blk/log) prefault(0/1/2) open(0/1) */
	char *type = argv[1];
	const char *path = argv[2];
	int prefault = atoi(argv[3]);
	int open = atoi(argv[4]);
	if (strcmp(type, OBJ_STR) == 0) {
		prefault_fun(prefault, (fun)pmemobj_ctl_get,
				(fun)pmemobj_ctl_set);
		test_obj(path, open);
	} else if (strcmp(type, BLK_STR) == 0) {
		prefault_fun(prefault, (fun)pmemblk_ctl_get,
				(fun)pmemblk_ctl_set);
		test_blk(path, open);
	} else if (strcmp(type, LOG_STR) == 0) {
		prefault_fun(prefault, (fun)pmemlog_ctl_get,
				(fun)pmemlog_ctl_set);
		test_log(path, open);
	} else
		USAGE();
	DONE(NULL);
}
| 4,326 | 20.527363 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memcpy/memcpy_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memcpy_common.c -- common part for tests doing a persistent memcpy
*/
#include "unittest.h"
#include "memcpy_common.h"
#include "valgrind_internal.h"
/*
 * do_memcpy: Worker function for memcpy
 *
 * Always work within the boundary of bytes. Fill in 1/2 of the src
 * memory with the pattern we want to write. This allows us to check
 * that we did not overwrite anything we were not supposed to in the
 * dest. Use the non-pmem version of the memset/memcpy commands
 * so as not to introduce any possible side effects.
 */
void
do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
		size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
		unsigned flags, persist_fn persist)
{
	void *ret;
	char *buf = MALLOC(bytes);
	/* zero both regions so leftover data cannot mask a failure */
	memset(buf, 0, bytes);
	memset(dest, 0, bytes);
	persist(dest, bytes);
	memset(src, 0, bytes);
	persist(src, bytes);
	/* two recognizable patterns in the first half of src */
	memset(src, 0x5A, bytes / 4);
	persist(src, bytes / 4);
	memset(src + bytes / 4, 0x46, bytes / 4);
	persist(src + bytes / 4, bytes / 4);
	/* dest == src: copy onto itself must return dest and change nothing */
	ret = fn(dest + dest_off, dest + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);
	/* len == 0: must return dest and change nothing */
	ret = fn(dest + dest_off, src, 0, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);
	/* the actual copy under test */
	ret = fn(dest + dest_off, src + src_off, bytes / 2, flags);
	if (flags & PMEM2_F_MEM_NOFLUSH)
		VALGRIND_DO_PERSIST((dest + dest_off), bytes / 2);
	UT_ASSERTeq(ret, dest + dest_off);
	/* memcmp validates that memory holds what we expect */
	if (memcmp(src + src_off, dest + dest_off, bytes / 2))
		UT_FATAL("%s: first %zu bytes do not match",
				file_name, bytes / 2);
	/* Now validate the contents of the file */
	LSEEK(fd, (os_off_t)(dest_off + (int)(mapped_len / 2)), SEEK_SET);
	if (READ(fd, buf, bytes / 2) == bytes / 2) {
		if (memcmp(src + src_off, buf, bytes / 2))
			UT_FATAL("%s: first %zu bytes do not match",
					file_name, bytes / 2);
	}
	FREE(buf);
}
/* all tested combinations of the PMEM_F_MEM_* flags */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 2,491 | 27.643678 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memcpy/memcpy_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memcpy_common.h -- header file for common memcpy utilities
*/
#ifndef MEMCPY_COMMON_H
#define MEMCPY_COMMON_H 1
#include "unittest.h"
#include "file.h"
/* signature of the pmem2 memcpy-like function under test */
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
/* signature of the pmem2 persist function used to flush test data */
typedef void (*persist_fn)(const void *ptr, size_t len);
/* all tested combinations of PMEM_F_MEM_* flags (defined in the .c file) */
extern unsigned Flags[10];
void do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
	size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
	unsigned flags, persist_fn p);
#endif
| 611 | 23.48 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memcpy/pmem2_memcpy.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_memcpy.c -- test for doing a memcpy from libpmem2
*
* usage: pmem2_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "memcpy_common.h"
/*
 * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
 * of memcpy functions, one run per entry of the Flags[] table
 */
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
		size_t bytes, size_t mapped_len, const char *file_name,
		persist_fn p, memcpy_fn fn)
{
	/*
	 * Fix: ARRAY_SIZE() yields size_t -- iterate with a size_t index
	 * to avoid the signed/unsigned comparison in the loop condition.
	 */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
				file_name, fn, Flags[i], p);
	}
}
/*
 * main -- map the given file with libpmem2 and run the memcpy variants in
 * both directions (src above dest, then dest above src)
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dest;
	char *src;
	char *src_orig;
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;
	if (argc != 5)
		UT_FATAL("usage: %s file destoff srcoff length", argv[0]);
	/* record the environment knobs in the test banner */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_memcpy %s %s %s %s %savx %savx512f",
			argv[2], argv[3], argv[4], thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	util_init();
	fd = OPEN(argv[1], O_RDWR);
	UT_ASSERT(fd != -1);
	int dest_off = atoi(argv[2]);
	int src_off = atoi(argv[3]);
	size_t bytes = strtoul(argv[4], NULL, 0);
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	/* src > dst: src points at the second half of the mapping */
	mapped_len = pmem2_map_get_size(map);
	dest = pmem2_map_get_address(map);
	if (dest == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);
	src_orig = src = dest + mapped_len / 2;
	UT_ASSERT(src > dest);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);
	memset(dest, 0, (2 * bytes));
	persist(dest, 2 * bytes);
	memset(src, 0, (2 * bytes));
	persist(src, 2 * bytes);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
			0, argv[1], persist, memcpy_fn);
	/* swap the regions to test the dest > src direction */
	src = dest;
	dest = src_orig;
	if (dest <= src)
		UT_FATAL("cannot map files in memory order");
	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, mapped_len,
			argv[1], persist, memcpy_fn);
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	DONE(NULL);
}
| 2,527 | 22.849057 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_is_zeroed/util_is_zeroed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* util_is_zeroed.c -- unit test for util_is_zeroed
*/
#include "unittest.h"
#include "util.h"
/*
 * main -- exercise util_is_zeroed() on zeroed, non-zeroed and boundary
 * regions of a 3000-byte buffer (bytes 1000..1999 start out zeroed)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_is_zeroed");
	util_init();
	/* three 1000-byte thirds: 0x11 / 0x00 / 0xff */
	char bigbuf[3000];
	memset(bigbuf + 0, 0x11, 1000);
	memset(bigbuf + 1000, 0x0, 1000);
	memset(bigbuf + 2000, 0xff, 1000);
	UT_ASSERTeq(util_is_zeroed(bigbuf, 1000), 0);
	UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1000), 1);
	UT_ASSERTeq(util_is_zeroed(bigbuf + 2000, 1000), 0);
	/* an empty range is always considered zeroed */
	UT_ASSERTeq(util_is_zeroed(bigbuf, 0), 1);
	/* ranges straddling the zeroed third by one byte are not zeroed */
	UT_ASSERTeq(util_is_zeroed(bigbuf + 999, 1000), 0);
	UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1001), 0);
	UT_ASSERTeq(util_is_zeroed(bigbuf + 1001, 1000), 0);
	/* a single non-zero byte anywhere inside the range must be detected */
	char *buf = bigbuf + 1000;
	buf[0] = 1;
	UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
	memset(buf, 0, 1000);
	buf[1] = 1;
	UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
	memset(buf, 0, 1000);
	buf[239] = 1;
	UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
	memset(buf, 0, 1000);
	buf[999] = 1;
	UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
	memset(buf, 0, 1000);
	/* a non-zero byte just past the end must not affect the result */
	buf[1000] = 1;
	UT_ASSERTeq(util_is_zeroed(buf, 1000), 1);
	DONE(NULL);
}
| 1,196 | 20.763636 | 53 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_map_file/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_map_file/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Logs the call and fails with ENOSPC for requests larger than MAX_LEN;
 * otherwise forwards to the real implementation.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Logs the call and fails with errno == ENOSPC for requests larger than
 * MAX_LEN; otherwise forwards to the real implementation.
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_heap/obj_heap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_heap.c -- unit test for heap
*
* operations are: 't', 'b', 'r', 'c', 'h', 'a', 'n', 's'
* t: do test_heap, test_recycler
* b: do fault_injection in function container_new_ravl
* r: do fault_injection in function recycler_new
* c: do fault_injection in function container_new_seglists
* h: do fault_injection in function heap_boot
* a: do fault_injection in function alloc_class_new
* n: do fault_injection in function alloc_class_collection_new
* s: do fault_injection in function stats_new
*/
#include "libpmemobj.h"
#include "palloc.h"
#include "heap.h"
#include "recycler.h"
#include "obj.h"
#include "unittest.h"
#include "util.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "container.h"
#include "alloc_class.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE PMEMOBJ_MIN_POOL
#define MAX_BLOCKS 3
/* mock_pop -- mocked pool: the pool header followed by the heap area */
struct mock_pop {
	PMEMobjpool p;
	void *heap;
};
/* obj_heap_persist -- pmem_ops persist callback backed by pmem_msync() */
static int
obj_heap_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	int ret = pmem_msync(ptr, sz);
	UT_ASSERTeq(ret, 0);
	return 0;
}
/* obj_heap_flush -- pmem_ops flush callback backed by pmem_msync() */
static int
obj_heap_flush(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	int ret = pmem_msync(ptr, sz);
	UT_ASSERTeq(ret, 0);
	return 0;
}
/* obj_heap_drain -- pmem_ops drain callback; intentionally a no-op */
static void
obj_heap_drain(void *ctx)
{
}
/* obj_heap_memset -- pmem_ops memset callback: memset followed by msync */
static void *
obj_heap_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	memset(ptr, c, sz);
	int ret = pmem_msync(ptr, sz);
	UT_ASSERTeq(ret, 0);
	return ptr;
}
/*
 * init_run_with_score -- initialize the given chunk as a run whose bitmap
 * is fully set except for words [0, score/64], which are cleared (free);
 * score must be a multiple of 64
 */
static void
init_run_with_score(struct heap_layout *l, uint32_t chunk_id, int score)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;
	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
	run->hdr.alignment = 0;
	run->hdr.block_size = 1024;
	/* start with a fully allocated bitmap... */
	memset(run->content, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
	/* ...then free whole 64-bit words from the bottom up */
	UT_ASSERTeq(score % 64, 0);
	score /= 64;
	uint64_t *bitmap = (uint64_t *)run->content;
	for (; score >= 0; --score) {
		bitmap[score] = 0;
	}
}
/*
 * init_run_with_max_block -- initialize the given chunk as a run with a
 * hand-crafted bitmap whose largest free region is 10 bits wide
 */
static void
init_run_with_max_block(struct heap_layout *l, uint32_t chunk_id)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;
	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
	uint64_t *bitmap = (uint64_t *)run->content;
	run->hdr.block_size = 1024;
	run->hdr.alignment = 0;
	/* everything allocated except the pattern below */
	memset(bitmap, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
	/* the biggest block is 10 bits */
	bitmap[3] =
	0b1000001110111000111111110000111111000000000011111111110000000011;
}
/*
 * test_container -- exercise insert/get_rm_bestfit/rm_all/is_empty on the
 * given block container implementation, then destroy it
 */
static void
test_container(struct block_container *bc, struct palloc_heap *heap)
{
	UT_ASSERTne(bc, NULL);
	/* four blocks of increasing size_idx: 4, 8, 16, 32 */
	struct memory_block a = {1, 0, 1, 4};
	struct memory_block b = {1, 0, 2, 8};
	struct memory_block c = {1, 0, 3, 16};
	struct memory_block d = {1, 0, 5, 32};
	init_run_with_score(heap->layout, 1, 128);
	memblock_rebuild_state(heap, &a);
	memblock_rebuild_state(heap, &b);
	memblock_rebuild_state(heap, &c);
	memblock_rebuild_state(heap, &d);
	int ret;
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &d);
	UT_ASSERTeq(ret, 0);
	/* a request bigger than every stored block must fail */
	struct memory_block invalid_ret = {0, 0, 6, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &invalid_ret);
	UT_ASSERTeq(ret, ENOMEM);
	/* exact-fit requests return the matching blocks */
	struct memory_block b_ret = {0, 0, 2, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &b_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(b_ret.chunk_id, b.chunk_id);
	struct memory_block a_ret = {0, 0, 1, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &a_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(a_ret.chunk_id, a.chunk_id);
	struct memory_block c_ret = {0, 0, 3, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(c_ret.chunk_id, c.chunk_id);
	struct memory_block d_ret = {0, 0, 4, 0}; /* less one than target */
	ret = bc->c_ops->get_rm_bestfit(bc, &d_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(d_ret.chunk_id, d.chunk_id);
	/* the container is now drained */
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);
	/* refill and verify rm_all empties the container */
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);
	bc->c_ops->rm_all(bc);
	ret = bc->c_ops->is_empty(bc);
	UT_ASSERTeq(ret, 1);
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);
	bc->c_ops->destroy(bc);
}
/*
 * do_fault_injection_new_ravl -- fail the allocation inside
 * container_new_ravl() and verify it returns NULL with errno == ENOMEM
 */
static void
do_fault_injection_new_ravl()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_ravl");

	struct block_container *c = container_new_ravl(NULL);
	UT_ASSERTeq(c, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_new_seglists -- fail the allocation inside
 * container_new_seglists() and verify it returns NULL with errno == ENOMEM
 */
static void
do_fault_injection_new_seglists()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_seglists");

	struct block_container *c = container_new_seglists(NULL);
	UT_ASSERTeq(c, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_heap_boot -- fail the allocation inside heap_boot()
 * on a freshly mmapped mock pool and verify it reports ENOMEM
 */
static void
do_fault_injection_heap_boot()
{
	if (!pmemobj_fault_injection_enabled())
		return;
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	pop->p_ops.persist = obj_heap_persist;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct pmem_ops *p_ops = &pop->p_ops;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "heap_boot");
	int r = heap_boot(NULL, NULL, heap_size, &pop->heap_size, NULL, p_ops,
		NULL, NULL);
	UT_ASSERTne(r, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_recycler -- fail the allocation inside recycler_new()
 * and verify it returns NULL with errno == ENOMEM
 */
static void
do_fault_injection_recycler()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "recycler_new");

	size_t active_arenas = 1;
	struct recycler *rec = recycler_new(NULL, 0, &active_arenas);
	UT_ASSERTeq(rec, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_new -- fail the i-th allocation inside
 * alloc_class_new() and verify the whole collection creation fails with
 * errno == ENOMEM
 */
static void
do_fault_injection_class_new(int i)
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, i, "alloc_class_new");

	struct alloc_class_collection *coll = alloc_class_collection_new();
	UT_ASSERTeq(coll, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_collection_new -- fail the allocation inside
 * alloc_class_collection_new() and verify it returns NULL with
 * errno == ENOMEM
 */
static void
do_fault_injection_class_collection_new()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "alloc_class_collection_new");

	struct alloc_class_collection *coll = alloc_class_collection_new();
	UT_ASSERTeq(coll, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_stats -- fail the allocation inside stats_new() and
 * verify it returns NULL with errno == ENOMEM
 */
static void
do_fault_injection_stats()
{
	if (!pmemobj_fault_injection_enabled())
		return;

	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "stats_new");

	struct stats *stats = stats_new(NULL);
	UT_ASSERTeq(stats, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_heap -- boot a heap on an anonymous mapping, exercise the block
 * containers, the allocation classes and run allocation, then verify the
 * heap is still consistent and clean up
 */
static void
test_heap(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	/* the heap area starts right after the pool header */
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;
	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);
	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	/* an uninitialized area must fail the consistency check */
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);
	/* both container implementations must satisfy the same contract */
	test_container((struct block_container *)container_new_ravl(heap),
		heap);
	test_container((struct block_container *)container_new_seglists(heap),
		heap);
	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);
	UT_ASSERT(c_small->unit_size < c_big->unit_size);
	/* new small buckets should be empty */
	UT_ASSERT(c_big->type == CLASS_RUN);
	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};
	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);
	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run->id,
		HEAP_ARENA_PER_THREAD);
	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
	} while (old_run.block_off != new_run.block_off);
	heap_bucket_release(heap, b_run);
	stats_delete(pop, s);
	/* the heap must still be consistent after all the allocations */
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);
	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * test_heap_with_size -- tests scenarios with not-nicely aligned sizes
 */
static void
test_heap_with_size()
{
	/*
	 * To trigger bug with incorrect metadata alignment we need to
	 * use a size that uses exactly the size used in bugged zone size
	 * calculations.
	 */
	size_t size = PMEMOBJ_MIN_POOL + sizeof(struct zone_header) +
		sizeof(struct chunk_header) * MAX_CHUNK +
		sizeof(PMEMobjpool);
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(size,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, size);
	/* the heap area starts right after the pool header */
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;
	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = size - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, NULL, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);
	/* drain the default bucket completely */
	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
	struct memory_block mb;
	mb.size_idx = 1;
	while (heap_get_bestfit_block(heap, b_def, &mb) == 0)
		;
	/* mb should now be the last chunk in the heap */
	char *ptr = mb.m_ops->get_real_data(&mb);
	size_t s = mb.m_ops->get_real_size(&mb);
	/* last chunk should be within the heap and accessible */
	UT_ASSERT((size_t)ptr + s <= (size_t)mpop + size);
	VALGRIND_DO_MAKE_MEM_DEFINED(ptr, s);
	memset(ptr, 0xc, s);
	heap_bucket_release(heap, b_def);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);
	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, size);
}
/*
 * test_recycler -- exercises recycler_put/recycler_get on a mock heap
 *
 * Verifies that runs put into the recycler can be retrieved again and that
 * a request larger than the largest free block in a run fails with ENOMEM.
 * The statement order matters: runs must be initialized and their state
 * rebuilt before they are handed to the recycler.
 */
static void
test_recycler(void)
{
struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
Ut_mmap_align);
PMEMobjpool *pop = &mpop->p;
memset(pop, 0, MOCK_POOL_SIZE);
/* wire the mock pool's persistence ops to the local stub callbacks */
pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
pop->p_ops.persist = obj_heap_persist;
pop->p_ops.flush = obj_heap_flush;
pop->p_ops.drain = obj_heap_drain;
pop->p_ops.memset = obj_heap_memset;
pop->p_ops.base = pop;
pop->set = MALLOC(sizeof(*(pop->set)));
pop->set->options = 0;
pop->set->directory_based = 0;
void *heap_start = (char *)pop + pop->heap_offset;
uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
struct palloc_heap *heap = &pop->heap;
struct pmem_ops *p_ops = &pop->p_ops;
struct stats *s = stats_new(pop);
UT_ASSERTne(s, NULL);
UT_ASSERT(heap_check(heap_start, heap_size) != 0);
UT_ASSERT(heap_init(heap_start, heap_size,
&pop->heap_size, p_ops) == 0);
UT_ASSERT(heap_boot(heap, heap_start, heap_size,
&pop->heap_size,
pop, p_ops, s, pop->set) == 0);
UT_ASSERT(heap_buckets_init(heap) == 0);
UT_ASSERT(pop->heap.rt != NULL);
/* trigger heap bucket populate */
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = 1;
struct bucket *b = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
heap_bucket_release(heap, b);
int ret;
size_t active_arenas = 1;
struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */,
&active_arenas);
UT_ASSERTne(r, NULL);
/* two runs with different scores; chunk 15 acts as a control */
init_run_with_score(pop->heap.layout, 0, 64);
init_run_with_score(pop->heap.layout, 1, 128);
init_run_with_score(pop->heap.layout, 15, 0);
struct memory_block mrun = {0, 0, 1, 0};
struct memory_block mrun2 = {1, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun);
memblock_rebuild_state(&pop->heap, &mrun2);
ret = recycler_put(r, &mrun,
recycler_element_new(&pop->heap, &mrun));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun2,
recycler_element_new(&pop->heap, &mrun2));
UT_ASSERTeq(ret, 0);
/* both runs must come back out of the recycler */
struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
mrun_ret.size_idx = 1;
struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
mrun2_ret.size_idx = 1;
ret = recycler_get(r, &mrun_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun2_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
/* repeat with four runs of increasing score */
init_run_with_score(pop->heap.layout, 7, 64);
init_run_with_score(pop->heap.layout, 2, 128);
init_run_with_score(pop->heap.layout, 5, 192);
init_run_with_score(pop->heap.layout, 10, 256);
mrun.chunk_id = 7;
mrun2.chunk_id = 2;
struct memory_block mrun3 = {5, 0, 1, 0};
struct memory_block mrun4 = {10, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun3);
memblock_rebuild_state(&pop->heap, &mrun4);
mrun_ret.size_idx = 1;
mrun2_ret.size_idx = 1;
struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
mrun3_ret.size_idx = 1;
struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
mrun4_ret.size_idx = 1;
ret = recycler_put(r, &mrun,
recycler_element_new(&pop->heap, &mrun));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun2,
recycler_element_new(&pop->heap, &mrun2));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun3,
recycler_element_new(&pop->heap, &mrun3));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun4,
recycler_element_new(&pop->heap, &mrun4));
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun2_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun3_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun4_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);
/*
 * A run whose largest free block is 10 units: asking the recycler for
 * 11 units must fail with ENOMEM, asking for 10 must succeed.
 */
init_run_with_max_block(pop->heap.layout, 1);
struct memory_block mrun5 = {1, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun5);
ret = recycler_put(r, &mrun5,
recycler_element_new(&pop->heap, &mrun5));
UT_ASSERTeq(ret, 0);
struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
mrun5_ret.size_idx = 11;
ret = recycler_get(r, &mrun5_ret);
UT_ASSERTeq(ret, ENOMEM);
mrun5_ret = MEMORY_BLOCK_NONE;
mrun5_ret.size_idx = 10;
ret = recycler_get(r, &mrun5_ret);
UT_ASSERTeq(ret, 0);
recycler_delete(r);
stats_delete(pop, s);
heap_cleanup(heap);
UT_ASSERT(heap->rt == NULL);
FREE(pop->set);
MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * main -- entry point of the obj_heap test; argv[1][0] selects the scenario:
 * 't' runs the functional heap tests, the remaining letters each run one
 * fault-injection scenario.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_heap");

	if (argc < 2)
		UT_FATAL("usage: %s path <t|b|r|c|h|a|n|s>", argv[0]);

	char op = argv[1][0];

	if (op == 't') {
		test_heap();
		test_heap_with_size();
		test_recycler();
	} else if (op == 'b') {
		do_fault_injection_new_ravl();
	} else if (op == 'r') {
		do_fault_injection_recycler();
	} else if (op == 'c') {
		do_fault_injection_new_seglists();
	} else if (op == 'h') {
		do_fault_injection_heap_boot();
	} else if (op == 'a') {
		/* exercise the first and the second alloc_class_new call */
		do_fault_injection_class_new(1);
		do_fault_injection_class_new(2);
	} else if (op == 'n') {
		do_fault_injection_class_collection_new();
	} else if (op == 's') {
		do_fault_injection_stats();
	} else {
		UT_FATAL("unknown operation");
	}

	DONE(NULL);
}
| 16,917 | 25.027692 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_movnt_align/movnt_align_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* movnt_align_common.c -- common part for tests doing a persistent movnt align
*/
#include "unittest.h"
#include "movnt_align_common.h"
/* shared test buffers; set up by each test program's main() before use */
char *Src;
char *Dst;
char *Scratch;
/*
 * check_memmove -- fill source and destination with distinct patterns,
 * run the supplied memmove-style function and verify the copied region
 */
void
check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
	unsigned flags)
{
	char *dest = Dst + doff;
	char *from = Src + soff;

	memset(dest, 1, len);
	memset(from, 0, len);

	fn(dest, from, len, flags);

	if (memcmp(dest, from, len) != 0)
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memcpy -- run the supplied memcpy-style function and compare the
 * whole destination buffer against a reference image built in Scratch
 */
void
check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
	unsigned flags)
{
	char *dest = Dst + doff;
	char *from = Src + soff;

	/* identical background patterns in Dst and Scratch */
	memset(Dst, 2, N_BYTES);
	memset(Src, 3, N_BYTES);
	memset(Scratch, 2, N_BYTES);

	/* distinct patterns in the region that is actually copied */
	memset(dest, 1, len);
	memset(from, 0, len);

	/* reference copy made with libc memcpy */
	memcpy(Scratch + doff, from, len);

	fn(dest, from, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES) != 0)
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memset -- run the supplied memset-style function and compare the
 * whole destination buffer against a reference image built in Scratch
 */
void
check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags)
{
	/* expected image: background of 2s with 1s in the set region */
	memset(Scratch, 2, N_BYTES);
	memset(Scratch + off, 1, len);

	memset(Dst, 2, N_BYTES);
	fn(Dst + off, 1, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES) != 0)
		UT_FATAL("memset failed");
}
/*
 * Flags -- every PMEM_F_MEM_* flag combination the tests iterate over.
 * NOTE(review): movnt_align_common.h declares this as `unsigned Flags[10]`,
 * so the element count here must stay at 10.
 */
unsigned Flags[] = {
0,
PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_NONTEMPORAL,
PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_WC,
PMEM_F_MEM_WB,
PMEM_F_MEM_NOFLUSH,
/* all possible flags */
PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 1,830 | 21.060241 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_movnt_align/pmem2_movnt_align.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt_align.c -- test for functions with non-temporal stores
*
* usage: pmem2_movnt_align file [C|F|B|S]
*
* C - pmem2_memcpy()
* B - pmem2_memmove() in backward direction
* F - pmem2_memmove() in forward direction
* S - pmem2_memset()
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem2.h"
#include "unittest.h"
#include "movnt_align_common.h"
#include "ut_pmem2.h"
/* mem* implementations obtained from the pmem2 mapping in main() */
static pmem2_memset_fn memset_fn;
static pmem2_memcpy_fn memcpy_fn;
static pmem2_memmove_fn memmove_fn;
/*
 * check_memmove_variants -- run the memmove check once for every flag
 * combination in Flags
 */
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t matches the unsigned result of ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memmove(doff, soff, len, memmove_fn, Flags[i]);
}
/*
 * check_memcpy_variants -- run the memcpy check once for every flag
 * combination in Flags
 */
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t matches the unsigned result of ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memcpy(doff, soff, len, memcpy_fn, Flags[i]);
}
/*
 * check_memset_variants -- run the memset check once for every flag
 * combination in Flags
 */
static void
check_memset_variants(size_t off, size_t len)
{
	/* size_t matches the unsigned result of ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memset(off, len, memset_fn, Flags[i]);
}
/*
 * main -- entry point; maps the given file once to obtain the mem*
 * implementations, then runs the scenario selected by argv[2][0]
 * (C = memcpy, B/F = memmove backward/forward, S = memset) on anonymous
 * guard-paged buffers.
 */
int
main(int argc, char *argv[])
{
if (argc != 3)
UT_FATAL("usage: %s file type", argv[0]);
struct pmem2_config *cfg;
struct pmem2_source *src;
struct pmem2_map *map;
int fd;
char type = argv[2][0];
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_movnt_align %c %s %savx %savx512f", type,
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&src, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
/*
 * Only the function pointers are needed; the mapping itself is
 * released right away and the checks run on anonymous memory.
 */
memset_fn = pmem2_get_memset_fn(map);
memcpy_fn = pmem2_get_memcpy_fn(map);
memmove_fn = pmem2_get_memmove_fn(map);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
size_t page_size = Ut_pagesize;
size_t s;
switch (type) {
case 'C': /* memcpy */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Src == NULL || Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memcpy with 0 size */
check_memcpy_variants(0, 0, 0);
/* check memcpy with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(0, 0, N_BYTES - s);
/* check memcpy with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, 0, N_BYTES - s);
/* check memcpy with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, N_BYTES);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
case 'B': /* memmove backward */
/* mmap with guard pages; Src and Dst overlap, Dst above Src */
Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Dst = Src + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in backward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in backward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in backward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s)
;
/*
* check memmove in backward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
break;
case 'F': /* memmove forward */
/* mmap with guard pages; Src and Dst overlap, Src above Dst */
Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Src = Dst + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in forward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in forward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in forward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in forward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
break;
case 'S': /* memset */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memset with 0 size */
check_memset_variants(0, 0);
/* check memset with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(0, N_BYTES - s);
/* check memset with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - s);
/* check memset with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
default:
UT_FATAL("!wrong type of test");
break;
}
DONE(NULL);
}
| 5,283 | 24.042654 | 69 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_movnt_align/movnt_align_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* movnt_align_common.h -- header file for common movnt_align test utilities
*/
#ifndef MOVNT_ALIGN_COMMON_H
#define MOVNT_ALIGN_COMMON_H 1
#include "unittest.h"
#include "file.h"
/* size of the test buffers (two pages) */
#define N_BYTES (Ut_pagesize * 2)
/* shared test buffers, defined in movnt_align_common.c */
extern char *Src;
extern char *Dst;
extern char *Scratch;
/* flag combinations iterated by the tests; defined in movnt_align_common.c */
extern unsigned Flags[10];
typedef void *(*mem_fn)(void *, const void *, size_t);
/* signatures of the flag-taking pmem mem* entry points under test */
typedef void *pmem_memcpy_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
void check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
unsigned flags);
void check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
unsigned flags);
void check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags);
#endif
| 989 | 26.5 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_memmove/pmem_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memmove.c -- unit test for doing a memmove
*
* usage:
* pmem_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memmove_common.h"
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
/*
 * pmem_memmove_persist_wrapper -- adapts pmem_memmove_persist to the
 * flag-taking pmem_memmove_fn signature
 */
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
	unsigned flags)
{
	/* the wrapped function takes no flags */
	(void) flags;

	void *result = pmem_memmove_persist(pmemdest, src, len);

	return result;
}
/*
 * pmem_memmove_nodrain_wrapper -- adapts pmem_memmove_nodrain to the
 * flag-taking pmem_memmove_fn signature
 */
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
	unsigned flags)
{
	/* the wrapped function takes no flags */
	(void) flags;

	void *result = pmem_memmove_nodrain(pmemdest, src, len);

	return result;
}
/*
 * do_persist_ddax -- flush a range via util_persist_auto; the first argument
 * (1) presumably selects the Device DAX code path -- main() picks this
 * variant only for TYPE_DEVDAX files (see util_pmem.h to confirm).
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
util_persist_auto(1, ptr, size);
}
/*
 * do_persist -- flush a range via util_persist_auto for regular
 * (non-Device-DAX) files; counterpart of do_persist_ddax
 */
static void
do_persist(const void *ptr, size_t size)
{
util_persist_auto(0, ptr, size);
}
/*
 * swap_mappings - given two mmapped regions swap them.
 *
 * Try swapping src and dest by unmapping src, mapping a new dest with
 * the original src address as a hint. If successful, unmap original dest.
 * Map a new src with the original dest as a hint.
 * In the event of an error caller must unmap all passed in mappings.
 *
 * The munmap/mmap order below is deliberate: each region must be
 * unmapped before its address can be reused as a mapping hint.
 */
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
char *d = *dest;
char *s = *src;
char *ts;
char *td;
MUNMAP(*src, size);
/* mmap destination using src addr as hint */
td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
MUNMAP(*dest, size);
*dest = td;
/* mmap src using original destination addr as a hint */
ts = MMAP(d, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
*src = ts;
}
/*
 * do_memmove_variants -- exercise do_memmove with the persist and nodrain
 * wrappers and then with pmem_memmove under every flag combination in Flags
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
	size_t dest_off, size_t src_off, size_t bytes, persist_fn p)
{
	do_memmove(dst, src, file_name, dest_off, src_off,
			bytes, pmem_memmove_persist_wrapper, 0, p);
	do_memmove(dst, src, file_name, dest_off, src_off,
			bytes, pmem_memmove_nodrain_wrapper, 0, p);

	/* size_t matches the unsigned result of ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memmove(dst, src, file_name, dest_off, src_off,
				bytes, pmem_memmove, Flags[i], p);
	}
}
/*
 * main -- entry point; parses d:/s:/b:/o: arguments, maps the test file
 * and runs the memmove variants either between two distinct mappings
 * (o:0, both orderings) or within a single shared buffer (o:1).
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;
	size_t mapped_len;

	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	fd = OPEN(argv[1], O_RDWR);

	/* Device DAX files need a different flush path than regular files */
	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file %s", argv[1]);

	persist_fn p;
	p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;

	if (argc < 3)
		USAGE();

	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
		    argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");

		size_t val = STRTOUL(&argv[arg][2], NULL, 0);

		switch (argv[arg][0]) {
		case 'd':
			/* val is unsigned, so zero is the only invalid value */
			if (val == 0)
				UT_FATAL("bad offset (%lu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':
			if (val == 0)
				UT_FATAL("bad offset (%lu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':
			if (val == 0)
				UT_FATAL("bad length (%lu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%lu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}

	if (who == 0) {
		/* src > dest */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!could not mmap dest file %s", argv[1]);

		src = MMAP(dst + mapped_len, mapped_len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
			-1, 0);
		/*
		 * Its very unlikely that src would not be > dest. pmem_map_file
		 * chooses the first unused address >= 1TB, large
		 * enough to hold the give range, and 1GB aligned. Log
		 * the error if the mapped addresses cannot be swapped
		 * but allow the test to continue.
		 */
		if (src <= dst) {
			swap_mappings(&dst, &src, mapped_len, fd);
			if (src <= dst)
				UT_FATAL("cannot map files in memory order");
		}

		do_memmove_variants(dst, src, argv[1],
				dst_off, src_off, bytes, p);

		/* dest > src */
		swap_mappings(&dst, &src, mapped_len, fd);

		if (dst <= src)
			UT_FATAL("cannot map files in memory order");

		do_memmove_variants(dst, src, argv[1],
				dst_off, src_off, bytes, p);

		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);

		MUNMAP(src, mapped_len);
	} else {
		/* use the same buffer for source and destination */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!Could not mmap %s: \n", argv[1]);

		memset(dst, 0, bytes);
		p(dst, bytes);
		do_memmove_variants(dst, dst, argv[1],
				dst_off, src_off, bytes, p);

		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);
	}

	CLOSE(fd);

	DONE(NULL);
}
| 5,226 | 22.334821 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_heap_size/obj_ctl_heap_size.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_heap_size.c -- tests for the ctl entry points: heap.size.*
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_heap_size"
#define CUSTOM_GRANULARITY ((1 << 20) * 10)
#define OBJ_SIZE 1024
/*
 * main -- exercises heap.size.granularity / heap.size.extend ctl entry
 * points on a pool that has been filled to OOM; argv[2][0] selects the
 * scenario ('x' = explicit extend, 'w' = automatic growth by granularity)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_heap_size");

	if (argc != 3)
		UT_FATAL("usage: %s poolset [w|x]", argv[0]);

	const char *path = argv[1];
	char t = argv[2][0];
	PMEMobjpool *pop;

	if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* turn off automatic growth so the pool can actually run out */
	size_t disable_granularity = 0;
	int ret = pmemobj_ctl_set(pop, "heap.size.granularity",
		&disable_granularity);
	UT_ASSERTeq(ret, 0);

	/* allocate until OOM */
	while (pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL) == 0)
		;

	switch (t) {
	case 'x': {
		ssize_t extend_size = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_exec(pop, "heap.size.extend", &extend_size);
		UT_ASSERTeq(ret, 0);
		break;
	}
	case 'w': {
		ssize_t new_granularity = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_set(pop, "heap.size.granularity",
			&new_granularity);
		UT_ASSERTeq(ret, 0);

		ssize_t curr_granularity;
		ret = pmemobj_ctl_get(pop, "heap.size.granularity",
			&curr_granularity);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(new_granularity, curr_granularity);
		break;
	}
	default:
		UT_ASSERT(0);
	}

	/* should succeed */
	ret = pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,500 | 21.402985 | 69 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_basic_integration/obj_basic_integration.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_basic_integration.c -- Basic integration tests
*
*/
#include <stddef.h>
#include "unittest.h"
#include "obj.h"
#define TEST_STR "abcdefgh"
#define TEST_STR_LEN 8
#define TEST_VALUE 5
/*
 * Layout definition -- one root object plus two node types registered
 * in the "basic" layout
 */
POBJ_LAYOUT_BEGIN(basic);
POBJ_LAYOUT_ROOT(basic, struct dummy_root);
POBJ_LAYOUT_TOID(basic, struct dummy_node);
POBJ_LAYOUT_TOID(basic, struct dummy_node_c);
POBJ_LAYOUT_END(basic);
/* plain node type, allocated zeroed by the tests */
struct dummy_node {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
/* node type allocated with dummy_node_constructor (same layout as above) */
struct dummy_node_c {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
/* root object: a lock, a single node reference and two list heads */
struct dummy_root {
int value;
PMEMmutex lock;
TOID(struct dummy_node) node;
POBJ_LIST_HEAD(dummy_list, struct dummy_node) dummies;
POBJ_LIST_HEAD(moved_list, struct dummy_node) moved;
};
/*
 * dummy_node_constructor -- object constructor; stores the int pointed to
 * by arg in the node's value field and persists it
 */
static int
dummy_node_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct dummy_node *node = (struct dummy_node *)ptr;

	node->value = *(int *)arg;
	pmemobj_persist(pop, &node->value, sizeof(node->value));

	return 0;
}
/*
 * test_alloc_api -- exercises the POBJ_* allocation macros and the raw
 * pmemobj_alloc/zalloc entry points, including the oversized-request
 * error paths (expected to fail with ENOMEM)
 */
static void
test_alloc_api(PMEMobjpool *pop)
{
TOID(struct dummy_node) node_zeroed;
TOID(struct dummy_node_c) node_constructed;
POBJ_ZNEW(pop, &node_zeroed, struct dummy_node);
UT_ASSERT_rt(OID_INSTANCEOF(node_zeroed.oid, struct dummy_node));
int *test_val = (int *)MALLOC(sizeof(*test_val));
*test_val = TEST_VALUE;
POBJ_NEW(pop, &node_constructed, struct dummy_node_c,
dummy_node_constructor, test_val);
FREE(test_val);
/* zeroed nodes hold 0, constructed nodes hold TEST_VALUE */
TOID(struct dummy_node) iter;
POBJ_FOREACH_TYPE(pop, iter) {
UT_ASSERTeq(D_RO(iter)->value, 0);
}
TOID(struct dummy_node_c) iter_c;
POBJ_FOREACH_TYPE(pop, iter_c) {
UT_ASSERTeq(D_RO(iter_c)->value, TEST_VALUE);
}
PMEMoid oid_iter;
int nodes_count = 0;
POBJ_FOREACH(pop, oid_iter) {
nodes_count++;
}
UT_ASSERTne(nodes_count, 0);
/* after freeing both, the pool must contain no objects */
POBJ_FREE(&node_zeroed);
POBJ_FREE(&node_constructed);
nodes_count = 0;
POBJ_FOREACH(pop, oid_iter) {
nodes_count++;
}
UT_ASSERTeq(nodes_count, 0);
int val = 10;
POBJ_ALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c),
dummy_node_constructor, &val);
/* realloc variants must preserve the object's type number */
POBJ_REALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c) + 1000);
UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
TOID_TYPE_NUM(struct dummy_node_c));
POBJ_ZREALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c) + 2000);
UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
TOID_TYPE_NUM(struct dummy_node_c));
POBJ_FREE(&node_constructed);
POBJ_ZALLOC(pop, &node_zeroed, struct dummy_node,
sizeof(struct dummy_node));
POBJ_FREE(&node_zeroed);
/* freeing a NULL oid must be a no-op */
PMEMoid oid = OID_NULL;
POBJ_FREE(&oid);
/* oversized requests must fail with ENOMEM */
int err = 0;
err = pmemobj_alloc(pop, NULL, SIZE_MAX, 0, NULL, NULL);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_zalloc(pop, NULL, SIZE_MAX, 0);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_alloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0, NULL,
NULL);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_zalloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_realloc_api -- exercises pmemobj_realloc through its full state
 * machine: grow, shrink, free via size 0, alloc via realloc of a NULL
 * oid, no-op reallocs, and oversized requests (expected ENOMEM)
 */
static void
test_realloc_api(PMEMobjpool *pop)
{
PMEMoid oid = OID_NULL;
int ret;
ret = pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("alloc: %u, size: %zu", 128,
pmemobj_alloc_usable_size(oid));
/* grow */
ret = pmemobj_realloc(pop, &oid, 655360, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 128, 655360,
pmemobj_alloc_usable_size(oid));
/* shrink */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 655360, 1,
pmemobj_alloc_usable_size(oid));
/* free (realloc to size 0 frees and nulls the oid) */
ret = pmemobj_realloc(pop, &oid, 0, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(OID_IS_NULL(oid));
UT_OUT("free");
/* alloc (realloc of a NULL oid allocates) */
ret = pmemobj_realloc(pop, &oid, 777, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 0, 777,
pmemobj_alloc_usable_size(oid));
/* shrink */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 777, 1,
pmemobj_alloc_usable_size(oid));
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
UT_ASSERTeq(pmemobj_alloc_usable_size(oid), 0);
UT_OUT("free");
/* alloc */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 0, 1,
pmemobj_alloc_usable_size(oid));
/* do nothing (same size) */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 1, 1,
pmemobj_alloc_usable_size(oid));
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
UT_OUT("free");
/* do nothing (NULL oid, size 0) */
ret = pmemobj_realloc(pop, &oid, 0, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(OID_IS_NULL(oid));
/* alloc */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
/* grow beyond reasonable size */
ret = pmemobj_realloc(pop, &oid, SIZE_MAX, 0);
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(errno, ENOMEM);
ret = pmemobj_realloc(pop, &oid, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(errno, ENOMEM);
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
}
/*
 * test_list_api -- exercises the POBJ_LIST_* macros: insertion (head, tail,
 * before, after), traversal in both directions (with and without the
 * FOREACH macros), moving elements between two lists and removal.
 * Node counts are asserted after each phase.
 */
static void
test_list_api(PMEMobjpool *pop)
{
TOID(struct dummy_root) root;
root = POBJ_ROOT(pop, struct dummy_root);
int nodes_count = 0;
UT_ASSERTeq(pmemobj_type_num(root.oid), POBJ_ROOT_TYPE_NUM);
UT_COMPILE_ERROR_ON(TOID_TYPE_NUM_OF(root) != POBJ_ROOT_TYPE_NUM);
TOID(struct dummy_node) first;
TOID(struct dummy_node) iter;
/* the list starts out empty */
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 0);
int test_val = TEST_VALUE;
PMEMoid ret;
/* should fail */
ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
SIZE_MAX, dummy_node_constructor,
&test_val);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(OID_IS_NULL(ret));
errno = 0;
ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
PMEMOBJ_MAX_ALLOC_SIZE + 1, dummy_node_constructor,
&test_val);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(OID_IS_NULL(ret));
/* insert two constructed nodes and one zeroed node (3 total) */
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
test_val++;
POBJ_LIST_INSERT_NEW_TAIL(pop, &D_RW(root)->dummies, plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
TOID(struct dummy_node) inserted =
POBJ_LIST_FIRST(&D_RW(root)->dummies);
UT_ASSERTeq(pmemobj_type_num(inserted.oid),
TOID_TYPE_NUM(struct dummy_node));
TOID(struct dummy_node) node;
POBJ_ZNEW(pop, &node, struct dummy_node);
POBJ_LIST_INSERT_HEAD(pop, &D_RW(root)->dummies, node, plist);
nodes_count = 0;
POBJ_LIST_FOREACH(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH: dummy_node %d", D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 3);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
iter = first;
do {
UT_OUT("POBJ_LIST_NEXT: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_NEXT(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 3);
/* bounce the node between the two lists via both MOVE variants */
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->dummies,
&D_RW(root)->moved, node, plist, plist_m);
UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->moved,
&D_RW(root)->dummies, node, plist_m, plist);
POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->dummies,
&D_RW(root)->moved, node, plist, plist_m);
UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->moved,
&D_RW(root)->dummies, node, plist_m, plist);
/* remove, re-insert and finally free the node (2 nodes remain) */
POBJ_LIST_REMOVE(pop, &D_RW(root)->dummies, node, plist);
POBJ_LIST_INSERT_TAIL(pop, &D_RW(root)->dummies, node, plist);
POBJ_LIST_REMOVE_FREE(pop, &D_RW(root)->dummies, node, plist);
nodes_count = 0;
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 2);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
iter = first;
do {
UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_PREV(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 2);
/* positional insertion: one AFTER the first, one BEFORE the last */
test_val++;
POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(root)->dummies,
POBJ_LIST_FIRST(&D_RO(root)->dummies), plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
test_val++;
POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(root)->dummies,
POBJ_LIST_LAST(&D_RO(root)->dummies, plist), plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
nodes_count = 0;
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 4);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_LAST(&D_RO(root)->dummies, plist);
iter = first;
do {
UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_PREV(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 4);
}
/*
 * test_tx_api -- exercise the transactional API: parameterized transaction
 * begin with a pmem-resident mutex, volatile-state cleanup via TX_FINALLY,
 * aborts on over-sized allocations (SIZE_MAX and PMEMOBJ_MAX_ALLOC_SIZE + 1),
 * transactional memset/memcpy/set, TX_FREE, and error reporting for invalid
 * pool handles passed to a nested transaction.
 */
static void
test_tx_api(PMEMobjpool *pop)
{
	TOID(struct dummy_root) root;
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct dummy_root)));

	int *vstate = NULL; /* volatile state */

	/* TX_FINALLY must run after commit and release the volatile state */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		vstate = (int *)MALLOC(sizeof(*vstate));
		*vstate = TEST_VALUE;
		TX_ADD(root);
		D_RW(root)->value = *vstate;
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_FINALLY {
		FREE(vstate);
		vstate = NULL;
	} TX_END
	UT_ASSERTeq(vstate, NULL);
	UT_ASSERTeq(D_RW(root)->value, TEST_VALUE);

	/* TX_ALLOC of SIZE_MAX must abort the transaction with ENOMEM */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* same for TX_ZALLOC... */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* ...and TX_XALLOC */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_XALLOC(struct dummy_node, SIZE_MAX,
				POBJ_XALLOC_ZERO);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* one byte above the maximum allocation size must also abort */
	errno = 0;
	TX_BEGIN_LOCK(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ALLOC(struct dummy_node,
			PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node,
			PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END

	/* over-sized TX_REALLOC aborts; the TX_ZNEW result rolls back too */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));

	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node,
			PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));

	/* successful path: memset, memcpy and field set inside one tx */
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		TX_MEMSET(D_RW(D_RW(root)->node)->teststr, 'a', TEST_STR_LEN);
		TX_MEMCPY(D_RW(D_RW(root)->node)->teststr, TEST_STR,
			TEST_STR_LEN);
		TX_SET(D_RW(root)->node, value, TEST_VALUE);
	} TX_END
	UT_ASSERTeq(D_RW(D_RW(root)->node)->value, TEST_VALUE);
	UT_ASSERT(strncmp(D_RW(D_RW(root)->node)->teststr, TEST_STR,
		TEST_STR_LEN) == 0);

	/* transactional free of the node allocated above */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		UT_ASSERT(!TOID_IS_NULL(D_RW(root)->node));
		TX_FREE(D_RW(root)->node);
		D_RW(root)->node = TOID_NULL(struct dummy_node);
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_END

	/* nested transaction on a NULL pool must fail with EFAULT */
	errno = 0;
	TX_BEGIN(pop) {
		TX_BEGIN(NULL) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EFAULT);
	} TX_END

	/* nested transaction on a different (bogus) pool fails with EINVAL */
	errno = 0;
	TX_BEGIN(pop) {
		TX_BEGIN((PMEMobjpool *)(uintptr_t)7) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EINVAL);
	} TX_END

	UT_OUT("%s", pmemobj_errormsg());

	/* explicit abort sets the library error message */
	TX_BEGIN(pop) {
		pmemobj_tx_abort(ECANCELED);
	} TX_END
	UT_OUT("%s", pmemobj_errormsg());
}
/*
 * test_action_api -- exercise the action (reserve/publish/cancel) API, both
 * standalone and inside a transaction, including the typed POBJ_RESERVE_*
 * macro variants.
 */
static void
test_action_api(PMEMobjpool *pop)
{
	struct pobj_action act[2];

	uint64_t dest_value = 0;
	PMEMoid oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	/* publishing applies both actions atomically */
	pmemobj_publish(pop, act, 2);
	UT_ASSERTeq(dest_value, 1);
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));

	/* a reservation may also be published from within a transaction */
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	TX_BEGIN(pop) {
		pmemobj_tx_publish(act, 1);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));

	/* cancelled actions must leave the target value untouched */
	dest_value = 0;
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	pmemobj_cancel(pop, act, 2);
	UT_ASSERTeq(dest_value, 0);

	/* typed reservations: one sized by type, one by explicit size */
	TOID(struct dummy_node) n =
		POBJ_RESERVE_NEW(pop, struct dummy_node, &act[0]);
	TOID(struct dummy_node_c) c =
		POBJ_RESERVE_ALLOC(pop, struct dummy_node_c,
			sizeof(struct dummy_node_c), &act[1]);
	pmemobj_publish(pop, act, 2);

	/* valgrind would warn in case they were not allocated */
	D_RW(n)->value = 1;
	D_RW(c)->value = 1;
	pmemobj_persist(pop, D_RW(n), sizeof(struct dummy_node));
	pmemobj_persist(pop, D_RW(c), sizeof(struct dummy_node_c));
}
static void
test_offsetof(void)
{
TOID(struct dummy_root) r;
TOID(struct dummy_node) n;
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, value) !=
offsetof(struct dummy_root, value));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, lock) !=
offsetof(struct dummy_root, lock));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, node) !=
offsetof(struct dummy_root, node));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, dummies) !=
offsetof(struct dummy_root, dummies));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, moved) !=
offsetof(struct dummy_root, moved));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, value) !=
offsetof(struct dummy_node, value));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, teststr) !=
offsetof(struct dummy_node, teststr));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist) !=
offsetof(struct dummy_node, plist));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist_m) !=
offsetof(struct dummy_node, plist_m));
}
/*
 * test_layout -- an empty layout declaration must report zero declared types
 */
static void
test_layout(void)
{
	/* declare a layout with no types in it */
	POBJ_LAYOUT_BEGIN(mylayout);
	POBJ_LAYOUT_END(mylayout);

	size_t ntypes = POBJ_LAYOUT_TYPES_NUM(mylayout);
	UT_ASSERTeq(ntypes, 0);
}
/*
 * test_root_size -- the root size is 0 until the first pmemobj_root() call
 * allocates the root object, after which it equals the requested size
 */
static void
test_root_size(PMEMobjpool *pop)
{
	/* no root object exists yet */
	UT_ASSERTeq(pmemobj_root_size(pop), 0);

	/* first call allocates the root object of the given size */
	pmemobj_root(pop, sizeof(struct dummy_root));

	UT_ASSERTeq(pmemobj_root_size(pop), sizeof(struct dummy_root));
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_basic_integration");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(basic) != 2);

	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file-name [inject_fault]", argv[0]);

	const char *path = argv[1];
	/* argv[argc] is guaranteed NULL, so this is safe when argc == 2 */
	const char *opt = argv[2];

	PMEMobjpool *pop = NULL;

	/* size 0: the pool size comes from the (poolset) file itself */
	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* run every sub-test against the freshly created pool */
	test_root_size(pop);
	test_alloc_api(pop);
	test_realloc_api(pop);
	test_list_api(pop);
	test_tx_api(pop);
	test_action_api(pop);
	test_offsetof();
	test_layout();

	pmemobj_close(pop);

	/* fault injection */
	if (argc == 3 && strcmp(opt, "inject_fault") == 0) {
		if (pmemobj_fault_injection_enabled()) {
			/* next malloc in heap_check_remote must fail ... */
			pmemobj_inject_fault_at(PMEM_MALLOC, 1,
					"heap_check_remote");
			/* ... which makes the pool open fail with ENOMEM */
			pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic));
			UT_ASSERTeq(pop, NULL);
			UT_ASSERTeq(errno, ENOMEM);
		}
	}

	if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* second open should fail, checks file locking */
	if ((pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) != NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	pmemobj_close(pop);

	/* final consistency check of the closed pool */
	int result = pmemobj_check(path, POBJ_LAYOUT_NAME(basic));
	if (result < 0)
		UT_OUT("!%s: pmemobj_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemobj_check: not consistent", path);

	DONE(NULL);
}
| 17,784 | 25.154412 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_pmemcheck/obj_pmemcheck.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
/* minimal pmem-resident object holding the lock exercised by the test */
struct foo {
	PMEMmutex bar;	/* pmem-resident mutex tracked by pmemcheck */
};
/*
 * test_mutex_pmem_mapping_register -- check that locking a PMEMmutex removes
 * it from pmemcheck's tracked pmem mappings and that freeing the enclosing
 * object re-registers the whole region (verified via the printed mappings).
 */
static void
test_mutex_pmem_mapping_register(PMEMobjpool *pop)
{
	PMEMoid foo;
	int ret = pmemobj_alloc(pop, &foo, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(foo));
	struct foo *foop = pmemobj_direct(foo);
	ret = pmemobj_mutex_lock(pop, &foop->bar);
	/* foo->bar has been removed from pmem mappings collection */
	VALGRIND_PRINT_PMEM_MAPPINGS;
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_mutex_unlock(pop, &foop->bar);
	UT_ASSERTeq(ret, 0);
	pmemobj_free(&foo);
	/* the entire foo object has been re-registered as pmem mapping */
	VALGRIND_PRINT_PMEM_MAPPINGS;
}
/*
 * main -- create a fresh pool and run the pmemcheck mapping-registration test
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmemcheck");

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop = pmemobj_create(argv[1], "pmemcheck",
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	test_mutex_pmem_mapping_register(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,127 | 21.56 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmreorder_simple/pmreorder_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmreorder_simple.c -- a simple unit test for store reordering
*
* usage: pmreorder_simple g|b|c|m file
* g - write data in a consistent manner
* b - write data in a possibly inconsistent manner
* c - check data consistency
* m - write data to the pool in a consistent way,
* but at the beginning logs some inconsistent values
*
* See README file for more details.
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
/*
* The struct three_field is inconsistent if flag is set and the fields have
* different values.
*/
struct three_field {
int first_field;
int second_field;
int third_field;
int flag;
};
/*
* write_consistent -- (internal) write data in a consistent manner
*/
/*
 * write_consistent -- (internal) write data in a consistent manner
 *
 * The three data fields are persisted BEFORE the flag; the store order here
 * is the whole point of the test, so it must not be rearranged.
 */
static void
write_consistent(struct three_field *structp)
{
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	/* persist the data first... */
	pmem_persist(&structp->first_field, sizeof(int) * 3);
	/* ...and only then mark it valid */
	structp->flag = 1;
	pmem_persist(&structp->flag, sizeof(structp->flag));
}
/*
* write_inconsistent -- (internal) write data in an inconsistent manner.
*/
/*
 * write_inconsistent -- (internal) write data in an inconsistent manner.
 *
 * The flag is stored before the data and everything is persisted with a
 * single call, so a reordered/partial flush can expose flag==1 with
 * mismatched data fields -- exactly what pmreorder should detect.
 */
static void
write_inconsistent(struct three_field *structp)
{
	structp->flag = 1;
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	pmem_persist(structp, sizeof(*structp));
}
/*
* check_consistency -- (internal) check struct three_field consistency
*/
/*
 * check_consistency -- (internal) check struct three_field consistency
 *
 * Returns 1 (inconsistent) when the flag is set but the three data fields
 * do not all hold the same value; returns 0 otherwise.
 */
static int
check_consistency(struct three_field *structp)
{
	if (!structp->flag)
		return 0;

	return (structp->first_field != structp->second_field) ||
		(structp->first_field != structp->third_field);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmreorder_simple");

	util_init();

	/* single-letter mode: g(ood), b(ad), c(heck) or m(arked) */
	if ((argc != 3) || (strchr("gbcm", argv[1][0]) == NULL) ||
			argv[1][1] != '\0')
		UT_FATAL("usage: %s g|b|c|m file", argv[0]);

	int fd = OPEN(argv[2], O_RDWR);
	size_t size;

	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);

	struct three_field *structp = map;

	char opt = argv[1][0];

	/* clear the struct to get a consistent start state for writing */
	if (strchr("gb", opt))
		pmem_memset_persist(structp, 0, sizeof(*structp));
	else if (strchr("m", opt)) {
		/* set test values to log an inconsistent start state */
		pmem_memset_persist(&structp->flag, 1, sizeof(int));
		pmem_memset_persist(&structp->first_field, 0, sizeof(int) * 2);
		pmem_memset_persist(&structp->third_field, 1, sizeof(int));
		/* clear the struct to get back a consistent start state */
		pmem_memset_persist(structp, 0, sizeof(*structp));
	}

	/* verify that DEFAULT_REORDER restores default engine */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.BEGIN");
	switch (opt) {
		case 'g':
			write_consistent(structp);
			break;
		case 'b':
			write_inconsistent(structp);
			break;
		case 'm':
			write_consistent(structp);
			break;
		case 'c':
			/* exit code reports the consistency verdict */
			return check_consistency(structp);
		default:
			UT_FATAL("Unrecognized option %c", opt);
	}
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.END");

	/* check if undefined marker will not cause an issue */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.BEGIN");
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.END");

	CLOSE(fd);

	DONE(NULL);
}
| 3,335 | 24.082707 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/remote_obj_basic/remote_obj_basic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* remote_obj_basic.c -- unit test for remote tests support
*
* usage: remote_obj_basic <create|open> <poolset-file>
*/
#include "unittest.h"
#define LAYOUT_NAME "remote_obj_basic"
/*
 * main -- create or open the given poolset depending on the mode argument
 */
int
main(int argc, char *argv[])
{
	PMEMobjpool *pop;

	START(argc, argv, "remote_obj_basic");

	if (argc != 3)
		UT_FATAL("usage: %s <create|open> <poolset-file>", argv[0]);

	const char *mode = argv[1];
	const char *file = argv[2];

	/* reject anything that is neither "create" nor "open" up front */
	int do_create = (strcmp(mode, "create") == 0);
	if (!do_create && strcmp(mode, "open") != 0)
		UT_FATAL("wrong mode: %s\n", argv[1]);

	if (do_create) {
		pop = pmemobj_create(file, LAYOUT_NAME, 0,
					S_IWUSR | S_IRUSR);
		if (pop == NULL)
			UT_FATAL("!pmemobj_create: %s", file);
		UT_OUT("The pool set %s has been created", file);
	} else {
		pop = pmemobj_open(file, LAYOUT_NAME);
		if (pop == NULL)
			UT_FATAL("!pmemobj_open: %s", file);
		UT_OUT("The pool set %s has been opened", file);
	}

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,019 | 20.25 | 62 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_debug/obj_ctl_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_ctl_debug.c -- tests for the ctl debug namesapce entry points
*/
#include "unittest.h"
#include "../../libpmemobj/obj.h"
#define LAYOUT "obj_ctl_debug"
#define BUFFER_SIZE 128
#define ALLOC_PATTERN 0xAC
/*
 * test_alloc_pattern -- verify the debug.heap.alloc_pattern ctl: the default
 * is "no pattern", a set pattern is stored in the pool's heap state, and a
 * subsequent allocation comes back filled with that pattern byte.
 */
static void
test_alloc_pattern(PMEMobjpool *pop)
{
	int ret;
	int pattern;
	PMEMoid oid;

	/* check default pattern */
	ret = pmemobj_ctl_get(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pattern, PALLOC_CTL_DEBUG_NO_PATTERN);

	/* check set pattern */
	pattern = ALLOC_PATTERN;
	ret = pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pop->heap.alloc_pattern, pattern);

	/* check alloc with pattern */
	ret = pmemobj_alloc(pop, &oid, BUFFER_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	char *buff = pmemobj_direct(oid);
	int i;
	for (i = 0; i < BUFFER_SIZE; i++)
		/* should trigger memcheck error: read uninitialized values */
		UT_ASSERTeq(*(buff + i), (char)pattern);

	pmemobj_free(&oid);
}
/*
 * main -- create the test pool and run the alloc-pattern ctl check
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_debug");

	if (argc < 2)
		UT_FATAL("usage: %s filename", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	test_alloc_pattern(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,452 | 20.367647 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_list_macro/obj_list_macro.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_macro.c -- unit tests for list module
*/
#include <stddef.h>
#include "libpmemobj.h"
#include "unittest.h"
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
/* single list element: an id plus the persistent list linkage */
struct item {
	int id;
	POBJ_LIST_ENTRY(struct item) next;
};
/* persistent list head wrapper */
struct list {
	POBJ_LIST_HEAD(listhead, struct item) head;
};
/* global lists */
static TOID(struct list) List;
static TOID(struct list) List_sec;
#define LAYOUT_NAME "list_macros"
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list_macro <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list_macro <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list_macro <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list_macro <file> i:<where>:<num>[:<id>]")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list_macro <file> n:<where>:<num>[:<id>]")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list_macro <file> f:<list>:<num>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list_macro <file> r:<list>:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list_macro <file> m:<num>:<where>:<num>")
/*
* get_item_list -- get nth item from list
*/
/*
 * get_item_list -- get nth item from list
 *
 * A non-negative n counts from the head (0 == first element); a negative n
 * counts from the tail (-1 == last element). Returns TOID_NULL when the
 * index is out of range.
 */
static TOID(struct item)
get_item_list(TOID(struct list) list, int n)
{
	TOID(struct item) item;

	if (n < 0) {
		int remaining = -n;
		POBJ_LIST_FOREACH_REVERSE(item, &D_RO(list)->head, next) {
			if (--remaining == 0)
				return item;
		}
	} else {
		int remaining = n;
		POBJ_LIST_FOREACH(item, &D_RO(list)->head, next) {
			if (remaining-- == 0)
				return item;
		}
	}

	return TOID_NULL(struct item);
}
/*
* do_print -- print list elements in normal order
*/
/*
 * do_print -- print list elements in normal order
 *
 * Argument format "P:<list>" where <list> is 1 (List) or 2 (List_sec).
 */
static void
do_print(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	if (sscanf(arg, "P:%d", &L) != 1)
		FATAL_USAGE_PRINT();

	TOID(struct list) which;
	if (L == 1) {
		UT_OUT("list:");
		which = List;
	} else if (L == 2) {
		UT_OUT("list sec:");
		which = List_sec;
	} else {
		FATAL_USAGE_PRINT();
		return;
	}

	TOID(struct item) item;
	POBJ_LIST_FOREACH(item, &D_RW(which)->head, next) {
		UT_OUT("id = %d", D_RO(item)->id);
	}
}
/*
* do_print_reverse -- print list elements in reverse order
*/
/*
 * do_print_reverse -- print list elements in reverse order
 *
 * Argument format "R:<list>" where <list> is 1 (List) or 2 (List_sec).
 */
static void
do_print_reverse(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	if (sscanf(arg, "R:%d", &L) != 1)
		FATAL_USAGE_PRINT_REVERSE();

	TOID(struct list) which;
	if (L == 1) {
		UT_OUT("list reverse:");
		which = List;
	} else if (L == 2) {
		UT_OUT("list sec reverse:");
		which = List_sec;
	} else {
		FATAL_USAGE_PRINT_REVERSE();
		return;
	}

	TOID(struct item) item;
	POBJ_LIST_FOREACH_REVERSE(item, &D_RW(which)->head, next) {
		UT_OUT("id = %d", D_RO(item)->id);
	}
}
/*
* item_constructor -- constructor which sets the item's id to
* new value
*/
/*
 * item_constructor -- constructor which sets the item's id to
 * the value passed via arg (an int *)
 */
static int
item_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct item *itemp = (struct item *)ptr;

	itemp->id = *(int *)arg;
	UT_OUT("constructor(id = %d)", itemp->id);

	return 0;
}
/*
* do_insert_new -- insert new element to list
*/
/*
 * do_insert_new -- insert new element to list
 *
 * Argument format "n:<where>:<num>[:<id>]": <where> picks before (1) or
 * after (0) the <num>-th element, and <id> (optional, default 0) is passed
 * to the constructor. An empty list always gets a head insert.
 */
static void
do_insert_new(PMEMobjpool *pop, const char *arg)
{
	int n;	/* which element on List */
	int before;
	int id;
	int ret = sscanf(arg, "n:%d:%d:%d", &before, &n, &id);
	if (ret != 3 && ret != 2)
		FATAL_USAGE_INSERT_NEW();
	int ptr = (ret == 3) ? id : 0;

	TOID(struct item) item;
	if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
		POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(List)->head, next,
			sizeof(struct item), item_constructor, &ptr);
		if (POBJ_LIST_EMPTY(&D_RW(List)->head))
			UT_FATAL("POBJ_LIST_INSERT_NEW_HEAD");
	} else {
		item = get_item_list(List, n);
		UT_ASSERT(!TOID_IS_NULL(item));
		if (!before) {
			POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(List)->head,
				item, next, sizeof(struct item),
				item_constructor, &ptr);
			/* success check: the anchor must now have a successor */
			if (TOID_IS_NULL(POBJ_LIST_NEXT(item, next)))
				UT_FATAL("POBJ_LIST_INSERT_NEW_AFTER");
		} else {
			POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(List)->head,
				item, next, sizeof(struct item),
				item_constructor, &ptr);
			/* success check: the anchor must now have a predecessor */
			if (TOID_IS_NULL(POBJ_LIST_PREV(item, next)))
				UT_FATAL("POBJ_LIST_INSERT_NEW_BEFORE");
		}
	}
}
/*
* do_insert -- insert element to list
*/
/*
 * do_insert -- insert element to list
 *
 * Like do_insert_new(), but the element is allocated first with POBJ_NEW and
 * then linked in, checking both the macro return value and the resulting
 * neighbor pointers.
 */
static void
do_insert(PMEMobjpool *pop, const char *arg)
{
	int n;	/* which element on List */
	int before;
	int id;
	int ret = sscanf(arg, "i:%d:%d:%d", &before, &n, &id);
	if (ret != 3 && ret != 2)
		FATAL_USAGE_INSERT();
	int ptr = (ret == 3) ? id : 0;

	TOID(struct item) item;
	POBJ_NEW(pop, &item, struct item, item_constructor, &ptr);
	UT_ASSERT(!TOID_IS_NULL(item));

	errno = 0;
	if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
		ret = POBJ_LIST_INSERT_HEAD(pop, &D_RW(List)->head,
				item, next);
		if (ret) {
			/* failure contract: -1 return with errno set */
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_INSERT_HEAD");
		}
		if (POBJ_LIST_EMPTY(&D_RW(List)->head))
			UT_FATAL("POBJ_LIST_INSERT_HEAD");
	} else {
		TOID(struct item) elm = get_item_list(List, n);
		UT_ASSERT(!TOID_IS_NULL(elm));
		if (!before) {
			ret = POBJ_LIST_INSERT_AFTER(pop, &D_RW(List)->head,
					elm, item, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_INSERT_AFTER");
			}
			/* the new item must be the anchor's direct successor */
			if (!TOID_EQUALS(item, POBJ_LIST_NEXT(elm, next)))
				UT_FATAL("POBJ_LIST_INSERT_AFTER");
		} else {
			ret = POBJ_LIST_INSERT_BEFORE(pop, &D_RW(List)->head,
					elm, item, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_INSERT_BEFORE");
			}
			/* the new item must be the anchor's direct predecessor */
			if (!TOID_EQUALS(item, POBJ_LIST_PREV(elm, next)))
				UT_FATAL("POBJ_LIST_INSERT_BEFORE");
		}
	}
}
/*
* do_remove_free -- remove and free element from list
*/
/*
 * do_remove_free -- remove and free element from list
 *
 * Argument format "f:<list>:<num>". Silently returns when the selected
 * list is empty.
 */
static void
do_remove_free(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	int n;	/* which element */
	if (sscanf(arg, "f:%d:%d", &L, &n) != 2)
		FATAL_USAGE_REMOVE_FREE();

	TOID(struct list) tmp_list;
	switch (L) {
	case 1:
		tmp_list = List;
		break;
	case 2:
		tmp_list = List_sec;
		break;
	default:
		FATAL_USAGE_REMOVE_FREE();
		return;
	}

	if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
		return;

	TOID(struct item) item = get_item_list(tmp_list, n);
	UT_ASSERT(!TOID_IS_NULL(item));

	errno = 0;
	int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(tmp_list)->head,
			item, next);
	if (ret) {
		UT_ASSERTeq(ret, -1);
		UT_ASSERTne(errno, 0);
		UT_FATAL("POBJ_LIST_REMOVE_FREE");
	}
}
/*
* do_remove -- remove element from list
*/
/*
 * do_remove -- remove element from list
 *
 * Argument format "r:<list>:<num>". The element is unlinked with
 * POBJ_LIST_REMOVE and then freed separately with POBJ_FREE.
 */
static void
do_remove(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	int n;	/* which element */
	if (sscanf(arg, "r:%d:%d", &L, &n) != 2)
		FATAL_USAGE_REMOVE();

	TOID(struct item) item;
	TOID(struct list) tmp_list;
	if (L == 1)
		tmp_list = List;
	else if (L == 2)
		tmp_list = List_sec;
	else
		/*
		 * Bug fix: this path previously printed the usage text of the
		 * remove-free command (FATAL_USAGE_REMOVE_FREE) -- a
		 * copy-paste from do_remove_free(). Use this command's own
		 * usage message.
		 */
		FATAL_USAGE_REMOVE();

	if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
		return;

	item = get_item_list(tmp_list, n);
	UT_ASSERT(!TOID_IS_NULL(item));

	errno = 0;
	int ret = POBJ_LIST_REMOVE(pop, &D_RW(tmp_list)->head, item, next);
	if (ret) {
		/* failure contract: -1 return with errno set */
		UT_ASSERTeq(ret, -1);
		UT_ASSERTne(errno, 0);
		UT_FATAL("POBJ_LIST_REMOVE");
	}
	POBJ_FREE(&item);
}
/*
* do_move -- move element from one list to another
*/
/*
 * do_move -- move element from one list to another
 *
 * Argument format "m:<num>:<where>:<num>": moves the n-th element of List
 * onto List_sec, either to its head (when List_sec is empty) or before/after
 * the d-th element of List_sec.
 */
static void
do_move(PMEMobjpool *pop, const char *arg)
{
	int n;
	int d;
	int before;
	if (sscanf(arg, "m:%d:%d:%d", &n, &before, &d) != 3)
		FATAL_USAGE_MOVE();

	int ret;
	errno = 0;

	/* nothing to move from an empty source list */
	if (POBJ_LIST_EMPTY(&D_RW(List)->head))
		return;

	if (POBJ_LIST_EMPTY(&D_RW(List_sec)->head)) {
		/* no destination anchor exists yet -- move to the head */
		ret = POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(List)->head,
				&D_RW(List_sec)->head,
				get_item_list(List, n),
				next, next);
		if (ret) {
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_MOVE_ELEMENT_HEAD");
		}
	} else {
		if (before) {
			ret = POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,
					&D_RW(List)->head,
					&D_RW(List_sec)->head,
					get_item_list(List_sec, d),
					get_item_list(List, n),
					next, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_MOVE_ELEMENT_BEFORE");
			}
		} else {
			ret = POBJ_LIST_MOVE_ELEMENT_AFTER(pop,
					&D_RW(List)->head,
					&D_RW(List_sec)->head,
					get_item_list(List_sec, d),
					get_item_list(List, n),
					next, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_MOVE_ELEMENT_AFTER");
			}
		}
	}
}
/*
* do_cleanup -- de-initialization function
*/
/*
 * do_cleanup -- de-initialization function
 *
 * Frees every remaining element of the given list, then the list itself.
 */
static void
do_cleanup(PMEMobjpool *pop, TOID(struct list) list)
{
	errno = 0;

	while (!POBJ_LIST_EMPTY(&D_RW(list)->head)) {
		TOID(struct item) head = POBJ_LIST_FIRST(&D_RW(list)->head);
		int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(list)->head,
				head, next);
		UT_ASSERTeq(errno, 0);
		UT_ASSERTeq(ret, 0);
	}
	POBJ_FREE(&list);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_list_macro");

	if (argc < 2)
		FATAL_USAGE();

	const char *path = argv[1];

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	/* allocate the two global lists the commands operate on */
	POBJ_ZNEW(pop, &List, struct list);
	POBJ_ZNEW(pop, &List_sec, struct list);

	/* each remaining argument is one command; dispatch on its first char */
	int i;
	for (i = 2; i < argc; i++) {
		switch (argv[i][0]) {
		case 'P':
			do_print(pop, argv[i]);
			break;
		case 'R':
			do_print_reverse(pop, argv[i]);
			break;
		case 'n':
			do_insert_new(pop, argv[i]);
			break;
		case 'i':
			do_insert(pop, argv[i]);
			break;
		case 'f':
			do_remove_free(pop, argv[i]);
			break;
		case 'r':
			do_remove(pop, argv[i]);
			break;
		case 'm':
			do_move(pop, argv[i]);
			break;
		default:
			FATAL_USAGE();
		}
	}

	do_cleanup(pop, List);
	do_cleanup(pop, List_sec);

	pmemobj_close(pop);

	DONE(NULL);
}
| 9,625 | 21.756501 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_critnib_mt/obj_critnib_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_critnib_mt.c -- multithreaded unit test for critnib
*/
#include <errno.h>
#include "critnib.h"
#include "rand.h"
#include "os_thread.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define NITER_FAST 200000000
#define NITER_MID 20000000
#define NITER_SLOW 2000000
#define MAXTHREADS 4096
static int nthreads; /* number of threads */
static int nrthreads; /* in mixed tests, read threads */
static int nwthreads; /* ... and write threads */
/*
 * rnd_thid_r64 -- produce a pseudo-random 64-bit value with the thread index
 * spliced into bits 16..31, so two worker threads can never generate the
 * same key while both ends of the value stay pseudo-random.
 */
static uint64_t
rnd_thid_r64(rng_t *seedp, uint16_t thid)
{
	uint64_t val = rnd64_r(seedp);

	/* clear bits 16..31 and stamp the thread id there */
	val = (val & ~0xffff0000ULL) | (((uint64_t)thid) << 16);

	return val;
}
/*
 * helgrind_count -- scale a total operation count down to a per-thread
 * count, further dividing by 64 for each level of valgrind instrumentation
 * (stacked valgrind tools slow execution down exponentially).
 */
static uint64_t
helgrind_count(uint64_t x)
{
	uint64_t per_thread = x / (unsigned)nthreads;

	return per_thread >> (6 * On_valgrind);
}
/* 1024 random numbers, shared between threads. */
static uint64_t the1024[1024];
static struct critnib *c;
#define K 0xdeadbeefcafebabe
/* reader: repeatedly look up the single fixed key K */
static void *
thread_read1(void *arg)
{
	uint64_t iters = helgrind_count(NITER_FAST);

	while (iters-- > 0)
		UT_ASSERTeq(critnib_get(c, K), (void *)K);

	return NULL;
}
/* reader: cycle through the shared 1024 preloaded keys */
static void *
thread_read1024(void *arg)
{
	uint64_t iters = helgrind_count(NITER_FAST);

	for (uint64_t i = 0; i < iters; i++) {
		uint64_t key = the1024[i % ARRAY_SIZE(the1024)];
		UT_ASSERTeq(critnib_get(c, key), (void *)key);
	}

	return NULL;
}
/*
 * thread_write1024 -- writer: repeatedly insert and remove keys from a
 * private, per-thread set of 1024 values (thread id embedded in each key,
 * so writers never collide with each other).
 */
static void *
thread_write1024(void *arg)
{
	rng_t rng;
	randomize_r(&rng, (uintptr_t)arg);
	uint64_t w1024[1024];

	/* generate this thread's private key set */
	for (int i = 0; i < ARRAY_SIZE(w1024); i++)
		w1024[i] = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);

	uint64_t niter = helgrind_count(NITER_SLOW);

	for (uint64_t count = 0; count < niter; count++) {
		uint64_t v = w1024[count % ARRAY_SIZE(w1024)];
		critnib_insert(c, v, (void *)v);
		uint64_t r = (uint64_t)critnib_remove(c, v);
		UT_ASSERTeq(v, r);
	}

	return NULL;
}
/*
 * thread_read_write_remove -- insert a fresh thread-unique key, read it back,
 * then remove it -- over and over, concurrently with the other threads.
 */
static void *
thread_read_write_remove(void *arg)
{
	rng_t rng;
	randomize_r(&rng, (uintptr_t)arg);

	uint64_t iters = helgrind_count(NITER_SLOW);

	for (uint64_t i = 0; i < iters; i++) {
		uint64_t key = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);

		critnib_insert(c, key, (void *)key);
		uint64_t got = (uint64_t)critnib_get(c, key);
		UT_ASSERTeq(got, key);
		got = (uint64_t)critnib_remove(c, key);
		UT_ASSERTeq(got, key);
	}

	return NULL;
}
/*
* Reverse bits in a number: 1234 -> 4321 (swap _bit_ endianness).
*
* Doing this on successive numbers produces a van der Corput sequence,
* which covers the space nicely (relevant for <= tests).
*/
/*
 * revbits -- reverse the bit order of a 64-bit value (bit 0 swaps with
 * bit 63, bit 1 with bit 62, and so on).
 *
 * Doing this on successive numbers produces a van der Corput sequence,
 * which covers the space nicely (relevant for <= tests).
 */
static uint64_t
revbits(uint64_t x)
{
	uint64_t y = 0;

	for (int i = 0; i < 64; i++) {
		y = (y << 1) | (x & 1);
		x >>= 1;
	}

	return y;
}
/*
 * thread_le1 -- find_le over a van der Corput key sequence against a tree
 * holding only K: queries below K must miss, the rest must find K.
 */
static void *
thread_le1(void *arg)
{
	uint64_t iters = helgrind_count(NITER_MID);

	for (uint64_t i = 0; i < iters; i++) {
		uint64_t key = revbits(i);

		if (key < K)
			UT_ASSERTeq(critnib_find_le(c, key), NULL);
		else
			UT_ASSERTeq(critnib_find_le(c, key), (void *)K);
	}

	return NULL;
}
/*
 * thread_le1024 -- hammer find_le with a van der Corput key sequence;
 * results are not checked, this only exercises concurrent traversal.
 */
static void *
thread_le1024(void *arg)
{
	uint64_t iters = helgrind_count(NITER_MID);

	for (uint64_t i = 0; i < iters; i++)
		critnib_find_le(c, revbits(i));

	return NULL;
}
typedef void *(*thread_func_t)(void *);
/*
* Before starting the threads, we add "fixed_preload" of static values
* (K and 1), or "random_preload" of random numbers. Can't have both.
*/
/*
 * test -- build a critnib, preload it, then run reader threads (and
 * optionally writer threads) against it concurrently.
 *
 * fixed_preload inserts the static keys K and 1; random_preload inserts the
 * first N shared random keys. When wthread is NULL, all "nthreads" threads
 * run rthread; otherwise nrthreads run rthread and nwthreads run wthread.
 */
static void
test(int fixed_preload, int random_preload, thread_func_t rthread,
	thread_func_t wthread)
{
	c = critnib_new();

	if (fixed_preload >= 1)
		critnib_insert(c, K, (void *)K);
	if (fixed_preload >= 2)
		critnib_insert(c, 1, (void *)1);
	for (int i = 0; i < random_preload; i++)
		critnib_insert(c, the1024[i], (void *)the1024[i]);

	os_thread_t th[MAXTHREADS], wr[MAXTHREADS];
	int ntr = wthread ? nrthreads : nthreads;
	int ntw = wthread ? nwthreads : 0;

	for (int i = 0; i < ntr; i++)
		THREAD_CREATE(&th[i], 0, rthread, (void *)(uint64_t)i);
	for (int i = 0; i < ntw; i++)
		THREAD_CREATE(&wr[i], 0, wthread, (void *)(uint64_t)i);

	/* The threads work here... */

	for (int i = 0; i < ntr; i++) {
		void *retval;
		THREAD_JOIN(&th[i], &retval);
	}
	for (int i = 0; i < ntw; i++) {
		void *retval;
		THREAD_JOIN(&wr[i], &retval);
	}

	critnib_delete(c);
}
/*
 * main -- seed the shared key set, size the thread pools from the CPU count,
 * and run the five concurrency scenarios.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_critnib_mt");

	util_init();

	randomize(1); /* use a fixed reproducible seed */

	for (int i = 0; i < ARRAY_SIZE(the1024); i++)
		the1024[i] = rnd64();

	nthreads = sysconf(_SC_NPROCESSORS_ONLN);
	if (nthreads > MAXTHREADS)
		nthreads = MAXTHREADS;
	/*
	 * Bug fix: sysconf() returns -1 on error; the old "!nthreads" check
	 * only caught 0, leaving a negative thread count (and a negative
	 * nrthreads below). Fall back to 8 for any non-positive value.
	 */
	if (nthreads <= 0)
		nthreads = 8;

	/* roughly half writers, half readers, at least one of each */
	nwthreads = nthreads / 2;
	if (!nwthreads)
		nwthreads = 1;
	nrthreads = nthreads - nwthreads;
	if (!nrthreads)
		nrthreads = 1;

	test(1, 0, thread_read1, thread_write1024);
	test(0, 1024, thread_read1024, thread_write1024);
	test(0, 0, thread_read_write_remove, NULL);
	test(1, 0, thread_le1, NULL);
	test(0, 1024, thread_le1024, NULL);

	DONE(NULL);
}
| 5,467 | 20.527559 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_arenas/obj_ctl_arenas.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_ctl_arenas.c -- tests for the ctl entry points
* usage:
* obj_ctl_arenas <file> n - test for heap.narenas.total
*
* obj_ctl_arenas <file> s - test for heap.arena.[idx].size
* and heap.thread.arena_id (RW)
*
* obj_ctl_arenas <file> c - test for heap.arena.create,
* heap.arena.[idx].automatic and heap.narenas.automatic
* obj_ctl_arenas <file> a - mt test for heap.arena.create
* and heap.thread.arena_id
*
* obj_ctl_arenas <file> f - test for POBJ_ARENA_ID flag,
*
* obj_ctl_arenas <file> q - test for POBJ_ARENA_ID with
* non-exists arena id
*
* obj_ctl_arenas <file> m - test for heap.narenas.max (RW)
*/
#include <sched.h>
#include "sys_util.h"
#include "unittest.h"
#include "util.h"
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define LAYOUT "obj_ctl_arenas"
#define CTL_QUERY_LEN 256
#define NTHREAD 2
#define NTHREAD_ARENA 32
#define NOBJECT_THREAD 64
#define ALLOC_CLASS_ARENA 2
#define NTHREADX 16
#define NARENAS 16
#define DEFAULT_ARENAS_MAX (1 << 10)
static os_mutex_t lock;
static os_cond_t cond;
static PMEMobjpool *pop;
static int nth;
/*
 * Allocation class descriptors used by the tests. Entries 0 and 1 are
 * registered as classes 128 and 129 by create_alloc_class(); entry 2 is
 * used as class 130 (ALLOC_CLASS_ARENA + 128) -- presumably registered in
 * code outside this view; verify against the full file.
 */
static struct pobj_alloc_class_desc alloc_class[] = {
	{
		.header_type = POBJ_HEADER_NONE,
		.unit_size = 128,
		.units_per_block = 1000,
		.alignment = 0
	},
	{
		.header_type = POBJ_HEADER_NONE,
		.unit_size = 1024,
		.units_per_block = 1000,
		.alignment = 0
	},
	{
		.header_type = POBJ_HEADER_NONE,
		.unit_size = 111,
		.units_per_block = CHUNKSIZE / 111,
		.alignment = 0
	},
};
/* reference allocation: ties an arena id to an object allocated from it */
struct arena_alloc {
	unsigned arena;	/* arena id */
	PMEMoid oid;	/* reference object allocated in that arena */
};
static struct arena_alloc ref;
/*
 * check_arena_size -- query heap.arena.<id>.size and verify it equals one
 * chunk-aligned block of the given allocation class
 */
static void
check_arena_size(unsigned arena_id, unsigned class_id)
{
	int ret;
	size_t arena_size;

	/* build the per-arena ctl query string */
	char arena_idx_size[CTL_QUERY_LEN];
	SNPRINTF(arena_idx_size, CTL_QUERY_LEN,
			"heap.arena.%u.size", arena_id);

	ret = pmemobj_ctl_get(pop, arena_idx_size, &arena_size);
	UT_ASSERTeq(ret, 0);

	/* one full block of this class, rounded up to whole chunks */
	size_t test = ALIGN_UP(alloc_class[class_id].unit_size *
		alloc_class[class_id].units_per_block, CHUNKSIZE);
	UT_ASSERTeq(test, arena_size);
}
/*
 * create_alloc_class -- register alloc_class[0] and alloc_class[1] as
 * allocation classes 128 and 129 via the heap.alloc_class ctl
 */
static void
create_alloc_class(void)
{
	int ret;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
			&alloc_class[0]);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
			&alloc_class[1]);
	UT_ASSERTeq(ret, 0);
}
/*
 * worker_arenas_size -- per-thread worker: create a new arena, assign it to
 * this thread, allocate from it, then (after a rendezvous with the other
 * thread) verify the assignment stuck and the arena has the expected size.
 *
 * arg is the thread index; alloc class id is arg + 128.
 */
static void *
worker_arenas_size(void *arg)
{
	int ret = -1;
	int idx = (int)(intptr_t)arg;
	int off_idx = idx + 128;
	unsigned arena_id;
	unsigned arena_id_new;

	ret = pmemobj_ctl_exec(pop, "heap.arena.create",
		&arena_id_new);
	UT_ASSERTeq(ret, 0);
	/* arena id 0 is reserved, new arenas start at 1 */
	UT_ASSERT(arena_id_new >= 1);
	ret = pmemobj_ctl_set(pop, "heap.thread.arena_id",
		&arena_id_new);
	UT_ASSERTeq(ret, 0);

	ret = pmemobj_xalloc(pop, NULL, alloc_class[idx].unit_size, 0,
		POBJ_CLASS_ID(off_idx), NULL, NULL);
	UT_ASSERTeq(ret, 0);

	/* we need to test 2 arenas so 2 threads are needed here */
	util_mutex_lock(&lock);
	nth++;
	if (nth == NTHREAD)
		os_cond_broadcast(&cond);
	else
		while (nth < NTHREAD)
			os_cond_wait(&cond, &lock);
	util_mutex_unlock(&lock);

	/* the thread's arena assignment must still be in effect */
	ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arena_id_new, arena_id);

	check_arena_size(arena_id, (unsigned)idx);

	return NULL;
}
/*
 * worker_arenas_flag -- thread worker: create NARENAS arenas and verify
 * that the POBJ_ARENA_ID flag is honored by pmemobj_xalloc,
 * pmemobj_xreserve and pmemobj_tx_xalloc.
 *
 * Fixes vs. the original: the pmemobj_xreserve result is checked before
 * publishing (publishing a failed reservation is invalid), and the
 * transactional allocation now has a TX_ONABORT guard so a silent abort
 * cannot slip through.
 */
static void *
worker_arenas_flag(void *arg)
{
	/* unused -- the worker operates on the global pool */
	(void) arg;

	int ret;
	unsigned arenas[NARENAS];
	for (unsigned i = 0; i < NARENAS; ++i) {
		ret = pmemobj_ctl_exec(pop, "heap.arena.create",
			&arenas[i]);
		UT_ASSERTeq(ret, 0);
	}

	/*
	 * Tests POBJ_ARENA_ID with pmemobj_xalloc.
	 * All objects are freed after pthread join.
	 */
	for (unsigned i = 0; i < 2; i++) {
		ret = pmemobj_xalloc(pop,
			NULL, alloc_class[i].unit_size, 0,
			POBJ_CLASS_ID(i + 128) |
			POBJ_ARENA_ID(arenas[i]),
			NULL, NULL);
		UT_ASSERTeq(ret, 0);
		check_arena_size(arenas[i], i);
	}

	/* test POBJ_ARENA_ID with pmemobj_xreserve */
	struct pobj_action act;
	PMEMoid oid = pmemobj_xreserve(pop, &act,
			alloc_class[0].unit_size, 1,
			POBJ_CLASS_ID(128) |
			POBJ_ARENA_ID(arenas[2]));
	/* never publish a failed reservation */
	UT_ASSERT(!OID_IS_NULL(oid));
	pmemobj_publish(pop, &act, 1);
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));

	/* test POBJ_ARENA_ID with pmemobj_tx_xalloc */
	TX_BEGIN(pop) {
		pmemobj_tx_xalloc(alloc_class[1].unit_size, 0,
			POBJ_CLASS_ID(129) | POBJ_ARENA_ID(arenas[3]));
	} TX_ONABORT {
		/* the transactional allocation is expected to succeed */
		UT_ASSERT(0);
	} TX_END
	check_arena_size(arenas[3], 1);

	return NULL;
}
/*
 * worker_arena_threads -- thread worker: bind to the arena carried by the
 * reference allocation, allocate NOBJECT_THREAD objects from it and check
 * that they land in the same block as the reference object.
 *
 * Fixes vs. the original: the block-locality check used the debug-only
 * ASSERT macro instead of UT_ASSERT (every other check in these tests uses
 * UT_*; ASSERT compiles out in non-debug builds), and the long->unsigned
 * narrowing of the labs() result is now explicit and widened.
 */
static void *
worker_arena_threads(void *arg)
{
	int ret = -1;
	struct arena_alloc *ref = (struct arena_alloc *)arg;
	unsigned arena_id;

	ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(arena_id != 0);

	/* rebind this thread to the arena of the reference object */
	ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
	UT_ASSERTeq(ret, 0);

	PMEMoid oid[NOBJECT_THREAD];
	unsigned long d;
	for (int i = 0; i < NOBJECT_THREAD; i++) {
		ret = pmemobj_xalloc(pop, &oid[i],
			alloc_class[ALLOC_CLASS_ARENA].unit_size,
			0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128),
			NULL, NULL);
		UT_ASSERTeq(ret, 0);

		/* byte distance from the reference object */
		d = (unsigned long)labs((long)ref->oid.off -
			(long)oid[i].off);

		/* objects are in the same block as the first one */
		UT_ASSERT(d <= alloc_class[ALLOC_CLASS_ARENA].unit_size *
			(alloc_class[ALLOC_CLASS_ARENA].units_per_block - 1));
	}

	for (int i = 0; i < NOBJECT_THREAD; i++)
		pmemobj_free(&oid[i]);

	return NULL;
}
/*
 * worker_arena_ref_obj -- pin the calling thread to ref->arena and
 * allocate the reference object that worker_arena_threads() compares
 * against
 */
static void
worker_arena_ref_obj(struct arena_alloc *ref)
{
	int ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
	UT_ASSERTeq(ret, 0);

	ret = pmemobj_xalloc(pop, &ref->oid,
	    alloc_class[ALLOC_CLASS_ARENA].unit_size,
	    0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_arenas");
if (argc != 3)
UT_FATAL("usage: %s poolset [n|s|c|f|q|m|a]", argv[0]);
const char *path = argv[1];
char t = argv[2][0];
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
int ret = 0;
if (t == 'n') {
unsigned narenas = 0;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(narenas, 0);
} else if (t == 's') {
os_thread_t threads[NTHREAD];
util_mutex_init(&lock);
util_cond_init(&cond);
create_alloc_class();
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker_arenas_size,
(void *)(intptr_t)i);
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
} else if (t == 'c') {
char arena_idx_auto[CTL_QUERY_LEN];
unsigned narenas_b = 0;
unsigned narenas_a = 0;
unsigned narenas_n = 4;
unsigned arena_id;
unsigned all_auto;
int automatic;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_b);
UT_ASSERTeq(ret, 0);
/* all arenas created at the start should be set to auto */
for (unsigned i = 1; i <= narenas_b; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic", &all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b, all_auto);
/* all arenas created by user should not be auto */
for (unsigned i = 1; i <= narenas_n; i++) {
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arena_id, narenas_b + i);
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", arena_id);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(automatic, 0);
/*
* after creation, number of auto
* arenas should be the same
*/
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i - 1, all_auto);
/* change the state of created arena to auto */
int activate = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&activate);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
/* number of auto arenas should increase */
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i, all_auto);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_a);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + narenas_n, narenas_a);
/* at least one automatic arena must exist */
for (unsigned i = 1; i <= narenas_a; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
automatic = 0;
if (i < narenas_a) {
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
} else {
/*
* last auto arena -
* cannot change the state to 0...
*/
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, -1);
/* ...but can change (overwrite) to 1 */
automatic = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
}
}
} else if (t == 'a') {
int ret;
unsigned arena_id_new;
char alloc_class_idx_desc[CTL_QUERY_LEN];
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id_new);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id_new >= 1);
SNPRINTF(alloc_class_idx_desc, CTL_QUERY_LEN,
"heap.alloc_class.%d.desc",
ALLOC_CLASS_ARENA + 128);
ret = pmemobj_ctl_set(pop, alloc_class_idx_desc,
&alloc_class[ALLOC_CLASS_ARENA]);
UT_ASSERTeq(ret, 0);
ref.arena = arena_id_new;
worker_arena_ref_obj(&ref);
os_thread_t threads[NTHREAD_ARENA];
for (int i = 0; i < NTHREAD_ARENA; i++) {
THREAD_CREATE(&threads[i], NULL, worker_arena_threads,
&ref);
}
for (int i = 0; i < NTHREAD_ARENA; i++)
THREAD_JOIN(&threads[i], NULL);
} else if (t == 'f') {
os_thread_t threads[NTHREADX];
create_alloc_class();
for (int i = 0; i < NTHREADX; i++)
THREAD_CREATE(&threads[i], NULL,
worker_arenas_flag, NULL);
for (int i = 0; i < NTHREADX; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
} else if (t == 'q') {
unsigned total;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &total);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, NULL, alloc_class[0].unit_size, 0,
POBJ_ARENA_ID(total), NULL, NULL);
UT_ASSERTne(ret, 0);
} else if (t == 'm') {
unsigned max;
unsigned new_max;
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should not decrease */
new_max = DEFAULT_ARENAS_MAX - 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTne(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should increase */
new_max = DEFAULT_ARENAS_MAX + 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX + 1, max);
} else {
UT_ASSERT(0);
}
pmemobj_close(pop);
DONE(NULL);
}
| 11,314 | 23.651416 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/win_poolset_unmap/win_poolset_unmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_poolset_unmap.c -- test for windows mmap destructor.
*
* It checks whether all mappings are properly unmpapped and memory is properly
* unreserved when auto growing pool is used.
*/
#include "unittest.h"
#include "os.h"
#include "libpmemobj.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
#define LAYOUT_NAME "poolset_unmap"
/*
 * main -- create an auto-growing pool and inspect its Windows virtual
 * memory layout: the committed first part (optionally split off from a
 * 4KB header region) must be followed by reserved-only address space
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_poolset_unmap");

	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	MEMORY_BASIC_INFORMATION mbi;
	SIZE_T nbytes;
	SIZE_T off = 0;

	nbytes = VirtualQuery(pop, &mbi, sizeof(mbi));

	/*
	 * When opening the pool we try to drop all permissions on the
	 * header.  If that fails, VirtualQuery reports one 8MB region;
	 * if it succeeds, the header forms a separate 4KB region because
	 * its permissions differ from the rest of the mapping.
	 */
	if (mbi.RegionSize == 4 * KILOBYTE) {
		/* the header region */
		UT_ASSERTeq(nbytes, sizeof(mbi));
		UT_ASSERTeq(mbi.State, MEM_COMMIT);
		off += mbi.RegionSize;

		/* the first part, without the header */
		nbytes = VirtualQuery((char *)pop + off, &mbi, sizeof(mbi));
		UT_ASSERTeq(nbytes, sizeof(mbi));
		UT_ASSERTeq(mbi.RegionSize, 8 * MEGABYTE - 4 * KILOBYTE);
		UT_ASSERTeq(mbi.State, MEM_COMMIT);
	} else {
		/* the first part together with the header */
		UT_ASSERTeq(nbytes, sizeof(mbi));
		UT_ASSERTeq(mbi.RegionSize, 8 * MEGABYTE);
		UT_ASSERTeq(mbi.State, MEM_COMMIT);
	}

	off += mbi.RegionSize;

	/* past the first part only reserved (uncommitted) space remains */
	nbytes = VirtualQuery((char *)pop + off, &mbi, sizeof(mbi));
	UT_ASSERTeq(nbytes, sizeof(mbi));
	UT_ASSERTeq(mbi.RegionSize, (50 - 8) * MEGABYTE);
	UT_ASSERTeq(mbi.State, MEM_RESERVE);

	DONE(NULL);
}
| 2,117 | 25.810127 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_compat/pmem2_compat.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem2_compat.c -- compatibility test for libpmem vs libpmem2
*/
#include "unittest.h"
/*
 * main -- purely compile-time checks: every libpmem memory flag must have
 * the same value as its libpmem2 counterpart
 */
int
main(int argc, char *argv[])
{
	/* unused -- nothing here executes at runtime */
	(void) argc;
	(void) argv;

	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NOFLUSH != PMEM2_F_MEM_NOFLUSH);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NODRAIN != PMEM2_F_MEM_NODRAIN);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NONTEMPORAL != PMEM2_F_MEM_NONTEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_TEMPORAL != PMEM2_F_MEM_TEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WB != PMEM2_F_MEM_WB);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WC != PMEM2_F_MEM_WC);

	return 0;
}
| 606 | 26.590909 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_strdup/obj_tx_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_strdup.c -- unit test for pmemobj_tx_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#define LAYOUT_NAME "tx_strdup"

/* typed OIDs for the narrow- and wide-character duplicates */
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);

/*
 * Type numbers tagging the allocations, one per test scenario; each
 * TYPE_WCS_* entry is the wide-character counterpart of the entry above it.
 */
enum type_number {
	TYPE_NO_TX,
	TYPE_WCS_NO_TX,
	TYPE_COMMIT,
	TYPE_WCS_COMMIT,
	TYPE_ABORT,
	TYPE_WCS_ABORT,
	TYPE_FREE_COMMIT,
	TYPE_WCS_FREE_COMMIT,
	TYPE_FREE_ABORT,
	TYPE_WCS_FREE_ABORT,
	TYPE_COMMIT_NESTED1,
	TYPE_WCS_COMMIT_NESTED1,
	TYPE_COMMIT_NESTED2,
	TYPE_WCS_COMMIT_NESTED2,
	TYPE_ABORT_NESTED1,
	TYPE_WCS_ABORT_NESTED1,
	TYPE_ABORT_NESTED2,
	TYPE_WCS_ABORT_NESTED2,
	TYPE_ABORT_AFTER_NESTED1,
	TYPE_WCS_ABORT_AFTER_NESTED1,
	TYPE_ABORT_AFTER_NESTED2,
	TYPE_WCS_ABORT_AFTER_NESTED2,
	TYPE_NOFLUSH,
	TYPE_WCS_NOFLUSH,
};

#define TEST_STR_1	"Test string 1"
#define TEST_STR_2	"Test string 2"
#define TEST_WCS_1	L"Test string 3"
#define TEST_WCS_2	L"Test string 4"
/* number of duplication variants: direct API call and TX_*DUP macro */
#define MAX_FUNC	2

typedef void (*fn_tx_strdup)(TOID(char) *str, const char *s,
	unsigned type_num);
typedef void (*fn_tx_wcsdup)(TOID(wchar_t) *wcs, const wchar_t *s,
	unsigned type_num);

/* selects the duplication variant used by all do_tx_* scenarios */
static unsigned counter;
/*
 * tx_strdup -- duplicate a string through the pmemobj_tx_strdup API call
 */
static void
tx_strdup(TOID(char) *str, const char *s, unsigned type_num)
{
	PMEMoid dup = pmemobj_tx_strdup(s, type_num);
	TOID_ASSIGN(*str, dup);
}
/*
 * tx_wcsdup -- duplicate a wide string through the pmemobj_tx_wcsdup
 * API call
 */
static void
tx_wcsdup(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
	PMEMoid dup = pmemobj_tx_wcsdup(s, type_num);
	TOID_ASSIGN(*wcs, dup);
}
/*
 * tx_strdup_macro -- duplicate a string through the TX_STRDUP macro
 */
static void
tx_strdup_macro(TOID(char) *str, const char *s, unsigned type_num)
{
	PMEMoid dup = TX_STRDUP(s, type_num);
	TOID_ASSIGN(*str, dup);
}
/*
 * tx_wcsdup_macro -- duplicate a wide string through the TX_WCSDUP macro
 */
static void
tx_wcsdup_macro(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
	PMEMoid dup = TX_WCSDUP(s, type_num);
	TOID_ASSIGN(*wcs, dup);
}
/* dispatch tables: index 0 = direct API call, index 1 = macro variant */
static fn_tx_strdup do_tx_strdup[MAX_FUNC] = {tx_strdup, tx_strdup_macro};
static fn_tx_wcsdup do_tx_wcsdup[MAX_FUNC] = {tx_wcsdup, tx_wcsdup_macro};
/*
 * do_tx_strdup_commit -- duplicate a narrow and a wide string inside a
 * committed transaction and verify both duplicates persisted.
 *
 * Fix vs. the original: the wide-string duplicate was passed to wcscmp()
 * without the !TOID_IS_NULL check that the narrow string gets (every
 * sibling test checks both).
 */
static void
do_tx_strdup_commit(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_COMMIT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_COMMIT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	/* re-locate the duplicates by type number after the commit */
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT));
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERT(!TOID_IS_NULL(wcs));
	UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs)), 0);
}
/*
 * do_tx_strdup_abort -- duplicate a string, abort the transaction and
 * verify neither duplicate survived
 */
static void
do_tx_strdup_abort(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_ABORT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_ABORT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* the abort must have rolled back both allocations */
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_null -- duplicating a NULL string must abort the
 * transaction, unless the NO_ABORT flag or the RETURN failure behavior is
 * in effect, in which case the transaction commits and errno is set
 */
static void
do_tx_strdup_null(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		/* NULL input aborts the tx via longjmp */
		do_tx_strdup[counter](&str, NULL, TYPE_ABORT);
		do_tx_wcsdup[counter](&wcs, NULL, TYPE_WCS_ABORT);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
	/* with POBJ_XALLOC_NO_ABORT the tx survives and errno reports it */
	TX_BEGIN(pop) {
		pmemobj_tx_xstrdup(NULL, TYPE_ABORT, POBJ_XALLOC_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* POBJ_TX_FAILURE_RETURN has the same effect for the plain call... */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_strdup(NULL, TYPE_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* ...and for the x-variant without any flags */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_xstrdup(NULL, TYPE_ABORT, 0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_strdup_free_commit -- duplicate a string, free it within the same
 * transaction and commit; no object of either type may remain
 */
static void
do_tx_strdup_free_commit(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_COMMIT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_COMMIT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		/* free inside the same tx -- committed together with it */
		int ret = pmemobj_tx_free(str.oid);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_free(wcs.oid);
		UT_ASSERTeq(ret, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_COMMIT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_free_abort -- duplicate a string, free it and abort the
 * transaction; both the duplication and the free are rolled back, so no
 * object of either type may remain
 */
static void
do_tx_strdup_free_abort(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_ABORT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_ABORT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		int ret = pmemobj_tx_free(str.oid);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_free(wcs.oid);
		UT_ASSERTeq(ret, 0);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_commit_nested -- duplicate one pair of strings in the outer
 * and one in the inner (nested) transaction, commit both, and verify all
 * four duplicates persisted
 */
static void
do_tx_strdup_commit_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_COMMIT_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_COMMIT_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_COMMIT_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_COMMIT_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* outer-transaction duplicates */
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED1));
	UT_ASSERT(!TOID_IS_NULL(str1));
	UT_ASSERT(!TOID_IS_NULL(wcs1));
	UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str1)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs1)), 0);
	/* inner-transaction duplicates */
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED2));
	UT_ASSERT(!TOID_IS_NULL(str2));
	UT_ASSERT(!TOID_IS_NULL(wcs2));
	UT_ASSERTeq(strcmp(TEST_STR_2, D_RO(str2)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_2, D_RO(wcs2)), 0);
}
/*
 * do_tx_strdup_abort_nested -- duplicate strings in the outer and the
 * inner (nested) transaction, abort from inside the inner one; the abort
 * propagates outward, so none of the duplicates may remain
 * (original header comment wrongly named this "commit_abort")
 */
static void
do_tx_strdup_abort_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_ABORT_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_ABORT_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_ABORT_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_ABORT_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
			/* aborts the whole (outer) transaction */
			pmemobj_tx_abort(-1);
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED1));
	UT_ASSERT(TOID_IS_NULL(str1));
	UT_ASSERT(TOID_IS_NULL(wcs1));
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED2));
	UT_ASSERT(TOID_IS_NULL(str2));
	UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_abort_after_nested -- duplicate strings in the outer and
 * the inner (nested) transaction, let the inner one commit, then abort the
 * outer one; the nested commit is rolled back too, so none of the
 * duplicates may remain
 * (original header comment wrongly named this "commit_abort")
 */
static void
do_tx_strdup_abort_after_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1,
				TYPE_ABORT_AFTER_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_ABORT_AFTER_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_ABORT_AFTER_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_ABORT_AFTER_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
		/* abort the outer tx after the inner one committed */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_WCS_ABORT_AFTER_NESTED1));
	UT_ASSERT(TOID_IS_NULL(str1));
	UT_ASSERT(TOID_IS_NULL(wcs1));
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_WCS_ABORT_AFTER_NESTED2));
	UT_ASSERT(TOID_IS_NULL(str2));
	UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_noflush -- duplicate strings with POBJ_XALLOC_NO_FLUSH and
 * verify the transaction commits cleanly (errno stays 0)
 * (original header comment, "allocates zeroed object", described a
 * different test)
 */
static void
do_tx_strdup_noflush(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		errno = 0;
		pmemobj_tx_xstrdup(TEST_STR_1, TYPE_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
		pmemobj_tx_xwcsdup(TEST_WCS_1, TYPE_WCS_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * main -- run every scenario twice: once with the pmemobj_tx_strdup /
 * pmemobj_tx_wcsdup function variants (counter == 0) and once with the
 * TX_STRDUP / TX_WCSDUP macro variants (counter == 1)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_strdup");

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	for (counter = 0; counter < MAX_FUNC; counter++) {
		do_tx_strdup_commit(pop);
		do_tx_strdup_abort(pop);
		do_tx_strdup_null(pop);
		do_tx_strdup_free_commit(pop);
		do_tx_strdup_free_abort(pop);
		do_tx_strdup_commit_nested(pop);
		do_tx_strdup_abort_nested(pop);
		do_tx_strdup_abort_after_nested(pop);
	}
	do_tx_strdup_noflush(pop);
	pmemobj_close(pop);

	DONE(NULL);
}
| 11,087 | 24.315068 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_realloc/obj_tx_realloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_tx_realloc.c -- unit test for pmemobj_tx_realloc and pmemobj_tx_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#define LAYOUT_NAME "tx_realloc"
#define TEST_VALUE_1 1
/* total size of one test object, including the leading value field */
#define OBJ_SIZE 1024

/* type numbers tagging the allocations, one per test scenario */
enum type_number {
	TYPE_NO_TX,
	TYPE_COMMIT,
	TYPE_ABORT,
	TYPE_TYPE,
	TYPE_COMMIT_ZERO,
	TYPE_COMMIT_ZERO_MACRO,
	TYPE_ABORT_ZERO,
	TYPE_ABORT_ZERO_MACRO,
	TYPE_COMMIT_ALLOC,
	TYPE_ABORT_ALLOC,
	TYPE_ABORT_HUGE,
	TYPE_ABORT_ZERO_HUGE,
	TYPE_ABORT_ZERO_HUGE_MACRO,
	TYPE_FREE,
};

/* test object: a marker value padded up to OBJ_SIZE bytes */
struct object {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object, 0);

/* identical layout under its own type number for the TX_ZREALLOC macro */
struct object_macro {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object_macro, TYPE_COMMIT_ZERO_MACRO);
/*
 * do_tx_alloc -- allocate a struct object with the given type number in
 * its own committed transaction, store `value` in it, and return its oid
 * (OID_NULL if the allocation failed)
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num, size_t value)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, OID_NULL);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_alloc(
				sizeof(struct object), type_num));
		if (!TOID_IS_NULL(obj)) {
			D_RW(obj)->value = value;
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	return obj.oid;
}
/*
 * do_tx_realloc_commit -- grow an object to twice its usable size inside a
 * committed transaction; the contents (value) must be preserved and the
 * object must remain the only one of its type
 */
static void
do_tx_realloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	/* exactly one object of this type may exist */
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_abort -- grow an object inside a transaction, then abort;
 * the original (smaller) object must survive unchanged
 * (original header comment wrongly said "commit")
 */
static void
do_tx_realloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* still the pre-realloc size after the rollback */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_huge -- reallocate an object to a huge size to trigger tx
 * abort; the longjmp from the failed realloc skips the rest of the tx body
 * and the original object must survive unchanged
 */
static void
do_tx_realloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_HUGE, TEST_VALUE_1));
	size_t new_size = PMEMOBJ_MAX_ALLOC_SIZE + 1;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit_macro -- grow an object with the TX_ZREALLOC
 * macro and commit; the newly added tail must be zeroed and the original
 * value preserved
 */
static void
do_tx_zrealloc_commit_macro(PMEMobjpool *pop)
{
	TOID(struct object_macro) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* bytes past the old size must read as zero */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit -- grow an object with pmemobj_tx_zrealloc and
 * commit; the newly added tail must be zeroed and the original value
 * preserved
 */
static void
do_tx_zrealloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_COMMIT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* bytes past the old size must read as zero */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort_macro -- grow and zero an object with the
 * TX_ZREALLOC macro, then abort; the original (smaller) object must
 * survive unchanged
 * (original header comment named the wrong function and said "commit")
 */
static void
do_tx_zrealloc_abort_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_MACRO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* still the pre-realloc size after the rollback */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort -- grow and zero an object with
 * pmemobj_tx_zrealloc, then abort; the original (smaller) object must
 * survive unchanged
 * (original header comment wrongly said "commit")
 */
static void
do_tx_zrealloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_ABORT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* still the pre-realloc size after the rollback */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge_macro -- TX_ZREALLOC to a huge size to trigger tx
 * abort; the longjmp skips the rest of the tx body and the original
 * object must survive unchanged
 */
static void
do_tx_zrealloc_huge_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge -- pmemobj_tx_zrealloc to a huge size to trigger tx
 * abort; the original object must survive unchanged
 * (original header comment named the non-zeroing variant)
 */
static void
do_tx_zrealloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT_ZERO_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_commit -- allocate and then grow an object within
 * one (outer) transaction and commit; the grown object must persist
 */
static void
do_tx_realloc_alloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		/* do_tx_alloc opens a nested tx of its own */
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ALLOC));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_abort -- allocate and grow an object within one
 * transaction, then abort; both the allocation and the realloc are rolled
 * back, so no object of this type may remain
 * (original header comment wrongly said "commit")
 */
static void
do_tx_realloc_alloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		/* do_tx_alloc opens a nested tx of its own */
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ALLOC));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_root_realloc -- retrieve (and implicitly grow) the root object
 * inside a transaction; each resize must return a zeroed object and be
 * reflected by pmemobj_root_size()
 */
static void
do_tx_root_realloc(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		PMEMoid root = pmemobj_root(pop, sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				sizeof(struct object)));
		UT_ASSERTeq(sizeof(struct object), pmemobj_root_size(pop));
		/* asking for a larger root reallocates it in place */
		root = pmemobj_root(pop, 2 * sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				2 * sizeof(struct object)));
		UT_ASSERTeq(2 * sizeof(struct object), pmemobj_root_size(pop));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_realloc_free -- reallocate an existing object to size zero
 * (which frees it) and commit the transaction
 */
static void
do_tx_realloc_free(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_FREE, TEST_VALUE_1));
	TX_BEGIN(pop) {
		/* size 0 turns the realloc into a free */
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				0, TYPE_COMMIT));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* no object of TYPE_FREE may remain after the commit */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * main -- entry point of the obj_tx_realloc test; creates the pool and
 * runs every realloc scenario in sequence
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_realloc");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	/* pool size 0 -- take the size from the (poolset) file */
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_root_realloc(pop);
	do_tx_realloc_commit(pop);
	do_tx_realloc_abort(pop);
	do_tx_realloc_huge(pop);
	do_tx_zrealloc_commit(pop);
	do_tx_zrealloc_commit_macro(pop);
	do_tx_zrealloc_abort(pop);
	do_tx_zrealloc_abort_macro(pop);
	do_tx_zrealloc_huge(pop);
	do_tx_zrealloc_huge_macro(pop);
	do_tx_realloc_alloc_commit(pop);
	do_tx_realloc_alloc_abort(pop);
	do_tx_realloc_free(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 12,874 | 25.767152 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_lock/obj_tx_lock.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_tx_lock.c -- unit test for pmemobj_tx_lock()
*/
#include "unittest.h"
#include "libpmemobj.h"
#include "obj.h"
#define LAYOUT_NAME "obj_tx_lock"
#define NUM_LOCKS 2
/* persistent locks shared by all obj_tx_lock scenarios */
struct transaction_data {
	PMEMmutex mutexes[NUM_LOCKS];
	PMEMrwlock rwlocks[NUM_LOCKS];
};
/* pool handle used by every test function */
static PMEMobjpool *Pop;
/* acquire all test locks as part of the current transaction */
#define DO_LOCK(mtx, rwlock)\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[0]);\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[1]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[0]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[1])
/*
 * assert that none of the locks is taken (by try-locking and unlocking
 * each one); expects a local 'int ret' in the calling scope
 */
#define IS_UNLOCKED(pop, mtx, rwlock)\
	ret = 0;\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERTeq(ret, 0);\
	pmemobj_mutex_unlock((pop), &(mtx)[0]);\
	pmemobj_mutex_unlock((pop), &(mtx)[1]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[0]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[1])
/*
 * assert that every lock is already taken (each trylock must fail);
 * expects a local 'int ret' in the calling scope
 */
#define IS_LOCKED(pop, mtx, rwlock)\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERT(ret != 0)
/*
 * do_tx_add_locks -- (internal) transaction where locks are added after
 * the transaction begins; the locks must be held while the transaction
 * is live and released automatically when it ends
 */
static void *
do_tx_add_locks(struct transaction_data *data)
{
	int ret;
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT { /* not called */
		UT_ASSERT(0);
	} TX_END
	/* all locks are dropped on transaction end */
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested -- (internal) transaction where locks
 * are added after a nested transaction begins; locks taken in the inner
 * transaction stay held until the outermost transaction ends
 */
static void *
do_tx_add_locks_nested(struct transaction_data *data)
{
	int ret;
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		/* still held -- inner TX_END does not release them */
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested_all -- (internal) transaction where all locks
 * are added in both the outer and the nested transaction; re-adding an
 * already held lock must be harmless and everything is released only
 * when the outermost transaction ends
 */
static void *
do_tx_add_locks_nested_all(struct transaction_data *data)
{
	int ret;
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
			/* lock the same locks again in the nested tx */
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_taken_lock -- (internal) verify that a failed tx_lock does
 * not add the lock to the transaction: the lock taken outside the tx
 * must still be held (and unlockable) after the transaction ends
 */
static void *
do_tx_add_taken_lock(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		/* locking an already-held wrlock must fail */
		UT_ASSERTne(pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]),
				0);
	} TX_END
	/* tx cleanup must not have released our lock */
	UT_ASSERTne(pmemobj_rwlock_trywrlock(Pop, &data->rwlocks[0]), 0);
	UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
#endif
	return NULL;
}
/*
 * do_tx_lock_fail -- call pmemobj_tx_(x)lock on an already taken lock
 * and check every failure-reporting mode: default (abort the tx),
 * POBJ_XLOCK_NO_ABORT, and POBJ_TX_FAILURE_RETURN set on the tx itself
 */
static void *
do_tx_lock_fail(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	int ret = 0;
	/* return errno and abort transaction */
	TX_BEGIN(Pop) {
		pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONABORT {
		UT_ASSERTne(errno, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction (per-call NO_ABORT flag) */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0],
				POBJ_XLOCK_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction (tx-wide failure behavior) */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction (failure behavior + xlock) */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
#endif
	return NULL;
}
/*
 * do_fault_injection -- inject an allocation failure inside
 * add_to_tx_and_lock and verify that the resulting error aborts the
 * transaction with ENOMEM
 */
static void
do_fault_injection(struct transaction_data *data)
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "add_to_tx_and_lock");
	int ret;
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		int err = pmemobj_tx_lock(TX_PARAM_MUTEX, &data->mutexes[0]);
		if (err)
			pmemobj_tx_abort(err);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
}
/*
 * main -- entry point of the obj_tx_lock test; creates the pool and
 * dispatches each single-letter argument to its scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_lock");
	if (argc < 3)
		UT_FATAL("usage: %s <file> [l|n|a|t|f|w]", argv[0]);
	if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	/* the locks live in the (zero-initialized) root object */
	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *test_obj =
			(struct transaction_data *)pmemobj_direct(root);
	/* go through all arguments one by one */
	for (int arg = 2; arg < argc; arg++) {
		/* Scan the character of each argument. */
		if (strchr("lnatfw", argv[arg][0]) == NULL ||
				argv[arg][1] != '\0')
			UT_FATAL("op must be l or n or a or t or f or w");
		switch (argv[arg][0]) {
		case 'l':
			do_tx_add_locks(test_obj);
			break;
		case 'n':
			do_tx_add_locks_nested(test_obj);
			break;
		case 'a':
			do_tx_add_locks_nested_all(test_obj);
			break;
		case 't':
			do_tx_add_taken_lock(test_obj);
			break;
		case 'f':
			do_fault_injection(test_obj);
			break;
		case 'w':
			do_tx_lock_fail(test_obj);
			break;
		}
	}
	pmemobj_close(Pop);
	DONE(NULL);
}
| 7,003 | 24.75 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_memops/obj_memops.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_memops.c -- basic memory operations tests
*
*/
#include <stddef.h>
#include "obj.h"
#include "memops.h"
#include "ulog.h"
#include "unittest.h"
#define TEST_ENTRIES 256
#define TEST_VALUES TEST_ENTRIES
/* ways in which a prepared redo log is corrupted before recovery */
enum fail_types {
	FAIL_NONE,		/* no corruption -- process normally */
	FAIL_CHECKSUM,		/* flip the log checksum */
	FAIL_MODIFY_NEXT,	/* attach a fresh 'next' log */
	FAIL_MODIFY_VALUE,	/* tamper with logged data bytes */
};
/* persistent layout: a redo log, an undo log and the values they modify */
struct test_object {
	struct ULOG(TEST_ENTRIES) redo;
	struct ULOG(TEST_ENTRIES) undo;
	uint64_t values[TEST_VALUES];
};
/*
 * clear_test_values -- zero every value slot of the test object
 */
static void
clear_test_values(struct test_object *object)
{
	for (size_t slot = 0; slot < TEST_VALUES; ++slot)
		object->values[slot] = 0;
}
/*
 * redo_log_constructor -- persistent-object constructor that turns the
 * freshly allocated memory into a ulog; 'arg' points to the generation
 * number for the new log
 */
static int
redo_log_constructor(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	const struct pmem_ops *p_ops = &pop->p_ops;
	/* usable capacity past the header, rounded down to a cacheline */
	size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
			CACHELINE_SIZE);
	ulog_construct(OBJ_PTR_TO_OFF(ctx, ptr), capacity,
			*(uint64_t *)arg, 1, 0, p_ops);
	return 0;
}
/*
 * pmalloc_redo_extend -- ulog extend callback; allocates a new internal
 * log of the standard test size, constructed with the given generation
 * number, and stores its offset under *redo
 */
static int
pmalloc_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
	size_t s = SIZEOF_ALIGNED_ULOG(TEST_ENTRIES);
	return pmalloc_construct(base, redo, s, redo_log_constructor, &gen_num,
		0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * test_free_entry -- ulog_free_fn stub; the fake ulog entries used by
 * these tests are not separately allocated, so there is nothing to free
 */
static void
test_free_entry(void *base, uint64_t *next)
{
	/* intentionally empty */
	(void) base;
	(void) next;
}
/*
 * test_set_entries -- append 'nentries' SET entries (values[i] = i + 1)
 * to the log; with FAIL_NONE process and verify the writes took effect,
 * otherwise cancel, corrupt the log in the requested way and verify
 * that recovery refuses to apply it
 */
static void
test_set_entries(PMEMobjpool *pop,
	struct operation_context *ctx, struct test_object *object,
	size_t nentries, enum fail_types fail, enum operation_log_type type)
{
	operation_start(ctx);
	UT_ASSERT(nentries <= ARRAY_SIZE(object->values));
	for (size_t i = 0; i < nentries; ++i) {
		operation_add_typed_entry(ctx,
			&object->values[i], i + 1,
			ULOG_OPERATION_SET, type);
	}
	operation_reserve(ctx, nentries * 16);
	if (fail != FAIL_NONE) {
		operation_cancel(ctx);
		/* simulate a crash with a damaged log */
		switch (fail) {
			case FAIL_CHECKSUM:
				object->redo.checksum += 1;
			break;
			case FAIL_MODIFY_NEXT:
				pmalloc_redo_extend(pop,
					&object->redo.next, 0);
			break;
			case FAIL_MODIFY_VALUE:
				object->redo.data[16] += 8;
			break;
			default:
				UT_ASSERT(0);
		}
		ulog_recover((struct ulog *)&object->redo,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
		/* a corrupted log must not modify any value */
		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], 0);
	} else {
		operation_process(ctx);
		operation_finish(ctx, 0);
		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], i + 1);
	}
}
/*
 * test_merge_op -- log several OR/AND operations on the same word and
 * verify that processing applies them in order (final value 0b01)
 */
static void
test_merge_op(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b10,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b00,
		ULOG_OPERATION_AND, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_process(ctx);
	operation_finish(ctx, 0);
	UT_ASSERTeq(object->values[0], 0b01);
}
/*
 * test_same_twice -- log two SET entries for the same address and check
 * that the later one (10) wins over the earlier one (5)
 */
static void
test_same_twice(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 5,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 10,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 10);
	operation_cancel(ctx);
}
/*
 * test_redo -- run all redo-log scenarios: full-capacity fill,
 * transient/persistent logs, merged bit operations, the corruption
 * cases, duplicate entries, and finally rebuilding of redo_next
 */
static void
test_redo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/*
	 * Keep this test first.
	 * It tests a situation where the number of objects being added
	 * is equal to the capacity of the log.
	 */
	test_set_entries(pop, ctx, object, TEST_ENTRIES - 1,
		FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_TRANSIENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_merge_op(ctx, object);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_same_twice(ctx, object);
	clear_test_values(object);
	operation_delete(ctx);
	/*
	 * Verify that rebuilding redo_next works. This requires that
	 * object->redo->next is != 0 - to achieve that, this test must
	 * be preceded by a test that fails to finish the ulog's operation.
	 */
	ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		NULL, test_free_entry, &pop->p_ops, LOG_TYPE_REDO);
	test_set_entries(pop, ctx, object, 100, 0, LOG_PERSISTENT);
	clear_test_values(object);
	/* FAIL_MODIFY_NEXT tests can only happen after redo_next test */
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	operation_delete(ctx);
}
/*
 * test_undo_small_single_copy -- snapshot two values into the undo log,
 * overwrite them and verify that processing the log restores the
 * snapshot; then verify a fresh (empty) operation leaves direct writes
 * untouched
 */
static void
test_undo_small_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_CPY);
	object->values[0] = 2;
	object->values[1] = 1;
	/* processing the undo log rolls back to the snapshot (1, 2) */
	operation_process(ctx);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	operation_start(ctx);
	UT_ASSERTeq(object->values[0], 1);
	UT_ASSERTeq(object->values[1], 2);
	object->values[0] = 2;
	object->values[1] = 1;
	/* nothing was logged this time, so the writes must stick */
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 2);
	UT_ASSERTeq(object->values[1], 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_single_set -- log a single BUF_SET entry covering two
 * values; processing it must clear both values to 0
 */
static void
test_undo_small_single_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0; /* the fill byte recorded in the log */
	operation_add_buffer(ctx,
		&object->values, &c, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_multiple_set -- same as the single-set case but with
 * one BUF_SET entry per value; both must end up cleared
 */
static void
test_undo_small_multiple_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0; /* the fill byte recorded in the log */
	operation_add_buffer(ctx,
		&object->values[0], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_add_buffer(ctx,
		&object->values[1], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_single_copy -- snapshot the whole values array in a
 * single BUF_CPY entry, overwrite everything and verify the snapshot
 * (i + 1) is restored by processing
 */
static void
test_undo_large_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_checksum_mismatch -- corrupt one byte of a populated undo
 * log and verify that processing refuses to apply it (the in-place
 * writes i + 2 must survive)
 */
static void
test_undo_checksum_mismatch(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object, struct ulog *log)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 20,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 2;
	pmemobj_persist(pop, &object->values, sizeof(*object->values) * 20);
	log->data[100] += 1; /* corrupt the log somewhere */
	pmemobj_persist(pop, &log->data[100], sizeof(log->data[100]));
	operation_process(ctx);
	/* the log shouldn't get applied */
	for (uint64_t i = 0; i < 20; ++i)
		UT_ASSERTeq(object->values[i], i + 2);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_copy -- two rounds of undo snapshots: first the whole
 * array (restore to i + 1), then only the first 26 values, so after
 * processing the head is restored (i + 3) while the tail keeps the new
 * writes (i + 4)
 */
static void
test_undo_large_copy(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 3;
	operation_start(ctx);
	/* this time snapshot only the first 26 values */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 26,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 4;
	pmemobj_persist(pop, &object->values, sizeof(object->values));
	operation_process(ctx);
	for (uint64_t i = 0; i < 26; ++i)
		UT_ASSERTeq(object->values[i], i + 3);
	for (uint64_t i = 26; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 4);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_foreach -- ulog entry visitor that counts visited entries
 * through the size_t pointed to by 'arg'; always returns 0
 */
static int
test_undo_foreach(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops)
{
	(void) e;
	(void) p_ops;
	size_t *count = arg;
	*count += 1;
	return 0;
}
/*
 * drain_empty -- no-op drain callback for the volatile pmem_ops used by
 * the DRAM-backed log tests
 */
static void
drain_empty(void *ctx)
{
	/* nothing to drain for plain DRAM */
	(void) ctx;
}
/*
 * persist_empty -- no-op persist callback for pmem_ops; always reports
 * success
 */
static int
persist_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * flush_empty -- no-op flush callback for pmem_ops; always reports
 * success
 */
static int
flush_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
* memcpy_libc -- memcpy for pmem_ops
*/
static void *
memcpy_libc(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
return memcpy(dest, src, len);
}
/*
 * memset_libc -- pmem_ops memset callback backed by plain libc memset;
 * 'ctx' and 'flags' are ignored, returns the buffer pointer
 */
static void *
memset_libc(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	(void) ctx;
	(void) flags;
	memset(ptr, c, sz);
	return ptr;
}
/*
 * test_undo_log_reuse -- test for correct reuse of log space: two
 * DRAM-backed ulogs are chained together, filled with buffer entries,
 * and after operation_init() the old entries must be overwritten (the
 * visible entry count restarts from one)
 */
static void
test_undo_log_reuse()
{
#define ULOG_SIZE 1024
	/* volatile pmem_ops -- no real persistence is involved */
	struct pmem_ops ops = {
		.persist = persist_empty,
		.flush = flush_empty,
		.drain = drain_empty,
		.memcpy = memcpy_libc,
		.memmove = NULL,
		.memset = memset_libc,
		.base = NULL,
	};
	struct ULOG(ULOG_SIZE) *first = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	struct ULOG(ULOG_SIZE) *second = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	ulog_construct((uint64_t)(first), ULOG_SIZE, 0, 0, 0, &ops);
	ulog_construct((uint64_t)(second), ULOG_SIZE, 0, 0, 0, &ops);
	/* chain the two logs together */
	first->next = (uint64_t)(second);
	struct operation_context *ctx = operation_new(
		(struct ulog *)first, ULOG_SIZE,
		NULL, test_free_entry,
		&ops, LOG_TYPE_UNDO);
	size_t nentries = 0;
	ulog_foreach_entry((struct ulog *)first,
		test_undo_foreach, &nentries, &ops,NULL);
	UT_ASSERTeq(nentries, 0);
	/* first, let's populate the log with some valid entries */
	size_t entry_size = (ULOG_SIZE / 2) - sizeof(struct ulog_entry_buf);
	size_t total_entries = ((ULOG_SIZE * 2) / entry_size);
	char *data = MALLOC(entry_size);
	memset(data, 0xc, entry_size); /* fill it with something */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	operation_init(ctx); /* initialize a new operation */
	/* let's overwrite old entries and see if they are no longer visible */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	FREE(data);
	operation_delete(ctx);
	util_aligned_free(first);
	util_aligned_free(second);
#undef ULOG_SIZE
}
/*
 * test_redo_cleanup_same_size -- test redo log cleanup when the
 * externally attached 'next' log has the same size as the reservation
 * being published
 */
static void
test_redo_cleanup_same_size(PMEMobjpool *pop, struct test_object *object)
{
#define ULOG_SIZE 1024
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/* attach a 'next' log of the same size as the later reservation */
	int ret = pmalloc(pop, &object->redo.next, ULOG_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_start(ctx); /* initialize a new operation */
	struct pobj_action act;
	pmemobj_reserve(pop, &act, ULOG_SIZE, 0);
	palloc_publish(&pop->heap, &act, 1, ctx);
	operation_delete(ctx);
#undef ULOG_SIZE
}
/*
 * test_undo -- run every undo-log scenario on the persistent undo log
 * and verify its final capacity afterwards
 */
static void
test_undo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->undo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_UNDO);
	test_undo_small_single_copy(ctx, object);
	test_undo_small_single_set(ctx, object);
	test_undo_small_multiple_set(ctx, object);
	test_undo_large_single_copy(ctx, object);
	test_undo_large_copy(pop, ctx, object);
	test_undo_checksum_mismatch(pop, ctx, object,
		(struct ulog *)&object->undo);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_delete(ctx);
}
/*
 * main -- entry point of the obj_memops test; sets up a cacheline
 * aligned test object in a fresh pool and runs the redo and undo suites
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memops");
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, "obj_memops",
			PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/*
	 * The ulog API requires cacheline alignment. A cacheline-aligned
	 * allocation class is created here to properly test the ulog API.
	 * An aligned object can then be allocated using pmemobj_xalloc.
	 */
	struct pobj_alloc_class_desc new_ac = {
		.unit_size = sizeof(struct test_object),
		.alignment = CACHELINE_SIZE,
		.units_per_block = 1,
		.header_type = POBJ_HEADER_NONE,
	};
	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &new_ac) == -1)
		UT_FATAL("Failed to set allocation class");
	PMEMoid pobject;
	if (pmemobj_xalloc(pop, &pobject, sizeof(struct test_object), 0,
			POBJ_CLASS_ID(new_ac.class_id), NULL, NULL) == -1)
		UT_FATAL("Failed to allocate object");
	struct test_object *object = pmemobj_direct(pobject);
	UT_ASSERTne(object, NULL);
	/* construct the persistent logs inside the test object */
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->undo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->redo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	test_redo(pop, object);
	test_undo(pop, object);
	test_redo_cleanup_same_size(pop, object);
	test_undo_log_reuse();
	pmemobj_close(pop);
	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 15,904 | 23.319572 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_strdup/obj_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_strdup.c -- unit test for pmemobj_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
/* one type number (object container) per test scenario */
enum type_number {
	TYPE_SIMPLE,
	TYPE_NULL,
	TYPE_SIMPLE_ALLOC,
	TYPE_SIMPLE_ALLOC_1,
	TYPE_SIMPLE_ALLOC_2,
	TYPE_NULL_ALLOC,
	TYPE_NULL_ALLOC_1,
};
/* fixture strings (narrow and wide) */
#define TEST_STR_1	"Test string 1"
#define TEST_STR_2	"Test string 2"
#define TEST_WCS_1	L"Test string 3"
#define TEST_WCS_2	L"Test string 4"
#define TEST_STR_EMPTY ""
#define TEST_WCS_EMPTY L""
/*
 * do_strdup -- duplicate a string (and a wide string) into an
 * unallocated TOID using pmemobj_strdup/pmemobj_wcsdup and verify the
 * copies match the sources
 */
static void
do_strdup(PMEMobjpool *pop)
{
	TOID(char) str = TOID_NULL(char);
	TOID(wchar_t) wcs = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &str.oid, TEST_STR_1, TYPE_SIMPLE);
	pmemobj_wcsdup(pop, &wcs.oid, TEST_WCS_1, TYPE_SIMPLE);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERT(!TOID_IS_NULL(wcs));
	UT_ASSERTeq(strcmp(D_RO(str), TEST_STR_1), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs), TEST_WCS_1), 0);
}
/*
 * do_strdup_null -- duplicating a NULL source must leave the output
 * OIDs null for both the narrow and the wide variant
 */
static void
do_strdup_null(PMEMobjpool *pop)
{
	TOID(char) dup_str = TOID_NULL(char);
	TOID(wchar_t) dup_wcs = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &dup_str.oid, NULL, TYPE_NULL);
	pmemobj_wcsdup(pop, &dup_wcs.oid, NULL, TYPE_NULL);
	UT_ASSERT(TOID_IS_NULL(dup_str));
	UT_ASSERT(TOID_IS_NULL(dup_wcs));
}
/*
 * do_alloc -- allocate a char object, then overwrite its TOID with a
 * fresh pmemobj_strdup copy of 's' and return it
 */
static TOID(char)
do_alloc(PMEMobjpool *pop, const char *s, unsigned type_num)
{
	TOID(char) str;
	POBJ_ZNEW(pop, &str, char);
	/* NOTE: the OID is simply replaced by the strdup allocation */
	pmemobj_strdup(pop, &str.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERTeq(strcmp(D_RO(str), s), 0);
	return str;
}
/*
 * do_wcs_alloc -- wide-character counterpart of do_alloc(); returns a
 * TOID holding a pmemobj_wcsdup copy of 's'
 */
static TOID(wchar_t)
do_wcs_alloc(PMEMobjpool *pop, const wchar_t *s, unsigned type_num)
{
	TOID(wchar_t) str;
	POBJ_ZNEW(pop, &str, wchar_t);
	/* NOTE: the OID is simply replaced by the wcsdup allocation */
	pmemobj_wcsdup(pop, &str.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERTeq(wcscmp(D_RO(str), s), 0);
	return str;
}
/*
 * do_strdup_alloc -- duplicate one persistent string into a TOID that
 * already holds an allocation; the destination must end up equal to the
 * source
 */
static void
do_strdup_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_2);
	TOID(wchar_t) wcs2 = do_wcs_alloc(pop, TEST_WCS_2, TYPE_SIMPLE_ALLOC_2);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), D_RO(wcs2)), 0);
}
/*
 * do_strdup_null_alloc -- duplicate a NULL string into an already
 * allocated TOID; the destination must keep its previous (non-null)
 * allocation
 */
static void
do_strdup_null_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_NULL_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_NULL_ALLOC_1);
	TOID(char) str2 = TOID_NULL(char);
	TOID(wchar_t) wcs2 = TOID_NULL(wchar_t);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_NULL_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_NULL_ALLOC);
	UT_ASSERT(!TOID_IS_NULL(str1));
	UT_ASSERT(!TOID_IS_NULL(wcs1));
}
/*
 * do_strdup_uint64_range -- duplicate strings using the extreme type
 * numbers UINT64_MAX and UINT64_MAX - 1
 */
static void
do_strdup_uint64_range(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str3;
	TOID(char) str4 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), UINT64_MAX);
	pmemobj_strdup(pop, &str3.oid, D_RO(str4), UINT64_MAX - 1);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(strcmp(D_RO(str3), D_RO(str4)), 0);
}
/*
 * do_strdup_alloc_empty_string -- overwrite existing duplicates with an
 * empty string / empty wide string
 */
static void
do_strdup_alloc_empty_string(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	pmemobj_strdup(pop, &str1.oid, TEST_STR_EMPTY, TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, TEST_WCS_EMPTY, TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), TEST_STR_EMPTY), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), TEST_WCS_EMPTY), 0);
}
/*
 * main -- entry point of the obj_strdup test; creates the pool and runs
 * every strdup/wcsdup scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_strdup");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_strdup(pop);
	do_strdup_null(pop);
	do_strdup_alloc(pop);
	do_strdup_null_alloc(pop);
	do_strdup_uint64_range(pop);
	do_strdup_alloc_empty_string(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 5,017 | 26.571429 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_is_pmem/pmem_is_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem.c -- unit test for pmem_is_pmem()
*
* usage: pmem_is_pmem file [env]
*/
#include "unittest.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
/*
 * worker -- the work each thread performs: probe the mapped range with
 * pmem_is_pmem() and store the answer under the caller-provided pointer
 */
static void *
worker(void *arg)
{
	int *result = arg;
	*result = pmem_is_pmem(Addr, Size);
	return NULL;
}
/*
 * main -- maps the file, queries pmem_is_pmem() concurrently from
 * NTHREAD threads and verifies they all agree, then re-checks without
 * the PMEM_IS_PMEM_FORCE override and with zero-sized ranges
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_is_pmem");
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file [env]", argv[0]);
	/* optional second argument forces the answer via the environment */
	if (argc == 3)
		UT_ASSERTeq(os_setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);
	Addr = pmem_map_file(argv[1], 0, 0, 0, &Size, NULL);
	UT_ASSERTne(Addr, NULL);
	os_thread_t threads[NTHREAD];
	int ret[NTHREAD];
	/* kick off NTHREAD threads */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
	/* wait for all the threads to complete */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_JOIN(&threads[i], NULL);
	/* verify that all the threads return the same value */
	for (int i = 1; i < NTHREAD; i++)
		UT_ASSERTeq(ret[0], ret[i]);
	UT_OUT("threads.is_pmem(Addr, Size): %d", ret[0]);
	UT_ASSERTeq(os_unsetenv("PMEM_IS_PMEM_FORCE"), 0);
	UT_OUT("is_pmem(Addr, Size): %d", pmem_is_pmem(Addr, Size));
	/* zero-sized region is not pmem */
	UT_OUT("is_pmem(Addr, 0): %d", pmem_is_pmem(Addr, 0));
	UT_OUT("is_pmem(Addr + Size / 2, 0): %d",
			pmem_is_pmem((char *)Addr + Size / 2, 0));
	UT_OUT("is_pmem(Addr + Size, 0): %d",
			pmem_is_pmem((char *)Addr + Size, 0));
	DONE(NULL);
}
| 3,216 | 30.23301 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_obc_int/rpmem_obc_int.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc_int.c -- integration test for rpmem_obc and rpmemd_obc modules
*/
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmemd_obc.h"
#include "rpmemd_log.h"
#include "os.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define PERSIST_METHOD RPMEM_PM_GPSPM
#define RESP_ATTR_INIT {\
.port = PORT,\
.rkey = RKEY,\
.raddr = RADDR,\
.persist_method = PERSIST_METHOD,\
.nlanes = NLANES_RESP,\
}
#define REQ_ATTR_INIT {\
.pool_size = POOL_SIZE,\
.nlanes = NLANES,\
.provider = PROVIDER,\
.pool_desc = POOL_DESC,\
}
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server);
/*
 * client_create -- perform create request
 *
 * Connects to the out-of-band server, issues a create request with the
 * canned REQ/POOL attributes, and checks every field of the response
 * against RESP_ATTR_INIT.
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	/* info is copied by connect; safe to release here */
	rpmem_target_free(info);
	/* NOTE(review): monitor(rpc, 1) assumed to be a nonblocking
	 * liveness probe returning 1 while the connection is up -- confirm */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	/* response must match what the server-side callback sent back */
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	/* one argument (the target) consumed */
	return 1;
}
/*
 * client_open -- perform open request
 *
 * Mirrors client_create, but additionally verifies the pool attributes
 * returned by the server against POOL_ATTR_INIT.
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	struct rpmem_pool_attr pool_attr;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	/* attributes come back verbatim from the server's req_open() */
	UT_ASSERTeq(memcmp(&ex_pool_attr, &pool_attr,
		sizeof(ex_pool_attr)), 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * client_set_attr -- perform set attributes request
 *
 * Sends the alternate attribute set (POOL_ATTR_ALT); the server-side
 * req_set_attr() callback verifies the received values.
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	const struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_set_attr(rpc, &pool_attr);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * req_arg -- request callbacks argument
 */
struct req_arg {
	struct rpmem_resp_attr resp;	/* response sent for create/open */
	struct rpmem_pool_attr pool_attr; /* attributes returned by open */
	int closing;			/* set once a close request arrives */
};
/*
 * req_create -- process create request
 *
 * Verifies the request and pool attributes received from the client
 * against the canned REQ/POOL values, then responds with args->resp.
 */
static int
req_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_create_resp(obc, 0, &args->resp);
}
/*
 * req_open -- process open request
 *
 * Verifies the request attributes and responds with both the canned
 * response and pool attributes from args.
 */
static int
req_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_open_resp(obc, 0,
			&args->resp, &args->pool_attr);
}
/*
 * req_set_attr -- process set attributes request
 *
 * The client sends POOL_ATTR_ALT; verify it arrived intact and ack.
 */
static int
req_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_ALT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	return rpmemd_obc_set_attr_resp(obc, 0);
}
/*
 * req_close -- process close request
 *
 * Marks the session as closing so server() can leave its loop.
 * The 'flags' argument is accepted but not inspected by this test.
 */
static int
req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	UT_ASSERTne(arg, NULL);
	struct req_arg *args = arg;
	args->closing = 1;
	return rpmemd_obc_close_resp(obc, 0);
}
/*
 * REQ -- server request callbacks
 */
static struct rpmemd_obc_requests REQ = {
	.create = req_create,
	.open = req_open,
	.close = req_close,
	.set_attr = req_set_attr,
};
/*
 * server -- run server and process clients requests
 *
 * Processes requests until a close request flips arg.closing, then
 * expects one more rpmemd_obc_process() to report the closed connection
 * (return value 1).
 */
int
server(const struct test_case *tc, int argc, char *argv[])
{
	int ret;
	struct req_arg arg = {
		.resp = RESP_ATTR_INIT,
		.pool_attr = POOL_ATTR_INIT,
		.closing = 0,
	};
	struct rpmemd_obc *obc;
	/* stdin/stdout (fds 0 and 1) carry the out-of-band channel */
	obc = rpmemd_obc_init(0, 1);
	UT_ASSERTne(obc, NULL);
	ret = rpmemd_obc_status(obc, 0);
	UT_ASSERTeq(ret, 0);
	while (1) {
		ret = rpmemd_obc_process(obc, &REQ, &arg);
		if (arg.closing) {
			break;
		} else {
			UT_ASSERTeq(ret, 0);
		}
	}
	/* after close the client disconnects; process must return 1 */
	ret = rpmemd_obc_process(obc, &REQ, &arg);
	UT_ASSERTeq(ret, 1);
	rpmemd_obc_fini(obc);
	return 0;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(server),
	TEST_CASE(client_create),
	TEST_CASE(client_open),
	TEST_CASE(client_set_attr),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_obc");
	/* set up logging for both the library and rpmemd sides */
	common_init("rpmem_fip",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
	rpmemd_log_level = rpmemd_log_level_from_str(
			os_getenv("RPMEMD_LOG_LEVEL"));
	rpmem_util_cmds_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	rpmem_util_cmds_fini();
	common_fini();
	rpmemd_log_close();
	DONE(NULL);
}
| 8,537 | 20.780612 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/mmap_fixed/mmap_fixed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* mmap_fixed.c -- test memory mapping with MAP_FIXED for various lengths
*
* This test is intended to be used for testing Windows implementation
* of memory mapping routines - mmap(), munmap(), msync() and mprotect().
* Those functions should provide the same functionality as their Linux
* counterparts, at least with respect to the features that are used
* in PMDK libraries.
*
* Known issues and differences between Linux and Windows implementation
* are described in src/common/mmap_windows.c.
*/
#include "unittest.h"
#include <sys/mman.h>
/* round a length down to the platform's mmap alignment */
#define ALIGN(size) ((size) & ~(Ut_mmap_align - 1))
/*
 * test_mmap_fixed -- test fixed mappings
 *
 * Maps file1 over a range big enough for both files, then maps file2
 * with MAP_FIXED immediately after file1's aligned length, and checks
 * the second mapping landed exactly where requested.
 */
static void
test_mmap_fixed(const char *name1, const char *name2, size_t len1, size_t len2)
{
	size_t len1_aligned = ALIGN(len1);
	size_t len2_aligned = ALIGN(len2);
	UT_OUT("len: %zu (%zu) + %zu (%zu) = %zu", len1, len1_aligned,
			len2, len2_aligned, len1_aligned + len2_aligned);
	int fd1 = OPEN(name1, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	int fd2 = OPEN(name2, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	POSIX_FALLOCATE(fd1, 0, (os_off_t)len1);
	POSIX_FALLOCATE(fd2, 0, (os_off_t)len2);
	/* reserve one contiguous range covering both mappings */
	char *ptr1 = mmap(NULL, len1_aligned + len2_aligned,
			PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_OUT("ptr1: %p, ptr2: %p", ptr1, ptr1 + len1_aligned);
	/* overlay the tail of the reservation with the second file */
	char *ptr2 = mmap(ptr1 + len1_aligned, len2_aligned,
			PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, fd2, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
	UT_ASSERTeq(ptr2, ptr1 + len1_aligned);
	UT_ASSERTne(munmap(ptr1, len1_aligned), -1);
	UT_ASSERTne(munmap(ptr2, len2_aligned), -1);
	CLOSE(fd1);
	CLOSE(fd2);
	UNLINK(name1);
	UNLINK(name2);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "mmap_fixed");

	if (argc < 4)
		UT_FATAL("usage: %s dirname len1 len2 ...", argv[0]);

	/*
	 * One length per remaining argument.  Bug fix: the original
	 * expression "sizeof(size_t) * (size_t)argc - 2" subtracted
	 * 2 BYTES from the total instead of 2 ELEMENTS from the count.
	 */
	size_t *lengths = MALLOC(sizeof(size_t) * (size_t)(argc - 2));
	UT_ASSERTne(lengths, NULL);

	size_t appendix_length = 20; /* a file name length */
	char *name1 = MALLOC(strlen(argv[1]) + appendix_length);
	char *name2 = MALLOC(strlen(argv[1]) + appendix_length);

	/* NOTE(review): backslash separator -- this test targets Windows */
	sprintf(name1, "%s\\testfile1", argv[1]);
	sprintf(name2, "%s\\testfile2", argv[1]);

	for (int i = 0; i < argc - 2; i++)
		lengths[i] = ATOULL(argv[i + 2]);

	/* exercise every ordered pair of lengths */
	for (int i = 0; i < argc - 2; i++)
		for (int j = 0; j < argc - 2; j++)
			test_mmap_fixed(name1, name2, lengths[i], lengths[j]);

	FREE(name1);
	FREE(name2);
	FREE(lengths);

	DONE(NULL);
}
| 2,522 | 26.129032 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_movnt/pmem2_movnt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt.c -- test for MOVNT threshold
*
* usage: pmem2_movnt
*/
#include "unittest.h"
#include "ut_pmem2.h"
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;
	if (argc != 2)
		UT_FATAL("usage: %s file", argv[0]);
	/* env knobs select the memcpy/memset implementation under test */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_movnt %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	fd = OPEN(argv[1], O_RDWR);
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	/* 8K buffers but at most 4K is copied; the tail guards overruns */
	src = MEMALIGN(64, 8192);
	dst = MEMALIGN(64, 8192);
	memset(src, 0x88, 8192);
	memset(dst, 0, 8192);
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	/* power-of-two sizes fall on both sides of the MOVNT threshold */
	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memcpy_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src, dst, size), 0);
		UT_ASSERTeq(dst[size], 0); /* no write past 'size' */
	}
	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memmove_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(memcmp(src, dst, size), 0);
		UT_ASSERTeq(dst[size], 0);
	}
	for (size_t size = 1; size <= 4096; size *= 2) {
		memset(dst, 0, 4096);
		memset_fn(dst, 0x77, size, PMEM2_F_MEM_NODRAIN);
		UT_ASSERTeq(dst[0], 0x77);
		UT_ASSERTeq(dst[size - 1], 0x77);
		UT_ASSERTeq(dst[size], 0);
	}
	ALIGNED_FREE(dst);
	ALIGNED_FREE(src);
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	DONE(NULL);
}
| 1,945 | 21.113636 | 59 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memmove/pmem2_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem2_memmove.c -- test for doing a memmove
*
* usage:
* pmem2_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "file.h"
#include "memmove_common.h"
/*
 * do_memmove_variants -- run do_memmove() once for every PMEM_F_MEM_*
 * flag combination listed in the global Flags table
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
		size_t dest_off, size_t src_off, size_t bytes, persist_fn p,
		memmove_fn fn)
{
	size_t nvariants = ARRAY_SIZE(Flags);

	for (size_t v = 0; v < nvariants; ++v)
		do_memmove(dst, src, file_name, dest_off, src_off, bytes,
				fn, Flags[v], p);
}
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	char *src_orig;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;	/* 0: separate src/dst halves, 1: same buffer */
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	fd = OPEN(argv[1], O_RDWR);
	if (argc < 3)
		USAGE();
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);
	mapped_len = pmem2_map_get_size(map);
	dst = pmem2_map_get_address(map);
	if (dst == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	/* parse the d:/s:/b:/o: key-value arguments */
	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
				argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");
		/* val is size_t, so "val <= 0" really means "val == 0" */
		size_t val = STRTOUL(&argv[arg][2], NULL, 0);
		switch (argv[arg][0]) {
		case 'd':
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':
			if (val <= 0)
				UT_FATAL("bad length (%lu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%lu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}
	if (who == 0) {
		/* non-overlapping: test both src < dst and dst < src */
		src_orig = src = dst + mapped_len / 2;
		UT_ASSERT(src > dst);
		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
		/* dest > src */
		src = dst;
		dst = src_orig;
		if (dst <= src)
			UT_FATAL("cannot map files in memory order");
		do_memmove_variants(dst, src, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	} else {
		/* use the same buffer for source and destination */
		memset(dst, 0, bytes);
		persist(dst, bytes);
		do_memmove_variants(dst, dst, argv[1], dst_off, src_off,
				bytes, persist, memmove_fn);
	}
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	DONE(NULL);
}
| 3,184 | 20.52027 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memmove/memmove_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memmove_common.h -- header file for common memmove_common test utilities
*/
#ifndef MEMMOVE_COMMON_H
#define MEMMOVE_COMMON_H 1
#include "unittest.h"
#include "file.h"
/* flag combinations exercised by the tests (defined in memmove_common.c) */
extern unsigned Flags[10];
#define USAGE() do { UT_FATAL("usage: %s file b:length [d:{offset}] "\
		"[s:{offset}] [o:{0|1}]", argv[0]); } while (0)
/* pmem-style memmove: returns pmemdest, honors PMEM_F_MEM_* flags */
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
/* flushes [ptr, ptr+len) to the underlying medium */
typedef void (*persist_fn)(const void *ptr, size_t len);
void do_memmove(char *dst, char *src, const char *file_name,
		size_t dest_off, size_t src_off, size_t bytes,
		memmove_fn fn, unsigned flags, persist_fn p);
void verify_contents(const char *file_name, int test, const char *buf1,
		const char *buf2, size_t len);
#endif
| 832 | 25.870968 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memmove/memmove_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memmove_common.c -- common part for tests doing a persistent memmove
*/
#include "unittest.h"
#include "memmove_common.h"
/*
 * verify_contents -- verify that buffers match, if they don't - print contents
 * of both and abort the test
 */
void
verify_contents(const char *file_name, int test,
		const char *buf1, const char *buf2,
		size_t len)
{
	if (memcmp(buf1, buf2, len) == 0)
		return;
	/* dump both buffers byte by byte, marking every mismatch */
	for (size_t i = 0; i < len; ++i)
		UT_ERR("%04zu 0x%02x 0x%02x %s", i, (uint8_t)buf1[i],
				(uint8_t)buf2[i],
				buf1[i] != buf2[i] ? "!!!" : "");
	UT_FATAL("%s %d: %zu bytes do not match with memcmp",
			file_name, test, len);
}
/*
 * do_memmove: Worker function for memmove.
 *
 * Always work within the boundary of bytes. Fill in 1/2 of the src
 * memory with the pattern we want to write. This allows us to check
 * that we did not overwrite anything we were not supposed to in the
 * dest. Use the non pmem version of the memset/memcpy commands
 * so as not to introduce any possible side affects.
 */
void
do_memmove(char *dst, char *src, const char *file_name,
		size_t dest_off, size_t src_off, size_t bytes,
		memmove_fn fn, unsigned flags, persist_fn persist)
{
	void *ret;
	/* shadow buffers mirror every operation with libc memmove */
	char *srcshadow = MALLOC(dest_off + src_off + bytes);
	char *dstshadow = srcshadow;
	if (src != dst)
		dstshadow = MALLOC(dest_off + src_off + bytes);
	char old;
	/* distinct patterns in each region expose stray writes */
	memset(src, 0x11, bytes);
	memset(dst, 0x22, bytes);
	memset(src, 0x33, bytes / 4);
	memset(src + bytes / 4, 0x44, bytes / 4);
	persist(src, bytes);
	persist(dst, bytes);
	memcpy(srcshadow, src, bytes);
	memcpy(dstshadow, dst, bytes);
	/* TEST 1, dest == src */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, dst + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, dstshadow + dest_off, bytes / 2);
	verify_contents(file_name, 0, dstshadow, dst, bytes);
	verify_contents(file_name, 1, srcshadow, src, bytes);
	/* TEST 2, len == 0 */
	old = *(char *)(dst + dest_off);
	ret = fn(dst + dest_off, src + src_off, 0, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	UT_ASSERTeq(*(char *)(dst + dest_off), old);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, 0);
	verify_contents(file_name, 2, dstshadow, dst, bytes);
	verify_contents(file_name, 3, srcshadow, src, bytes);
	/* TEST 3, len == bytes / 2 */
	ret = fn(dst + dest_off, src + src_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dst + dest_off);
	if (flags & PMEM_F_MEM_NOFLUSH)
		/* for pmemcheck */
		persist(dst + dest_off, bytes / 2);
	/* do the same using regular memmove and verify that buffers match */
	memmove(dstshadow + dest_off, srcshadow + src_off, bytes / 2);
	verify_contents(file_name, 4, dstshadow, dst, bytes);
	verify_contents(file_name, 5, srcshadow, src, bytes);
	FREE(srcshadow);
	if (dstshadow != srcshadow)
		FREE(dstshadow);
}
/* every supported PMEM_F_MEM_* flag combination, incl. conflicting ones */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 3,503 | 28.694915 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_zones/obj_zones.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_zones.c -- allocates from a very large pool (exceeding 1 zone)
*
*/
#include <stddef.h>
#include <page_size.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_zones"
#define ALLOC_SIZE ((8191 * (256 * 1024)) - 16) /* must evenly divide a zone */
/*
 * test_create -- allocate all possible objects and log the number. It should
 * exceed what would be possible on a single zone.
 * Additionally, free one object so that we can later check that it can be
 * allocated after the next open.
 */
static void
test_create(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	PMEMoid oid;
	int n = 0;
	/* allocate ALLOC_SIZE objects until the pool is exhausted */
	while (1) {
		if (pmemobj_alloc(pop, &oid, ALLOC_SIZE, 0, NULL, NULL) != 0)
			break;
		n++;
	}
	UT_OUT("allocated: %d", n);
	/* free the last object so test_open() can allocate exactly one */
	pmemobj_free(&oid);
	pmemobj_close(pop);
}
/*
 * test_open -- reopen the pool and verify that exactly one ALLOC_SIZE
 * object can be allocated (the slot freed by test_create) and that the
 * very next allocation fails
 */
static void
test_open(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, LAYOUT_NAME);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* the single slot freed by test_create is available again */
	int ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	/* ...but only that one -- the next allocation must fail */
	ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
	UT_ASSERTne(ret, 0);

	pmemobj_close(pop);
}
/*
 * test_malloc_free -- test if alloc until OOM/free/alloc until OOM sequence
 * produces the same number of allocations for the second alloc loop.
 */
static void
test_malloc_free(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	size_t alloc_size = PMEM_PAGESIZE * 32;
	size_t max_allocs = 1000000;
	PMEMoid *oid = MALLOC(sizeof(PMEMoid) * max_allocs);
	/* phase 1: allocate until out of space */
	size_t n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
		UT_ASSERTne(n, max_allocs);
	}
	size_t first_run_allocated = n;
	/* phase 2: free everything */
	for (size_t i = 0; i < n; ++i) {
		pmemobj_free(&oid[i]);
	}
	/* phase 3: allocate until OOM again -- must reach the same count */
	n = 0;
	while (1) {
		if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
			break;
		n++;
		/*
		 * Bug fix: the second loop had no bound check, so if it
		 * ever allocated more than the first run it would write
		 * past the end of the oid array before the final assert.
		 */
		UT_ASSERTne(n, max_allocs);
	}
	UT_ASSERTeq(first_run_allocated, n);
	pmemobj_close(pop);
	FREE(oid);
}
/*
 * main -- dispatch to the create/open/malloc-free scenario selected by
 * the first character of the second argument
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_zones");

	if (argc != 3)
		UT_FATAL("usage: %s file-name [open|create]", argv[0]);

	const char *path = argv[1];

	switch (argv[2][0]) {
	case 'c':
		test_create(path);
		break;
	case 'o':
		test_open(path);
		break;
	case 'f':
		test_malloc_free(path);
		break;
	default:
		UT_FATAL("invalid operation");
	}

	DONE(NULL);
}
| 2,706 | 20.148438 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_locks_abort/obj_tx_locks_abort.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_tx_locks_nested.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "locks"
TOID_DECLARE_ROOT(struct root_obj);
TOID_DECLARE(struct obj, 1);
/* pool root: guards the head of a singly-linked list of obj nodes */
struct root_obj {
	PMEMmutex lock;
	TOID(struct obj) head;
};
/* list node: payload plus its own pmem-resident mutex */
struct obj {
	int data;
	PMEMmutex lock;
	TOID(struct obj) next;
};
/*
 * do_nested_tx-- (internal) nested transaction
 *
 * Recursively opens one transaction per list node, locking each node's
 * mutex via TX_PARAM_MUTEX, and sets 'data' to the given value.
 */
static void
do_nested_tx(PMEMobjpool *pop, TOID(struct obj) o, int value)
{
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
	} TX_END;
}
/*
 * do_aborted_nested_tx -- (internal) aborted nested transaction
 *
 * Same as do_nested_tx, but aborts the outermost transaction after the
 * recursion returns; the TX_FINALLY block then probes each node's mutex
 * with trylock to report whether the aborted transactions released them.
 */
static void
do_aborted_nested_tx(PMEMobjpool *pop, TOID(struct obj) oid, int value)
{
	TOID(struct obj) o = oid;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
		TX_ADD(o);
		D_RW(o)->data = value;
		if (!TOID_IS_NULL(D_RO(o)->next)) {
			/*
			 * Add the object to undo log, while the mutex
			 * it contains is not locked.
			 */
			TX_ADD(D_RO(o)->next);
			do_nested_tx(pop, D_RO(o)->next, value);
		}
		pmemobj_tx_abort(EINVAL);
	} TX_FINALLY {
		o = oid;
		while (!TOID_IS_NULL(o)) {
			if (pmemobj_mutex_trylock(pop, &D_RW(o)->lock)) {
				UT_OUT("trylock failed");
			} else {
				UT_OUT("trylock succeeded");
				pmemobj_mutex_unlock(pop, &D_RW(o)->lock);
			}
			o = D_RO(o)->next;
		}
	} TX_END;
}
/*
 * do_check -- (internal) dump the 'data' field of every object on the list
 */
static void
do_check(TOID(struct obj) o)
{
	for (; !TOID_IS_NULL(o); o = D_RO(o)->next)
		UT_OUT("data = %d", D_RO(o)->data);
}
int
main(int argc, char *argv[])
{
	PMEMobjpool *pop;

	START(argc, argv, "obj_tx_locks_abort");

	/*
	 * Bug fix: the previous check (argc > 3) accepted a missing
	 * argument and passed a NULL path to pmemobj_create() below.
	 * Require the pool path while keeping the old upper bound.
	 */
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s <file>", argv[0]);

	pop = pmemobj_create(argv[1], LAYOUT_NAME,
			PMEMOBJ_MIN_POOL * 4, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	TOID(struct root_obj) root = POBJ_ROOT(pop, struct root_obj);

	/* build a 4-node list off the root; each node gets its own mutex */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->head = TX_ZNEW(struct obj);
		TOID(struct obj) o;
		o = D_RW(root)->head;
		D_RW(o)->data = 100;
		pmemobj_mutex_zero(pop, &D_RW(o)->lock);
		for (int i = 0; i < 3; i++) {
			D_RW(o)->next = TX_ZNEW(struct obj);
			o = D_RO(o)->next;
			D_RW(o)->data = 101 + i;
			pmemobj_mutex_zero(pop, &D_RW(o)->lock);
		}
		TOID_ASSIGN(D_RW(o)->next, OID_NULL);
	} TX_END;

	UT_OUT("initial state");
	do_check(D_RO(root)->head);

	UT_OUT("nested tx");
	do_nested_tx(pop, D_RW(root)->head, 200);
	do_check(D_RO(root)->head);

	UT_OUT("aborted nested tx");
	do_aborted_nested_tx(pop, D_RW(root)->head, 300);
	do_check(D_RO(root)->head);

	pmemobj_close(pop);

	DONE(NULL);
}
| 2,994 | 20.392857 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_persist_valgrind/pmem2_persist_valgrind.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_persist_valgrind.c -- pmem2_persist_valgrind tests
*/
#include "out.h"
#include "unittest.h"
#include "ut_pmem2_utils.h"
#define DATA "XXXXXXXX"
#define STRIDE_SIZE 4096
/*
 * test_ctx -- essential parameters used by test
 */
struct test_ctx {
	int fd;			/* descriptor of the file backing the map */
	struct pmem2_map *map;	/* the active pmem2 mapping */
};
/*
 * test_init -- prepare resources required for testing
 *
 * Opens the file named by argv[0], maps it with page granularity and
 * checks the mapping covers the whole source.
 */
static int
test_init(const struct test_case *tc, int argc, char *argv[],
	struct test_ctx *ctx)
{
	if (argc < 1)
		UT_FATAL("usage: %s <file>", tc->name);
	char *file = argv[0];
	ctx->fd = OPEN(file, O_RDWR);
	struct pmem2_source *src;
	int ret = pmem2_source_from_fd(&src, ctx->fd);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	struct pmem2_config *cfg;
	/* fill pmem2_config in minimal scope */
	ret = pmem2_config_new(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	ret = pmem2_config_set_required_store_granularity(
			cfg, PMEM2_GRANULARITY_PAGE);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	/* execute pmem2_map and validate the result */
	ret = pmem2_map(cfg, src, &ctx->map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(ctx->map, NULL);
	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	/* the mapping must cover the whole source file */
	UT_ASSERTeq(pmem2_map_get_size(ctx->map), size);
	pmem2_config_delete(&cfg);
	/* the function returns the number of consumed arguments */
	return 1;
}
/*
 * test_fini -- cleanup the test resources
 */
static void
test_fini(struct test_ctx *ctx)
{
	/* unmap before closing the backing descriptor */
	pmem2_unmap(&ctx->map);
	CLOSE(ctx->fd);
}
/*
 * data_write -- stamp the DATA pattern at every 'stride' offset that
 * still leaves room for a full copy within 'size' bytes of 'addr'
 */
static void
data_write(void *addr, size_t size, size_t stride)
{
	size_t offset = 0;

	while (offset + sizeof(DATA) <= size) {
		char *dest = (char *)addr + offset;
		memcpy(dest, DATA, sizeof(DATA));
		offset += stride;
	}
}
/*
 * data_persist -- persist data in a range of mapped memory with defined stride
 */
static void
data_persist(struct pmem2_map *map, size_t len, size_t stride)
{
	size_t map_size = pmem2_map_get_size(map);
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn p_func = pmem2_get_persist_fn(map);
	/* persist 'len' bytes at every 'stride' offset within the mapping */
	for (size_t offset = 0; offset + len <= map_size;
			offset += stride) {
		p_func(addr + offset, len);
	}
}
/*
 * test_persist_continuous_range -- persist continuous data in a range of
 * the persistent memory
 */
static int
test_persist_continuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);
	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	/* back-to-back writes, then one persist of the whole mapping */
	data_write(addr, map_size, sizeof(DATA) /* stride */);
	data_persist(ctx.map, map_size, map_size /* stride */);
	test_fini(&ctx);
	return ret;
}
/*
 * test_persist_discontinuous_range -- persist discontinuous data in a range of
 * the persistent memory
 */
static int
test_persist_discontinuous_range(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);
	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	/* one DATA-sized write per page-sized stride, each one persisted */
	data_write(addr, map_size, STRIDE_SIZE);
	data_persist(ctx.map, sizeof(DATA), STRIDE_SIZE);
	test_fini(&ctx);
	return ret;
}
/*
 * test_persist_discontinuous_range_partially -- persist part of discontinuous
 * data in a range of persistent memory
 */
static int
test_persist_discontinuous_range_partially(const struct test_case *tc, int argc,
	char *argv[])
{
	struct test_ctx ctx = {0};
	int ret = test_init(tc, argc, argv, &ctx);
	char *addr = pmem2_map_get_address(ctx.map);
	size_t map_size = pmem2_map_get_size(ctx.map);
	data_write(addr, map_size, STRIDE_SIZE);
	/* persist only a half of the writes */
	data_persist(ctx.map, sizeof(DATA), 2 * STRIDE_SIZE);
	test_fini(&ctx);
	return ret;
}
/*
 * test_persist_nonpmem_data -- persist data in a range of the memory mapped
 * by mmap()
 */
static int
test_persist_nonpmem_data(const struct test_case *tc, int argc, char *argv[])
{
	struct test_ctx ctx = {0};
	/* pmem2_map is needed to get persist function */
	int ret = test_init(tc, argc, argv, &ctx);

	size_t size = pmem2_map_get_size(ctx.map);

	int flags = MAP_SHARED;
	int proto = PROT_READ | PROT_WRITE;

	char *addr;
	addr = mmap(NULL, size, proto, flags, ctx.fd, 0);
	/* bug fix: the result was used unchecked -- mmap can fail */
	UT_ASSERTne(addr, MAP_FAILED);

	data_write(addr, size, sizeof(DATA) /* stride */);

	pmem2_persist_fn p_func = pmem2_get_persist_fn(ctx.map);
	p_func(addr, size);

	munmap(addr, size);
	test_fini(&ctx);

	return ret;
}
/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_persist_continuous_range),
	TEST_CASE(test_persist_discontinuous_range),
	TEST_CASE(test_persist_discontinuous_range_partially),
	TEST_CASE(test_persist_nonpmem_data),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_persist_valgrind");
	/* route internal tracing through the unittest framework's log */
	out_init("pmem2_persist_valgrind", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0,
			0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 5,072 | 22.37788 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_alloc_class/obj_ctl_alloc_class.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* obj_ctl_alloc_class.c -- tests for the ctl entry points: heap.alloc_class
*/
#include <sys/resource.h>
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class"
/*
 * basic -- exercise runtime allocation classes through the ctl interface:
 * registering classes under explicit ids and under "new", rejecting
 * duplicates, and verifying the usable size of allocations served
 * from each class.
 */
static void
basic(const char *path)
{
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	int ret;
	PMEMoid oid;
	size_t usable_size;
	/* register class id 128: 128-byte units, no per-object header */
	struct pobj_alloc_class_desc alloc_class_128;
	alloc_class_128.header_type = POBJ_HEADER_NONE;
	alloc_class_128.unit_size = 128;
	alloc_class_128.units_per_block = 1000;
	alloc_class_128.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
		&alloc_class_128);
	UT_ASSERTeq(ret, 0);
	/* register class id 129: 1024-byte units, compact headers */
	struct pobj_alloc_class_desc alloc_class_129;
	alloc_class_129.header_type = POBJ_HEADER_COMPACT;
	alloc_class_129.unit_size = 1024;
	alloc_class_129.units_per_block = 1000;
	alloc_class_129.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
		&alloc_class_129);
	UT_ASSERTeq(ret, 0);
	/*
	 * Read class 128 back; units_per_block may be rounded up by the
	 * implementation, hence the <= check.
	 */
	struct pobj_alloc_class_desc alloc_class_128_r;
	ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc",
		&alloc_class_128_r);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(alloc_class_128.header_type, alloc_class_128_r.header_type);
	UT_ASSERTeq(alloc_class_128.unit_size, alloc_class_128_r.unit_size);
	UT_ASSERT(alloc_class_128.units_per_block <=
		alloc_class_128_r.units_per_block);
	/*
	 * One unit from alloc class 128 - 128 bytes unit size, minimal headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, 128, 0, POBJ_CLASS_ID(128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_free(&oid);
	/*
	 * Reserve as above.
	 */
	struct pobj_action act;
	oid = pmemobj_xreserve(pop, &act, 128, 0, POBJ_CLASS_ID(128));
	UT_ASSERT(!OID_IS_NULL(oid));
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_cancel(pop, &act, 1);
	/*
	 * One unit from alloc class 128 - 128 bytes unit size, minimal headers,
	 * but request size 1 byte.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 128);
	pmemobj_free(&oid);
	/*
	 * Two units from alloc class 129 -
	 * 1024 bytes unit size, compact headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1024 + 1,
		0, POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (1024 * 2) - 16); /* 2 units minus hdr */
	pmemobj_free(&oid);
	/*
	 * 64 units from alloc class 129
	 * - 1024 bytes unit size, compact headers.
	 */
	ret = pmemobj_xalloc(pop, &oid, (1024 * 64) - 16,
		0, POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (1024 * 64) - 16);
	pmemobj_free(&oid);
	/*
	 * 65 units from alloc class 129 -
	 * 1024 bytes unit size, compact headers.
	 * Should fail, as it would require two bitmap modifications.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1024 * 64 + 1, 0,
		POBJ_CLASS_ID(129), NULL, NULL);
	UT_ASSERTeq(ret, -1);
	/*
	 * Nonexistent alloc class.
	 */
	ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(130), NULL, NULL);
	UT_ASSERTeq(ret, -1);
	/* auto-assigned class id via the "new" ctl node */
	struct pobj_alloc_class_desc alloc_class_new;
	alloc_class_new.header_type = POBJ_HEADER_NONE;
	alloc_class_new.unit_size = 777;
	alloc_class_new.units_per_block = 200;
	alloc_class_new.class_id = 0;
	alloc_class_new.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new);
	UT_ASSERTeq(ret, 0);
	/* registering an identical class again must fail */
	struct pobj_alloc_class_desc alloc_class_fail;
	alloc_class_fail.header_type = POBJ_HEADER_NONE;
	alloc_class_fail.unit_size = 777;
	alloc_class_fail.units_per_block = 200;
	alloc_class_fail.class_id = 0;
	alloc_class_fail.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_fail);
	UT_ASSERTeq(ret, -1);
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.200.desc",
		&alloc_class_fail);
	UT_ASSERTeq(ret, -1);
	/* class_id was filled in by the successful "new" registration */
	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new.class_id), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, 777);
	/* huge unit size (16 MiB), a single unit per block */
	struct pobj_alloc_class_desc alloc_class_new_huge;
	alloc_class_new_huge.header_type = POBJ_HEADER_NONE;
	alloc_class_new_huge.unit_size = (2 << 23);
	alloc_class_new_huge.units_per_block = 1;
	alloc_class_new_huge.class_id = 0;
	alloc_class_new_huge.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_huge);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new_huge.class_id), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	usable_size = pmemobj_alloc_usable_size(oid);
	UT_ASSERTeq(usable_size, (2 << 23));
	/* maximum unit size; allocation must fail in this small pool */
	struct pobj_alloc_class_desc alloc_class_new_max;
	alloc_class_new_max.header_type = POBJ_HEADER_COMPACT;
	alloc_class_new_max.unit_size = PMEMOBJ_MAX_ALLOC_SIZE;
	alloc_class_new_max.units_per_block = 1024;
	alloc_class_new_max.class_id = 0;
	alloc_class_new_max.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_max);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_xalloc(pop, &oid, 1, 0,
		POBJ_CLASS_ID(alloc_class_new_max.class_id), NULL, NULL);
	UT_ASSERTne(ret, 0);
	/* request one byte past what a full block of this class can serve */
	struct pobj_alloc_class_desc alloc_class_new_loop;
	alloc_class_new_loop.header_type = POBJ_HEADER_COMPACT;
	alloc_class_new_loop.unit_size = 16384;
	alloc_class_new_loop.units_per_block = 63;
	alloc_class_new_loop.class_id = 0;
	alloc_class_new_loop.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_new_loop);
	UT_ASSERTeq(ret, 0);
	size_t s = (63 * 16384) - 16;
	ret = pmemobj_xalloc(pop, &oid, s + 1, 0,
		POBJ_CLASS_ID(alloc_class_new_loop.class_id), NULL, NULL);
	UT_ASSERTne(ret, 0);
	/*
	 * Tiny units: ctl_set is expected to bump units_per_block from 1
	 * to something usable (checked below), after which many small
	 * allocations must all succeed.
	 */
	struct pobj_alloc_class_desc alloc_class_tiny;
	alloc_class_tiny.header_type = POBJ_HEADER_NONE;
	alloc_class_tiny.unit_size = 7;
	alloc_class_tiny.units_per_block = 1;
	alloc_class_tiny.class_id = 0;
	alloc_class_tiny.alignment = 0;
	ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_tiny);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(alloc_class_tiny.units_per_block > 1);
	for (int i = 0; i < 1000; ++i) {
		ret = pmemobj_xalloc(pop, &oid, 7, 0,
			POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
		UT_ASSERTeq(ret, 0);
	}
	pmemobj_close(pop);
}
/*
 * many -- allocate more units (UINT16_MAX + 1) from a single class than
 * fit in a 16-bit unit index; each allocation must return fresh,
 * zeroed-by-the-pool memory (verified via the counter check).
 */
static void
many(const char *path)
{
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	unsigned nunits = UINT16_MAX + 1;
	struct pobj_alloc_class_desc alloc_class_tiny;
	alloc_class_tiny.header_type = POBJ_HEADER_NONE;
	alloc_class_tiny.unit_size = 8;
	alloc_class_tiny.units_per_block = nunits;
	alloc_class_tiny.class_id = 0;
	alloc_class_tiny.alignment = 0;
	int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
		&alloc_class_tiny);
	UT_ASSERTeq(ret, 0);
	PMEMoid oid;
	uint64_t *counterp = NULL;
	for (size_t i = 0; i < nunits; ++i) {
		pmemobj_xalloc(pop, &oid, 8, 0,
			POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
		counterp = pmemobj_direct(oid);
		(*counterp)++;
		/*
		 * This works only because this is a fresh pool in a new file
		 * and so the counter must be initially zero.
		 * This might have to be fixed if that ever changes.
		 */
		UT_ASSERTeq(*counterp, 1);
	}
	pmemobj_close(pop);
}
/*
 * main -- entry point: runs either the basic() or many() scenario on the
 * pool file given as argv[1], selected by argv[2] ('b' or 'm').
 *
 * Fix: an unrecognized operation character previously fell through both
 * branches and the test "passed" without running anything; now it fails
 * loudly instead.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alloc_class");
	if (argc != 3)
		UT_FATAL("usage: %s file-name b|m", argv[0]);
	const char *path = argv[1];
	if (argv[2][0] == 'b')
		basic(path);
	else if (argv[2][0] == 'm')
		many(path);
	else
		UT_FATAL("unknown operation '%c' (expected b or m)",
			argv[2][0]);
	DONE(NULL);
}
| 7,857 | 26.865248 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/traces_pmem/traces_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* traces_pmem.c -- unit test traces for libraries pmem
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
	START(argc, argv, "traces_pmem");
	/*
	 * Each *_check_version() call is expected to return NULL (falsy)
	 * here, i.e. the library accepts the API version it was built with;
	 * a non-NULL return would be an error string.
	 */
	UT_ASSERT(!pmem_check_version(PMEM_MAJOR_VERSION,
		PMEM_MINOR_VERSION));
	UT_ASSERT(!pmemblk_check_version(PMEMBLK_MAJOR_VERSION,
		PMEMBLK_MINOR_VERSION));
	UT_ASSERT(!pmemlog_check_version(PMEMLOG_MAJOR_VERSION,
		PMEMLOG_MINOR_VERSION));
	UT_ASSERT(!pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
		PMEMOBJ_MINOR_VERSION));
	DONE(NULL);
}
| 596 | 21.961538 | 56 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_debug/obj_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_debug.c -- unit test for debug features
*
* usage: obj_debug file operation [op_index]:...
*
* operations are 'f' or 'l' or 'r' or 'a' or 'n' or 's'
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <sys/param.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "layout_obj_debug"
/* typed-OID declarations for the layout used by this test */
TOID_DECLARE_ROOT(struct root);
TOID_DECLARE(struct tobj, 0);
TOID_DECLARE(struct int3_s, 1);
/* pool root: two intrusive lists of tobj plus a scratch value */
struct root {
	POBJ_LIST_HEAD(listhead, struct tobj) lhead, lhead2;
	uint32_t val;
};
/* list element; carries only its link */
struct tobj {
	POBJ_LIST_ENTRY(struct tobj) next;
};
/* three-field payload used by the constructor test */
struct int3_s {
	uint32_t i1;
	uint32_t i2;
	uint32_t i3;
};
/* common signature for the sync-primitive wrappers dispatched by table */
typedef void (*func)(PMEMobjpool *pop, void *sync, void *cond);
/*
 * test_FOREACH -- run every POBJ_*FOREACH* iteration macro outside a
 * transaction, inside one, and again after it, to exercise the debug
 * checks around iteration.
 */
static void
test_FOREACH(const char *path)
{
	PMEMobjpool *pop = NULL;
	PMEMoid varoid, nvaroid;
	TOID(struct root) root;
	TOID(struct tobj) var, nvar;
#define COMMANDS_FOREACH()\
	do {\
	POBJ_FOREACH(pop, varoid) {}\
	POBJ_FOREACH_SAFE(pop, varoid, nvaroid) {}\
	POBJ_FOREACH_TYPE(pop, var) {}\
	POBJ_FOREACH_SAFE_TYPE(pop, var, nvar) {}\
	POBJ_LIST_FOREACH(var, &D_RW(root)->lhead, next) {}\
	POBJ_LIST_FOREACH_REVERSE(var, &D_RW(root)->lhead, next) {}\
	} while (0)
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));
	/* one element so list iteration has something to visit */
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,
			sizeof(struct tobj), NULL, NULL);
	COMMANDS_FOREACH();
	TX_BEGIN(pop) {
		COMMANDS_FOREACH();
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	COMMANDS_FOREACH();
	pmemobj_close(pop);
}
/*
 * test_lists -- run the list insert/move/remove macros outside a
 * transaction, inside one, and again after it.
 */
static void
test_lists(const char *path)
{
	PMEMobjpool *pop = NULL;
	TOID(struct root) root;
	TOID(struct tobj) elm;
#define COMMANDS_LISTS()\
	do {\
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,\
			sizeof(struct tobj), NULL, NULL);\
	POBJ_NEW(pop, &elm, struct tobj, NULL, NULL);\
	POBJ_LIST_INSERT_AFTER(pop, &D_RW(root)->lhead,\
			POBJ_LIST_FIRST(&D_RW(root)->lhead), elm, next);\
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->lhead,\
			&D_RW(root)->lhead2, elm, next, next);\
	POBJ_LIST_REMOVE(pop, &D_RW(root)->lhead2, elm, next);\
	POBJ_FREE(&elm);\
	} while (0)
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));
	COMMANDS_LISTS();
	TX_BEGIN(pop) {
		COMMANDS_LISTS();
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	COMMANDS_LISTS();
	pmemobj_close(pop);
}
/*
 * int3_constructor -- object constructor: copies the three uint32 fields
 * from the caller-supplied argument into the freshly allocated object and
 * makes the stores durable before the allocation is published.
 *
 * Always reports success (returns 0).
 */
static int
int3_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	const struct int3_s *src = (const struct int3_s *)arg;
	struct int3_s *dst = (struct int3_s *)ptr;
	dst->i1 = src->i1;
	dst->i2 = src->i2;
	dst->i3 = src->i3;
	/* flush the whole payload in one call */
	pmemobj_persist(pop, dst, sizeof(*dst));
	return 0;
}
/*
 * test_alloc_construct -- allocate with a user constructor from inside a
 * transaction (exercises the debug checks on mixing tx and non-tx alloc).
 */
static void
test_alloc_construct(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	TX_BEGIN(pop) {
		struct int3_s args = { 1, 2, 3 };
		PMEMoid allocation;
		pmemobj_alloc(pop, &allocation, sizeof(allocation), 1,
				int3_constructor, &args);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	pmemobj_close(pop);
}
/*
 * test_double_free -- deliberately free the same oid twice; the second
 * free is the invalid operation this debug test provokes (pool is left
 * open on purpose, the process is expected not to get past the abort).
 */
static void
test_double_free(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	PMEMoid oid, oid2;
	int err = pmemobj_zalloc(pop, &oid, 100, 0);
	UT_ASSERTeq(err, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	oid2 = oid;
	pmemobj_free(&oid);
	pmemobj_free(&oid2);	/* invalid: oid2 still refers to freed memory */
}
/*
 * test_constr -- constructor that itself calls pmemobj_alloc; used to
 * provoke the "allocation inside a constructor" debug check.
 */
static int
test_constr(PMEMobjpool *pop, void *ptr, void *arg)
{
	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
	return 0;
}
/*
 * test_alloc_in_constructor -- trigger test_constr via pmemobj_alloc
 * (pool intentionally not closed; the nested alloc is expected to trip
 * the debug check first).
 */
static void
test_alloc_in_constructor(const char *path)
{
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	PMEMoid oid;
	pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
}
/*
 * Thin wrappers exposing each libpmemobj persistent-lock entry point
 * through the common 'func' signature, so test_sync_pop_check() can
 * dispatch them from a table. Each wrapper uses only the parameter(s)
 * relevant to the wrapped call and ignores the rest.
 */
static void
test_mutex_lock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_lock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_unlock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_trylock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_trylock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_timedlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_timedlock(pop, (PMEMmutex *)sync, NULL);
}
static void
test_mutex_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_mutex_zero(pop, (PMEMmutex *)sync);
}
static void
test_rwlock_rdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_wrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_timedrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedrdlock(pop, (PMEMrwlock *)sync, NULL);
}
static void
test_rwlock_timedwrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_timedwrlock(pop, (PMEMrwlock *)sync, NULL);
}
static void
test_rwlock_tryrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_tryrdlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_trywrlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_trywrlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_unlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_rwlock_zero(pop, (PMEMrwlock *)sync);
}
static void
test_cond_wait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_wait(pop, (PMEMcond *)cond, (PMEMmutex *)sync);
}
static void
test_cond_signal(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_signal(pop, (PMEMcond *)cond);
}
static void
test_cond_broadcast(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_broadcast(pop, (PMEMcond *)cond);
}
static void
test_cond_timedwait(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_timedwait(pop, (PMEMcond *)cond, (PMEMmutex *)sync, NULL);
}
static void
test_cond_zero(PMEMobjpool *pop, void *sync, void *cond)
{
	pmemobj_cond_zero(pop, (PMEMcond *)cond);
}
/*
 * test_sync_pop_check -- invoke one sync-primitive wrapper (selected by
 * op_index) with a bogus, non-NULL pool pointer and stack-resident lock
 * objects; presumably the library's pool-membership debug check is what
 * this is meant to trip -- TODO confirm against the test driver.
 */
static void
test_sync_pop_check(unsigned long op_index)
{
	/* deliberately invalid pool address (never dereferenced as a pool) */
	PMEMobjpool *pop = (PMEMobjpool *)(uintptr_t)0x1;
	func to_test[] = {
		test_mutex_lock, test_mutex_unlock, test_mutex_trylock,
		test_mutex_timedlock, test_mutex_zero, test_rwlock_rdlock,
		test_rwlock_wrlock, test_rwlock_timedrdlock,
		test_rwlock_timedwrlock, test_rwlock_tryrdlock,
		test_rwlock_trywrlock, test_rwlock_unlock, test_rwlock_zero,
		test_cond_wait, test_cond_signal, test_cond_broadcast,
		test_cond_timedwait, test_cond_zero
	};
	if (op_index >= (sizeof(to_test) / sizeof(to_test[0])))
		UT_FATAL("Invalid op_index provided");
	/* locks on the stack, i.e. not inside any pool */
	PMEMmutex stack_sync;
	PMEMcond stack_cond;
	to_test[op_index](pop, &stack_sync, &stack_cond);
}
/*
 * main -- entry point: argv[1] is the pool file, argv[2] selects the
 * scenario, argv[3] (option 's' only) selects the sync wrapper to run.
 *
 * Fix: the usage message listed only "f|l|r|a|s" although 'p' and 'n'
 * are also accepted (see strchr below); the messages now agree with the
 * accepted set. Note that 'r' is accepted but has no handler -- it
 * remains a deliberate no-op (behavior preserved, now documented).
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_debug");
	if (argc < 3)
		UT_FATAL("usage: %s file-name op:f|l|r|a|p|n|s [op_index]",
			argv[0]);
	const char *path = argv[1];
	/* op must be a single character from the accepted set */
	if (strchr("flrapns", argv[2][0]) == NULL || argv[2][1] != '\0')
		UT_FATAL("op must be f or l or r or a or p or n or s");
	unsigned long op_index;
	char *tailptr;
	switch (argv[2][0]) {
	case 'f':
		test_FOREACH(path);
		break;
	case 'l':
		test_lists(path);
		break;
	case 'a':
		test_alloc_construct(path);
		break;
	case 'p':
		test_double_free(path);
		break;
	case 'n':
		test_alloc_in_constructor(path);
		break;
	case 's':
		if (argc != 4)
			UT_FATAL("Provide an op_index with option s");
		op_index = strtoul(argv[3], &tailptr, 10);
		if (tailptr[0] != '\0')
			UT_FATAL("Wrong op_index format");
		test_sync_pop_check(op_index);
		break;
	default:
		/* only 'r' reaches here: accepted, intentionally a no-op */
		break;
	}
	DONE(NULL);
}
| 8,098 | 20.771505 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_config/pmem2_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem_config.c -- pmem2_config unittests
*/
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "config.h"
#include "out.h"
#include "source.h"
/*
 * test_cfg_create_and_delete_valid - test pmem2_config allocation
 */
static int
test_cfg_create_and_delete_valid(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config *cfg;
	int ret = pmem2_config_new(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(cfg, NULL);
	ret = pmem2_config_delete(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	/* delete must also NULL-out the caller's pointer */
	UT_ASSERTeq(cfg, NULL);
	return 0;
}
/*
 * test_alloc_cfg_enomem - test pmem2_config allocation with error injection
 */
static int
test_alloc_cfg_enomem(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config *cfg;
	/* the whole test is a no-op in builds without fault injection */
	if (!core_fault_injection_enabled()) {
		return 0;
	}
	core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc");
	int ret = pmem2_config_new(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM);
	UT_ASSERTeq(cfg, NULL);
	return 0;
}
/*
 * test_delete_null_config - test pmem2_config_delete on NULL config
 */
static int
test_delete_null_config(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config *cfg = NULL;
	/* should not crash */
	int ret = pmem2_config_delete(&cfg);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTeq(cfg, NULL);
	return 0;
}
/*
 * test_config_set_granularity_valid - check valid granularity values
 */
static int
test_config_set_granularity_valid(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* check default granularity */
	enum pmem2_granularity g =
		(enum pmem2_granularity)PMEM2_GRANULARITY_INVALID;
	UT_ASSERTeq(cfg.requested_max_granularity, g);
	/* change default granularity */
	int ret = -1;
	g = PMEM2_GRANULARITY_BYTE;
	ret = pmem2_config_set_required_store_granularity(&cfg, g);
	UT_ASSERTeq(cfg.requested_max_granularity, g);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	/* set granularity once more */
	ret = -1;
	g = PMEM2_GRANULARITY_PAGE;
	ret = pmem2_config_set_required_store_granularity(&cfg, g);
	UT_ASSERTeq(cfg.requested_max_granularity, g);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	return 0;
}
/*
 * test_config_set_granularity_invalid - check invalid granularity values
 */
static int
test_config_set_granularity_invalid(const struct test_case *tc, int argc,
		char *argv[])
{
	/* pass invalid granularity */
	int ret = 0;
	enum pmem2_granularity g_inval = 999;
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	ret = pmem2_config_set_required_store_granularity(&cfg, g_inval);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
	return 0;
}
/*
 * test_set_offset_too_large - setting offset which is too large
 */
static int
test_set_offset_too_large(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* initialize for consistency with the other tests in this file */
	pmem2_config_init(&cfg);
	/* let's try to set the offset which is too large */
	size_t offset = (size_t)INT64_MAX + 1;
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_OFFSET_OUT_OF_RANGE);
	return 0;
}
/*
 * test_set_offset_success - setting a valid offset
 */
static int
test_set_offset_success(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* start from a known default state rather than stack garbage */
	pmem2_config_init(&cfg);
	/* let's try to successfully set the offset */
	size_t offset = Ut_mmap_align;
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(cfg.offset, offset);
	return 0;
}
/*
 * test_set_length_success - setting a valid length
 */
static int
test_set_length_success(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* start from a known default state rather than stack garbage */
	pmem2_config_init(&cfg);
	/* let's try to successfully set the length, can be any length */
	size_t length = Ut_mmap_align;
	int ret = pmem2_config_set_length(&cfg, length);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(cfg.length, length);
	return 0;
}
/*
 * test_set_offset_max - setting maximum possible offset
 */
static int
test_set_offset_max(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* initialize for consistency with the other tests in this file */
	pmem2_config_init(&cfg);
	/* let's try to successfully set maximum possible offset */
	size_t offset = (INT64_MAX / Ut_mmap_align) * Ut_mmap_align;
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTeq(ret, 0);
	return 0;
}
/*
 * test_set_sharing_valid - setting valid sharing
 */
static int
test_set_sharing_valid(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* check sharing default value */
	UT_ASSERTeq(cfg.sharing, PMEM2_SHARED);
	int ret = pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(cfg.sharing, PMEM2_PRIVATE);
	return 0;
}
/*
 * test_set_sharing_invalid - setting invalid sharing
 */
static int
test_set_sharing_invalid(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	/* "randomly" chosen value outside the pmem2_sharing_type enum */
	unsigned invalid_sharing = 777;
	int ret = pmem2_config_set_sharing(&cfg, invalid_sharing);
	UT_ASSERTeq(ret, PMEM2_E_INVALID_SHARING_VALUE);
	return 0;
}
/*
 * test_validate_unaligned_addr - setting unaligned addr and validating it
 */
static int
test_validate_unaligned_addr(const struct test_case *tc, int argc,
		char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_validate_unaligned_addr <file>");
	/* needed for source alignment */
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_source *src;
	PMEM2_SOURCE_FROM_FD(&src, fd);
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* let's set addr which is unaligned */
	cfg.addr = (char *)1;
	int ret = pmem2_config_validate_addr_alignment(&cfg, src);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_UNALIGNED);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);
	/* returns 1: this test case consumed one command-line argument */
	return 1;
}
/*
 * test_set_wrong_addr_req_type - setting wrong addr request type
 */
static int
test_set_wrong_addr_req_type(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* "randomly" chosen invalid addr request type */
	enum pmem2_address_request_type request_type = 999;
	int ret = pmem2_config_set_address(&cfg, NULL, request_type);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE);
	return 0;
}
/*
 * test_null_addr_noreplace - setting null addr when request type
 * PMEM2_ADDRESS_FIXED_NOREPLACE is used
 */
static int
test_null_addr_noreplace(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	int ret = pmem2_config_set_address(
			&cfg, NULL, PMEM2_ADDRESS_FIXED_NOREPLACE);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_NULL);
	return 0;
}
/*
 * test_clear_address - using pmem2_config_clear_address func
 */
static int
test_clear_address(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* "randomly" chosen value of address and addr request type */
	void *addr = (void *)(1024 * 1024);
	int ret = pmem2_config_set_address(
			&cfg, addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTne(cfg.addr, NULL);
	UT_ASSERTne(cfg.addr_request, PMEM2_ADDRESS_ANY);
	/* clearing must restore both fields to their defaults */
	pmem2_config_clear_address(&cfg);
	UT_ASSERTeq(cfg.addr, NULL);
	UT_ASSERTeq(cfg.addr_request, PMEM2_ADDRESS_ANY);
	return 0;
}
/*
 * test_set_valid_prot_flag -- set valid protection flag
 */
static int
test_set_valid_prot_flag(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* each individual flag, then a combination, must be accepted */
	int ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_READ);
	UT_ASSERTeq(ret, 0);
	ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_WRITE);
	UT_ASSERTeq(ret, 0);
	ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_EXEC);
	UT_ASSERTeq(ret, 0);
	ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_NONE);
	UT_ASSERTeq(ret, 0);
	ret = pmem2_config_set_protection(&cfg,
			PMEM2_PROT_WRITE | PMEM2_PROT_READ | PMEM2_PROT_EXEC);
	UT_ASSERTeq(ret, 0);
	return 0;
}
/*
 * test_set_invalid_prot_flag -- set invalid protection flag
 */
static int
test_set_invalid_prot_flag(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_config cfg;
	pmem2_config_init(&cfg);
	/* PROT_WRITE is the raw OS flag, not a PMEM2_PROT_* value */
	int ret = pmem2_config_set_protection(&cfg, PROT_WRITE);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_PROT_FLAG);
	/* a rejected value must leave the default protection untouched */
	UT_ASSERTeq(cfg.protection_flag, PMEM2_PROT_READ | PMEM2_PROT_WRITE);
	return 0;
}
/*
 * test_cases -- available test cases
 *
 * Maps command-line test names to handlers; dispatched by
 * TEST_CASE_PROCESS() in main() below.
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_cfg_create_and_delete_valid),
	TEST_CASE(test_alloc_cfg_enomem),
	TEST_CASE(test_delete_null_config),
	TEST_CASE(test_config_set_granularity_valid),
	TEST_CASE(test_config_set_granularity_invalid),
	TEST_CASE(test_set_offset_too_large),
	TEST_CASE(test_set_offset_success),
	TEST_CASE(test_set_length_success),
	TEST_CASE(test_set_offset_max),
	TEST_CASE(test_set_sharing_valid),
	TEST_CASE(test_set_sharing_invalid),
	TEST_CASE(test_validate_unaligned_addr),
	TEST_CASE(test_set_wrong_addr_req_type),
	TEST_CASE(test_null_addr_noreplace),
	TEST_CASE(test_clear_address),
	TEST_CASE(test_set_valid_prot_flag),
	TEST_CASE(test_set_invalid_prot_flag),
};
/* number of entries in test_cases */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
	/* framework setup, then dispatch the selected test case(s) */
	START(argc, argv, "pmem2_config");
	util_init();
	out_init("pmem2_config", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 9,397 | 22.792405 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_map_file_trunc/pmem_map_file_trunc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem_map_file_trunc.c -- test for mapping specially crafted files,
* which used to confuse Windows libc to truncate it by 1 byte
*
* See https://github.com/pmem/pmdk/pull/3728 for full description.
*
* usage: pmem_map_file_trunc file
*/
#include "unittest.h"
/* size the mapped file must keep across both mappings */
#define EXPECTED_SIZE (4 * 1024)
/*
 * so called "Ctrl-Z" or EOF character
 * https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen
 */
#define FILL_CHAR 0x1a
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_map_file_trunc");
	if (argc < 2)
		UT_FATAL("not enough args");
	size_t mapped;
	int ispmem;
	char *p;
	os_stat_t st;
	/* create the file and write the EOF character as its last byte */
	p = pmem_map_file(argv[1], EXPECTED_SIZE, PMEM_FILE_CREATE, 0644,
		&mapped, &ispmem);
	UT_ASSERT(p);
	UT_ASSERTeq(mapped, EXPECTED_SIZE);
	p[EXPECTED_SIZE - 1] = FILL_CHAR;
	pmem_persist(&p[EXPECTED_SIZE - 1], 1);
	pmem_unmap(p, EXPECTED_SIZE);
	/* the file size must be unchanged after unmap */
	STAT(argv[1], &st);
	UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
	/* re-map the existing file; size and last byte must survive */
	p = pmem_map_file(argv[1], 0, 0, 0644, &mapped, &ispmem);
	UT_ASSERT(p);
	UT_ASSERTeq(mapped, EXPECTED_SIZE);
	UT_ASSERTeq(p[EXPECTED_SIZE - 1], FILL_CHAR);
	pmem_unmap(p, EXPECTED_SIZE);
	STAT(argv[1], &st);
	UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
	DONE(NULL);
}
| 1,302 | 20.716667 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_ravl/util_ravl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_ravl.c -- unit test for ravl tree
*/
#include <stdint.h>
#include <stdlib.h>
#include "ravl.h"
#include "util.h"
#include "unittest.h"
#include "fault_injection.h"
/*
 * cmpkey -- ravl comparator treating the stored pointers as integer keys
 *
 * Returns negative/zero/positive for lhs </==/> rhs. Uses comparisons
 * rather than the original (int)(l - r), whose truncation of a
 * pointer-sized difference to int can yield the wrong sign when the keys
 * are more than INT_MAX apart.
 */
static int
cmpkey(const void *lhs, const void *rhs)
{
	intptr_t l = (intptr_t)lhs;
	intptr_t r = (intptr_t)rhs;
	return (l > r) - (l < r);
}
/*
 * test_misc -- insert keys 1..10 in shuffled order, then verify every
 * find predicate (EQUAL / GREATER / LESS and their combinations) at the
 * boundaries and in the middle, and finally remove all nodes one by one.
 */
static void
test_misc(void)
{
	struct ravl *r = ravl_new(cmpkey);
	struct ravl_node *n = NULL;
	ravl_insert(r, (void *)3);
	ravl_insert(r, (void *)6);
	ravl_insert(r, (void *)1);
	ravl_insert(r, (void *)7);
	ravl_insert(r, (void *)9);
	ravl_insert(r, (void *)5);
	ravl_insert(r, (void *)8);
	ravl_insert(r, (void *)2);
	ravl_insert(r, (void *)4);
	ravl_insert(r, (void *)10);
	/* lookups that must fail: outside the 1..10 key range */
	n = ravl_find(r, (void *)11, RAVL_PREDICATE_EQUAL);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)10, RAVL_PREDICATE_GREATER);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)11, RAVL_PREDICATE_GREATER);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)11,
		RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)1, RAVL_PREDICATE_LESS);
	UT_ASSERTeq(n, NULL);
	n = ravl_find(r, (void *)0, RAVL_PREDICATE_LESS_EQUAL);
	UT_ASSERTeq(n, NULL);
	/* lookups that must succeed, with the expected neighbor keys */
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_GREATER);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)10);
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_LESS);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)8);
	n = ravl_find(r, (void *)9,
		RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)9);
	n = ravl_find(r, (void *)9,
		RAVL_PREDICATE_LESS | RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)9);
	n = ravl_find(r, (void *)100, RAVL_PREDICATE_LESS);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)10);
	n = ravl_find(r, (void *)0, RAVL_PREDICATE_GREATER);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)1);
	/* remove every key (arbitrary order) to exercise rebalancing */
	n = ravl_find(r, (void *)3, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)10, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)6, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)9, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)7, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)1, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)5, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)8, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)2, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	n = ravl_find(r, (void *)4, RAVL_PREDICATE_EQUAL);
	UT_ASSERTne(n, NULL);
	ravl_remove(r, n);
	ravl_delete(r);
}
/*
 * test_predicate -- GREATER/LESS lookups for a key (6) that lies between
 * stored keys must return the nearest stored neighbors.
 */
static void
test_predicate(void)
{
	struct ravl *r = ravl_new(cmpkey);
	struct ravl_node *n = NULL;
	ravl_insert(r, (void *)10);
	ravl_insert(r, (void *)5);
	ravl_insert(r, (void *)7);
	n = ravl_find(r, (void *)6, RAVL_PREDICATE_GREATER);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)7);
	n = ravl_find(r, (void *)6, RAVL_PREDICATE_LESS);
	UT_ASSERTne(n, NULL);
	UT_ASSERTeq(ravl_data(n), (void *)5);
	ravl_delete(r);
}
/*
 * test_stress -- bulk-insert a million pseudo-random keys (rand() is not
 * seeded, so the sequence is deterministic across runs).
 */
static void
test_stress(void)
{
	struct ravl *r = ravl_new(cmpkey);
	for (int i = 0; i < 1000000; ++i) {
		ravl_insert(r, (void *)(uintptr_t)rand());
	}
	ravl_delete(r);
}
/* test payload stored by value in sized-ravl nodes */
struct foo {
	int a;
	int b;
	int c;
};
/*
 * cmpfoo -- orders struct foo instances by the sum of their three fields
 * (negative/zero/positive result, standard comparator contract)
 */
static int
cmpfoo(const void *lhs, const void *rhs)
{
	const struct foo *left = lhs;
	const struct foo *right = rhs;
	int left_sum = left->a + left->b + left->c;
	int right_sum = right->a + right->b + right->c;
	return left_sum - right_sum;
}
/*
 * test_emplace -- sized-ravl stores copies of struct foo; verify that
 * the smallest-above-zero lookup yields 'a' first, then (after removal)
 * 'b', field by field.
 */
static void
test_emplace(void)
{
	struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
	struct foo a = {1, 2, 3};
	struct foo b = {2, 3, 4};
	struct foo z = {0, 0, 0};
	ravl_emplace_copy(r, &a);
	ravl_emplace_copy(r, &b);
	struct ravl_node *n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
	struct foo *fn = ravl_data(n);
	UT_ASSERTeq(fn->a, a.a);
	UT_ASSERTeq(fn->b, a.b);
	UT_ASSERTeq(fn->c, a.c);
	ravl_remove(r, n);
	n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
	fn = ravl_data(n);
	UT_ASSERTeq(fn->a, b.a);
	UT_ASSERTeq(fn->b, b.b);
	UT_ASSERTeq(fn->c, b.c);
	ravl_remove(r, n);
	ravl_delete(r);
}
/*
 * test_fault_injection_ravl_sized -- the first malloc inside
 * ravl_new_sized is forced to fail; the call must return NULL with
 * errno == ENOMEM. No-op in builds without fault injection.
 */
static void
test_fault_injection_ravl_sized()
{
	if (!core_fault_injection_enabled())
		return;
	core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_sized");
	struct ravl *r = ravl_new_sized(NULL, 0);
	UT_ASSERTeq(r, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_fault_injection_ravl_node -- the node allocation inside
 * ravl_emplace_copy is forced to fail; the call must report an error
 * with errno == ENOMEM. No-op in builds without fault injection.
 */
static void
test_fault_injection_ravl_node()
{
	if (!core_fault_injection_enabled())
		return;
	struct foo a = {1, 2, 3};
	struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
	UT_ASSERTne(r, NULL);
	core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_node");
	int ret = ravl_emplace_copy(r, &a);
	UT_ASSERTne(ret, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_ravl");
	/* run all scenarios in-process; any failure aborts the test */
	test_predicate();
	test_misc();
	test_stress();
	test_emplace();
	test_fault_injection_ravl_sized();
	test_fault_injection_ravl_node();
	DONE(NULL);
}
| 5,271 | 20.34413 | 64 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_sync/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mocks_windows.h -- redefinitions of pthread functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_sync test.
* It would replace default implementation with mocked functions defined
* in obj_sync.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
/* route lock initialization through the __wrap_* mocks defined in obj_sync.c */
#define os_mutex_init __wrap_os_mutex_init
#define os_rwlock_init __wrap_os_rwlock_init
#define os_cond_init __wrap_os_cond_init
#endif
| 2,265 | 41.754717 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_sync/obj_sync.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_sync.c -- unit test for PMEM-resident locks
*/
#include "obj.h"
#include "sync.h"
#include "unittest.h"
#include "sys_util.h"
#include "util.h"
#include "os.h"
#define MAX_THREAD_NUM 200
#define DATA_SIZE 128
#define LOCKED_MUTEX 1
#define NANO_PER_ONE 1000000000LL
#define TIMEOUT (NANO_PER_ONE / 1000LL)
#define WORKER_RUNS 10
#define MAX_OPENS 5
#define FATAL_USAGE() UT_FATAL("usage: obj_sync [mrc] <num_threads> <runs>\n")
/* posix thread worker typedef */
typedef void *(*worker)(void *);
/* the mock pmemobj pool */
static PMEMobjpool Mock_pop;
/* the tested object containing persistent synchronization primitives */
static struct mock_obj {
	PMEMmutex mutex;		/* exercised by the 'm', 'c' and 't' tests */
	PMEMmutex mutex_locked;		/* held by main() for the whole 't' run */
	PMEMcond cond;			/* exercised by the 'c' test */
	PMEMrwlock rwlock;		/* exercised by the 'r' test */
	int check_data;			/* set to 1 once a writer produced data */
	uint8_t data[DATA_SIZE];	/* payload checked for torn writes */
} *Test_obj;
PMEMobjpool *
pmemobj_pool_by_ptr(const void *arg)
{
	/*
	 * Mock: every address resolves to the single statically allocated
	 * Mock_pop; 'arg' is intentionally ignored.
	 */
	return &Mock_pop;
}
/*
* mock_open_pool -- (internal) simulate pool opening
*/
static void
mock_open_pool(PMEMobjpool *pop)
{
	/*
	 * Advancing run_id invalidates lock state initialized under the
	 * previous run_id (assumption: matches libpmemobj's sync.c
	 * run_id-based lazy lock initialization -- verify there).
	 */
	util_fetch_and_add64(&pop->run_id, 2);
}
/*
* mutex_write_worker -- (internal) write data with mutex
*/
static void *
mutex_write_worker(void *arg)
{
	/* the thread id doubles as the byte value written to the buffer */
	int fill = (int)(uintptr_t)arg;

	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex) != 0) {
			UT_ERR("pmemobj_mutex_lock");
			return NULL;
		}

		memset(Test_obj->data, fill, DATA_SIZE);

		if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex) != 0)
			UT_ERR("pmemobj_mutex_unlock");
	}

	return NULL;
}
/*
* mutex_check_worker -- (internal) check consistency with mutex
*/
static void *
mutex_check_worker(void *arg)
{
	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex) != 0) {
			UT_ERR("pmemobj_mutex_lock");
			return NULL;
		}

		/* every byte must match the first one -- no torn writes */
		uint8_t expected = Test_obj->data[0];
		for (int idx = 1; idx < DATA_SIZE; ++idx)
			UT_ASSERTeq(Test_obj->data[idx], expected);

		/* reset the buffer for the next writer */
		memset(Test_obj->data, 0, DATA_SIZE);

		if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex) != 0)
			UT_ERR("pmemobj_mutex_unlock");
	}

	return NULL;
}
/*
* cond_write_worker -- (internal) write data with cond variable
*/
static void *
cond_write_worker(void *arg)
{
	int fill = (int)(uintptr_t)arg;

	/* produce data, raise the flag and wake one waiting checker */
	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex) != 0)
			return NULL;

		memset(Test_obj->data, fill, DATA_SIZE);
		Test_obj->check_data = 1;

		if (pmemobj_cond_signal(&Mock_pop, &Test_obj->cond) != 0)
			UT_ERR("pmemobj_cond_signal");

		pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	}

	return NULL;
}
/*
* cond_check_worker -- (internal) check consistency with cond variable
*/
static void *
cond_check_worker(void *arg)
{
	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex) != 0)
			return NULL;

		/* re-check the predicate -- guards against spurious wakeups */
		while (Test_obj->check_data != 1) {
			if (pmemobj_cond_wait(&Mock_pop, &Test_obj->cond,
					&Test_obj->mutex) != 0)
				UT_ERR("pmemobj_cond_wait");
		}

		/* every byte must match the first one -- no torn writes */
		uint8_t expected = Test_obj->data[0];
		for (int idx = 1; idx < DATA_SIZE; ++idx)
			UT_ASSERTeq(Test_obj->data[idx], expected);

		memset(Test_obj->data, 0, DATA_SIZE);

		pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
	}

	return NULL;
}
/*
* rwlock_write_worker -- (internal) write data with rwlock
*/
static void *
rwlock_write_worker(void *arg)
{
	int fill = (int)(uintptr_t)arg;

	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_rwlock_wrlock(&Mock_pop, &Test_obj->rwlock) != 0) {
			UT_ERR("pmemobj_rwlock_wrlock");
			return NULL;
		}

		memset(Test_obj->data, fill, DATA_SIZE);

		if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock) != 0)
			UT_ERR("pmemobj_rwlock_unlock");
	}

	return NULL;
}
/*
* rwlock_check_worker -- (internal) check consistency with rwlock
*/
static void *
rwlock_check_worker(void *arg)
{
	for (unsigned iter = 0; iter < WORKER_RUNS; ++iter) {
		if (pmemobj_rwlock_rdlock(&Mock_pop, &Test_obj->rwlock) != 0) {
			UT_ERR("pmemobj_rwlock_rdlock");
			return NULL;
		}

		/*
		 * Readers only verify consistency -- the buffer is never
		 * modified under the read lock.
		 */
		uint8_t expected = Test_obj->data[0];
		for (int idx = 1; idx < DATA_SIZE; ++idx)
			UT_ASSERTeq(Test_obj->data[idx], expected);

		if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock) != 0)
			UT_ERR("pmemobj_rwlock_unlock");
	}

	return NULL;
}
/*
* timed_write_worker -- (internal) intentionally doing nothing
*/
static void *
timed_write_worker(void *arg)
{
	/* nothing to do -- the 't' test exercises only the checker side */
	return NULL;
}
/*
* timed_check_worker -- (internal) check consistency with mutex
*/
static void *
timed_check_worker(void *arg)
{
	for (unsigned run = 0; run < WORKER_RUNS; run++) {
		/*
		 * Threads with odd ids target the mutex held by main()
		 * (LOCKED_MUTEX == 1); even ids target the free mutex.
		 */
		int mutex_id = (int)(uintptr_t)arg % 2;
		PMEMmutex *mtx = mutex_id == LOCKED_MUTEX ?
				&Test_obj->mutex_locked : &Test_obj->mutex;

		/* absolute deadline = now + TIMEOUT, nsec kept below 1s */
		struct timespec t1, t2, abs_time;
		os_clock_gettime(CLOCK_REALTIME, &t1);
		abs_time = t1;
		abs_time.tv_nsec += TIMEOUT;
		if (abs_time.tv_nsec >= NANO_PER_ONE) {
			abs_time.tv_sec++;
			abs_time.tv_nsec -= NANO_PER_ONE;
		}

		int ret = pmemobj_mutex_timedlock(&Mock_pop, mtx, &abs_time);
		os_clock_gettime(CLOCK_REALTIME, &t2);

		if (mutex_id == LOCKED_MUTEX) {
			/* held mutex: must time out and wait at least TIMEOUT */
			UT_ASSERTeq(ret, ETIMEDOUT);
			uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec) *
				NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
			UT_ASSERT(diff >= TIMEOUT);
			return NULL;
		}

		if (ret == 0) {
			/* acquired the free mutex -- release and loop */
			UT_ASSERTne(mutex_id, LOCKED_MUTEX);
			pmemobj_mutex_unlock(&Mock_pop, mtx);
		} else if (ret == ETIMEDOUT) {
			/* contention timeout is fine, but not early wakeup */
			uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec)
				* NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
			UT_ASSERT(diff >= TIMEOUT);
		} else {
			errno = ret;
			UT_ERR("!pmemobj_mutex_timedlock");
		}
	}

	return NULL;
}
/*
* cleanup -- (internal) clean up after each run
*/
static void
cleanup(char test_type)
{
	/*
	 * Destroy the OS-level primitive embedded inside each PMEM lock
	 * wrapper that the given test type initialized, so the next
	 * "pool open" starts from a clean state.
	 */
	switch (test_type) {
	case 'm':
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		break;
	case 'r':
		util_rwlock_destroy(&((PMEMrwlock_internal *)
			&(Test_obj->rwlock))->PMEMrwlock_lock);
		break;
	case 'c':
		/* the 'c' test uses both a mutex and a condition variable */
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		util_cond_destroy(&((PMEMcond_internal *)
			&(Test_obj->cond))->PMEMcond_cond);
		break;
	case 't':
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex))->PMEMmutex_lock);
		util_mutex_destroy(&((PMEMmutex_internal *)
			&(Test_obj->mutex_locked))->PMEMmutex_lock);
		break;
	default:
		FATAL_USAGE();
	}
}
/*
 * obj_sync_persist -- (internal) mock persist callback; the test object
 * lives in volatile memory, so there is nothing to flush
 */
static int
obj_sync_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
	/* no-op */
	return 0;
}
/*
 * main -- usage: obj_sync [mrct] <num_threads> <runs>
 *
 * Spawns <num_threads> writer/checker thread pairs hammering the selected
 * PMEM synchronization primitive, and repeats the whole cycle <runs> times,
 * simulating a pool re-open (run_id bump) between cycles.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_sync");
	util_init();
	if (argc < 4)
		FATAL_USAGE();
	worker writer;
	worker checker;
	/* first argument selects the primitive under test */
	char test_type = argv[1][0];
	switch (test_type) {
	case 'm':
		writer = mutex_write_worker;
		checker = mutex_check_worker;
		break;
	case 'r':
		writer = rwlock_write_worker;
		checker = rwlock_check_worker;
		break;
	case 'c':
		writer = cond_write_worker;
		checker = cond_check_worker;
		break;
	case 't':
		writer = timed_write_worker;
		checker = timed_check_worker;
		break;
	default:
		FATAL_USAGE();
	}
	unsigned long num_threads = strtoul(argv[2], NULL, 10);
	if (num_threads > MAX_THREAD_NUM)
		UT_FATAL("Do not use more than %d threads.\n", MAX_THREAD_NUM);
	unsigned long opens = strtoul(argv[3], NULL, 10);
	if (opens > MAX_OPENS)
		UT_FATAL("Do not use more than %d runs.\n", MAX_OPENS);
	os_thread_t *write_threads
		= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
	os_thread_t *check_threads
		= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
	/* first pool open */
	mock_open_pool(&Mock_pop);
	Mock_pop.p_ops.persist = obj_sync_persist;
	Mock_pop.p_ops.base = &Mock_pop;
	Test_obj = (struct mock_obj *)MALLOC(sizeof(struct mock_obj));
	/* zero-initialize the test object */
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex);
	pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex_locked);
	pmemobj_cond_zero(&Mock_pop, &Test_obj->cond);
	pmemobj_rwlock_zero(&Mock_pop, &Test_obj->rwlock);
	Test_obj->check_data = 0;
	memset(&Test_obj->data, 0, DATA_SIZE);
	for (unsigned long run = 0; run < opens; run++) {
		/* the 't' test needs one mutex permanently held */
		if (test_type == 't') {
			pmemobj_mutex_lock(&Mock_pop,
					&Test_obj->mutex_locked);
		}
		/* the thread index is passed as the fill byte value */
		for (unsigned i = 0; i < num_threads; i++) {
			THREAD_CREATE(&write_threads[i], NULL, writer,
				(void *)(uintptr_t)i);
			THREAD_CREATE(&check_threads[i], NULL, checker,
				(void *)(uintptr_t)i);
		}
		for (unsigned i = 0; i < num_threads; i++) {
			THREAD_JOIN(&write_threads[i], NULL);
			THREAD_JOIN(&check_threads[i], NULL);
		}
		if (test_type == 't') {
			pmemobj_mutex_unlock(&Mock_pop,
					&Test_obj->mutex_locked);
		}
		/* up the run_id counter and cleanup */
		mock_open_pool(&Mock_pop);
		cleanup(test_type);
	}
	FREE(check_threads);
	FREE(write_threads);
	FREE(Test_obj);
	DONE(NULL);
}
| 8,776 | 21.97644 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_sync/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- redefinitions of lock functions (Posix implementation)
*/
#include <pthread.h>
#include "util.h"
#include "os.h"
#include "unittest.h"
/*
 * pthread_mutex_init -- mock: the call counted as run 1 fails with -1
 * (per FUNC_MOCK_RUN semantics in unittest.h -- verify there); all other
 * calls fall through to the real pthread_mutex_init
 */
FUNC_MOCK(pthread_mutex_init, int,
	pthread_mutex_t *__restrict mutex,
	const pthread_mutexattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_mutex_init, mutex, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
/*
 * pthread_rwlock_init -- mock: the call counted as run 1 fails with -1;
 * all other calls fall through to the real pthread_rwlock_init
 */
FUNC_MOCK(pthread_rwlock_init, int,
	pthread_rwlock_t *__restrict rwlock,
	const pthread_rwlockattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_rwlock_init, rwlock, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
/*
 * pthread_cond_init -- mock: the call counted as run 1 fails with -1;
 * all other calls fall through to the real pthread_cond_init
 */
FUNC_MOCK(pthread_cond_init, int,
	pthread_cond_t *__restrict cond,
	const pthread_condattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_cond_init, cond, attr)
FUNC_MOCK_RUN(1) {
	return -1;
}
FUNC_MOCK_END
| 950 | 22.775 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/out_err_mt_win/out_err_mt_win.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* out_err_mt_win.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
/*
 * print_errors -- (internal) print 'msg' followed by the last error
 * message recorded by each PMDK library (output order is part of the
 * test's expected log)
 */
static void
print_errors(const wchar_t *msg)
{
	UT_OUT("%S", msg);
	UT_OUT("PMEM: %S", pmem_errormsgW());
	UT_OUT("PMEMOBJ: %S", pmemobj_errormsgW());
	UT_OUT("PMEMLOG: %S", pmemlog_errormsgW());
	UT_OUT("PMEMBLK: %S", pmemblk_errormsgW());
	UT_OUT("PMEMPOOL: %S", pmempool_errormsgW());
}
/*
 * check_lib_version -- (internal) parse one library's version-mismatch
 * error message with the given wide format string and verify both the
 * requested ("need") and actual ("found") major version numbers
 */
static void
check_lib_version(const wchar_t *errmsg, const wchar_t *fmt,
	int exp_need, int exp_found)
{
	int err_need;
	int err_found;
	/* both %d fields must be present in the message */
	int ret = swscanf(errmsg, fmt, &err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, exp_need);
	UT_ASSERTeq(err_found, exp_found);
}

/*
 * check_errors -- (internal) verify that every PMDK library reported a
 * major version mismatch for the requested version 'ver'
 * (refactored: the 5x copy-pasted swscanf/assert sequence now goes
 * through a single helper)
 */
static void
check_errors(int ver)
{
	check_lib_version(pmem_errormsgW(),
		L"libpmem major version mismatch (need %d, found %d)",
		ver, PMEM_MAJOR_VERSION);
	check_lib_version(pmemobj_errormsgW(),
		L"libpmemobj major version mismatch (need %d, found %d)",
		ver, PMEMOBJ_MAJOR_VERSION);
	check_lib_version(pmemlog_errormsgW(),
		L"libpmemlog major version mismatch (need %d, found %d)",
		ver, PMEMLOG_MAJOR_VERSION);
	check_lib_version(pmemblk_errormsgW(),
		L"libpmemblk major version mismatch (need %d, found %d)",
		ver, PMEMBLK_MAJOR_VERSION);
	check_lib_version(pmempool_errormsgW(),
		L"libpmempool major version mismatch (need %d, found %d)",
		ver, PMEMPOOL_MAJOR_VERSION);
}
static void *
do_test(void *arg)
{
	/* each worker requests a distinct bogus major version */
	int req = *(int *)arg;

	/* every check below records a version-mismatch error message */
	pmem_check_version(req, 0);
	pmemobj_check_version(req, 0);
	pmemlog_check_version(req, 0);
	pmemblk_check_version(req, 0);
	pmempool_check_version(req, 0);

	check_errors(req);

	return NULL;
}
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t threads[NUM_THREADS];
	int vers[NUM_THREADS];

	/* each thread gets its own distinct bogus version number */
	for (int t = 0; t < NUM_THREADS; ++t) {
		vers[t] = 10000 + t;
		THREAD_CREATE(&threads[t], NULL, worker, &vers[t]);
	}

	for (int t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&threads[t], NULL);
}
/*
 * wmain -- usage: out_err_mt_win file1 file2 file3 file4 dir
 *
 * Triggers a deliberately failing call in each PMDK library and dumps
 * the recorded error messages, single- and multi-threaded.
 */
int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "out_err_mt_win");
	if (argc != 6)
		UT_FATAL("usage: %S file1 file2 file3 file4 dir",
			argv[0]);
	print_errors(L"start");
	/* NOTE(review): create results are not checked here -- confirm
	 * the test relies on later calls failing loudly instead */
	PMEMobjpool *pop = pmemobj_createW(argv[1], L"test",
		PMEMOBJ_MIN_POOL, 0666);
	PMEMlogpool *plp = pmemlog_createW(argv[2],
		PMEMLOG_MIN_POOL, 0666);
	PMEMblkpool *pbp = pmemblk_createW(argv[3],
		128, PMEMBLK_MIN_POOL, 0666);
	util_init();
	/* bogus version numbers -- each check must fail and set an error */
	pmem_check_version(10000, 0);
	pmemobj_check_version(10001, 0);
	pmemlog_check_version(10002, 0);
	pmemblk_check_version(10003, 0);
	pmempool_check_version(10006, 0);
	print_errors(L"version check");
	void *ptr = NULL;
	/*
	 * We are testing library error reporting and we don't want this test
	 * to fail under memcheck.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	pmem_msync(ptr, 1);
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	print_errors(L"pmem_msync");
	int ret;
	PMEMoid oid;
	/* zero-sized allocation must fail */
	ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
	UT_ASSERTeq(ret, -1);
	print_errors(L"pmemobj_alloc");
	/* oversized append must fail */
	pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
	print_errors(L"pmemlog_append");
	/* out-of-range block index must fail */
	size_t nblock = pmemblk_nblock(pbp);
	pmemblk_set_error(pbp, nblock + 1);
	print_errors(L"pmemblk_set_error");
	run_mt_test(do_test);
	pmemobj_close(pop);
	pmemlog_close(plp);
	pmemblk_close(pbp);
	PMEMpoolcheck *ppc;
	struct pmempool_check_args args = {0, };
	/* truncated args size must make check_init fail */
	ppc = pmempool_check_init(&args, sizeof(args) / 2);
	UT_ASSERTeq(ppc, NULL);
	print_errors(L"pmempool_check_init");
	DONEW(NULL);
}
| 3,844 | 22.30303 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_oid_thread/obj_oid_thread.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_oid_thread.c -- unit test for the reverse direct operation
*/
#include "unittest.h"
#include "lane.h"
#include "obj.h"
#include "sys_util.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock;
static os_cond_t cond;
static int flag = 1;
static PMEMoid thread_oid;
/*
* test_worker -- (internal) test worker thread
*/
static void *
test_worker(void *arg)
{
	util_mutex_lock(&lock);
	/* before pool is closed */
	void *direct = pmemobj_direct(thread_oid);
	UT_ASSERT(OID_EQUALS(thread_oid, pmemobj_oid(direct)));
	/* flag == 0 tells main() the first lookup is done */
	flag = 0;
	os_cond_signal(&cond);
	util_mutex_unlock(&lock);
	util_mutex_lock(&lock);
	/* wait until main() has closed all pools and set flag back to 1 */
	while (flag == 0)
		os_cond_wait(&cond, &lock);
	/* after pool is closed */
	UT_ASSERT(OID_IS_NULL(pmemobj_oid(direct)));
	util_mutex_unlock(&lock);
	return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_oid_thread");
if (argc != 3)
UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);
util_mutex_init(&lock);
util_cond_init(&cond);
unsigned npools = ATOU(argv[2]);
const char *dir = argv[1];
int r;
PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMoid *));
size_t length = strlen(dir) + MAX_PATH_LEN;
char *path = MALLOC(length);
for (unsigned i = 0; i < npools; ++i) {
int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
dir, i);
if (ret < 0 || ret >= length)
UT_FATAL("snprintf: %d", ret);
pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pops[i] == NULL)
UT_FATAL("!pmemobj_create");
}
/* Address outside the pmemobj pool */
void *allocated_memory = MALLOC(sizeof(int));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(allocated_memory)));
PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(NULL)));
oids[0] = OID_NULL;
for (unsigned i = 0; i < npools; ++i) {
uint64_t off = pops[i]->heap_offset;
oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
UT_ASSERT(OID_EQUALS(oids[i],
pmemobj_oid(pmemobj_direct(oids[i]))));
r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(OID_EQUALS(tmpoids[i],
pmemobj_oid(pmemobj_direct(tmpoids[i]))));
}
r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(!OID_IS_NULL(pmemobj_oid(pmemobj_direct(thread_oid))));
util_mutex_lock(&lock);
os_thread_t t;
THREAD_CREATE(&t, NULL, test_worker, NULL);
/* wait for the thread to perform the first direct */
while (flag != 0)
os_cond_wait(&cond, &lock);
for (unsigned i = 0; i < npools; ++i) {
pmemobj_free(&tmpoids[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(tmpoids[i]))));
pmemobj_close(pops[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(oids[i]))));
}
/* signal the waiting thread */
flag = 1;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
THREAD_JOIN(&t, NULL);
FREE(path);
FREE(tmpoids);
FREE(oids);
FREE(pops);
FREE(allocated_memory);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
DONE(NULL);
}
| 3,186 | 21.602837 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_fip_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_FIP_COMMON_H
#define RPMEM_FIP_COMMON_H 1
#include <string.h>
#include <netinet/in.h>
#include <rdma/fabric.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
#define RPMEM_FIVERSION FI_VERSION(1, 4) /* required libfabric API version */
#define RPMEM_FIP_CQ_WAIT_MS 100
/*
 * NOTE: min/max evaluate their arguments more than once -- do not pass
 * expressions with side effects.
 */
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
/*
* rpmem_fip_node -- client or server node type
*/
enum rpmem_fip_node {
	RPMEM_FIP_NODE_CLIENT,	/* initiator side (librpmem) */
	RPMEM_FIP_NODE_SERVER,	/* target side (rpmemd) */
	MAX_RPMEM_FIP_NODE,
};
/*
* rpmem_fip_probe -- list of providers
*/
struct rpmem_fip_probe {
	unsigned providers;	/* bitmask indexed by enum rpmem_provider */
	size_t max_wq_size[MAX_RPMEM_PROV]; /* per-provider max TX queue size */
};
/*
* rpmem_fip_probe -- returns true if specified provider is available
*/
static inline int
rpmem_fip_probe(struct rpmem_fip_probe probe, enum rpmem_provider provider)
{
	/* one bit per provider, indexed by the enum value */
	unsigned mask = 1U << provider;
	return (probe.providers & mask) != 0;
}
/*
* rpmem_fip_probe_any -- returns true if any provider is available
*/
static inline int
rpmem_fip_probe_any(struct rpmem_fip_probe probe)
{
	/* non-empty bitmask means at least one provider is usable */
	return probe.providers ? 1 : 0;
}
int rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe);
struct fi_info *rpmem_fip_get_hints(enum rpmem_provider provider);
int rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout);
int rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout);
size_t rpmem_fip_cq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_wq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_rx_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_max_nlanes(struct fi_info *fi);
void rpmem_fip_print_info(struct fi_info *fi);
#ifdef __cplusplus
}
#endif
#endif
| 1,992 | 21.144444 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_fip_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.c -- common definitions for librpmem and rpmemd
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_common_log.h"
#include "valgrind_internal.h"
#include <rdma/fi_errno.h>
/*
* rpmem_fip_get_hints -- return fabric interface information hints
*/
struct fi_info *
rpmem_fip_get_hints(enum rpmem_provider provider)
{
	RPMEMC_ASSERT(provider < MAX_RPMEM_PROV);
	/* the returned hints must be released by the caller (fi_freeinfo) */
	struct fi_info *hints = fi_allocinfo();
	if (!hints) {
		RPMEMC_LOG(ERR, "!fi_allocinfo");
		return NULL;
	}
	/* connection-oriented endpoint */
	hints->ep_attr->type = FI_EP_MSG;
	/*
	 * Basic memory registration mode indicates that MR attributes
	 * (rkey, lkey) are selected by provider.
	 */
	hints->domain_attr->mr_mode = FI_MR_BASIC;
	/*
	 * FI_THREAD_SAFE indicates MT applications can access any
	 * resources through interface without any restrictions
	 */
	hints->domain_attr->threading = FI_THREAD_SAFE;
	/*
	 * FI_MSG - SEND and RECV
	 * FI_RMA - WRITE and READ
	 */
	hints->caps = FI_MSG | FI_RMA;
	/* must register locally accessed buffers */
	hints->mode = FI_CONTEXT | FI_LOCAL_MR | FI_RX_CQ_DATA;
	/* READ-after-WRITE and SEND-after-WRITE message ordering required */
	hints->tx_attr->msg_order = FI_ORDER_RAW | FI_ORDER_SAW;
	hints->addr_format = FI_SOCKADDR;
	if (provider != RPMEM_PROV_UNKNOWN) {
		/* constrain discovery to one named provider */
		const char *prov_name = rpmem_provider_to_str(provider);
		RPMEMC_ASSERT(prov_name != NULL);
		hints->fabric_attr->prov_name = strdup(prov_name);
		if (!hints->fabric_attr->prov_name) {
			RPMEMC_LOG(ERR, "!strdup(provider)");
			goto err_strdup;
		}
	}
	return hints;
err_strdup:
	fi_freeinfo(hints);
	return NULL;
}
/*
* rpmem_fip_probe_get -- return list of available providers
*/
int
rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe)
{
	struct fi_info *hints = rpmem_fip_get_hints(RPMEM_PROV_UNKNOWN);
	if (!hints)
		return -1;

	struct fi_info *fi;
	int ret = fi_getinfo(RPMEM_FIVERSION, target, NULL, 0, hints, &fi);
	if (ret)
		goto out_hints;

	if (probe) {
		memset(probe, 0, sizeof(*probe));

		/* record each recognized provider and its max TX queue size */
		for (struct fi_info *it = fi; it != NULL; it = it->next) {
			enum rpmem_provider p = rpmem_provider_from_str(
					it->fabric_attr->prov_name);
			if (p != RPMEM_PROV_UNKNOWN) {
				probe->providers |= (1U << p);
				probe->max_wq_size[p] = it->tx_attr->size;
			}
		}
	}

	fi_freeinfo(fi);
out_hints:
	fi_freeinfo(hints);
	return ret;
}
/*
* rpmem_fip_read_eq -- read event queue entry with specified timeout
*/
int
rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
	uint32_t *event, int timeout)
{
	int ret;
	ssize_t sret;
	struct fi_eq_err_entry err;
	sret = fi_eq_sread(eq, event, entry, sizeof(*entry), timeout, 0);
	/* fi_eq_sread's result may look uninitialized to memcheck */
	VALGRIND_DO_MAKE_MEM_DEFINED(&sret, sizeof(sret));
	/* an expired finite timeout is reported as 1, not as an error */
	if (timeout != -1 && (sret == -FI_ETIMEDOUT || sret == -FI_EAGAIN)) {
		errno = ETIMEDOUT;
		return 1;
	}
	/* negative return or a short read -- treat both as failure */
	if (sret < 0 || (size_t)sret != sizeof(*entry)) {
		if (sret < 0)
			ret = (int)sret;
		else
			ret = -1; /* short read has no libfabric error code */
		/* try to fetch the detailed error entry for logging */
		sret = fi_eq_readerr(eq, &err, 0);
		if (sret < 0) {
			errno = EIO;
			RPMEMC_LOG(ERR, "error reading from event queue: "
				"cannot read error from event queue: %s",
				fi_strerror((int)sret));
		} else if (sret > 0) {
			RPMEMC_ASSERT(sret == sizeof(err));
			errno = -err.prov_errno;
			RPMEMC_LOG(ERR, "error reading from event queue: %s",
				fi_eq_strerror(eq, err.prov_errno,
					NULL, NULL, 0));
		}
		return ret;
	}
	return 0;
}
/*
* rpmem_fip_read_eq -- read event queue entry and expect specified event
* and fid
*
* Returns:
* 1 - timeout
* 0 - success
* otherwise - error
*/
int
rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
	uint32_t exp_event, fid_t exp_fid, int timeout)
{
	uint32_t event;
	int ret = rpmem_fip_read_eq(eq, entry, &event, timeout);
	if (ret)
		return ret;

	/* success only when both the event and the endpoint fid match */
	int fid_ok = (entry->fid == exp_fid);
	if (event == exp_event && fid_ok)
		return 0;

	errno = EIO;
	RPMEMC_LOG(ERR, "unexpected event received (%u) "
			"expected (%u)%s", event, exp_event,
			!fid_ok ? " invalid endpoint" : "");
	return -1;
}
/*
* rpmem_fip_lane_attr -- lane attributes
*
* This structure describes how many SQ, RQ and CQ entries are
* required for a single lane.
*
* NOTE:
* - WRITE, READ and SEND requests are placed in SQ,
* - RECV requests are placed in RQ.
*/
struct rpmem_fip_lane_attr {
	size_t n_per_sq; /* number of entries per lane in send queue */
	size_t n_per_rq; /* number of entries per lane in receive queue */
	size_t n_per_cq; /* number of entries per lane in completion queue */
};
/*
 * queues size required by remote persist operation methods;
 * indexed by [node type][persist method]
 */
static const struct rpmem_fip_lane_attr
rpmem_fip_lane_attrs[MAX_RPMEM_FIP_NODE][MAX_RPMEM_PM] = {
	[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_GPSPM] = {
		.n_per_sq = 2, /* WRITE + SEND */
		.n_per_rq = 1, /* RECV */
		.n_per_cq = 3,
	},
	[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_APM] = {
		/* WRITE + READ for persist, WRITE + SEND for deep persist */
		.n_per_sq = 2, /* WRITE + SEND */
		.n_per_rq = 1, /* RECV */
		.n_per_cq = 3,
	},
	[RPMEM_FIP_NODE_SERVER][RPMEM_PM_GPSPM] = {
		.n_per_sq = 1, /* SEND */
		.n_per_rq = 1, /* RECV */
		.n_per_cq = 3,
	},
	[RPMEM_FIP_NODE_SERVER][RPMEM_PM_APM] = {
		.n_per_sq = 1, /* SEND */
		.n_per_rq = 1, /* RECV */
		.n_per_cq = 3,
	},
};
/*
* rpmem_fip_cq_size -- returns completion queue size based on
* persist method and node type
*/
/*
 * rpmem_fip_cq_size -- returns the completion queue size for the given
 * persist method and node type; a zero table entry maps to a minimum of 1
 */
size_t
rpmem_fip_cq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
	RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
	RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
	const struct rpmem_fip_lane_attr *attr =
		&rpmem_fip_lane_attrs[node][pm];
	/* standard C instead of the GNU "a ? : b" omitted-operand extension */
	return attr->n_per_cq ? attr->n_per_cq : 1;
}
/*
* rpmem_fip_wq_size -- returns submission queue (transmit queue) size based
* on persist method and node type
*/
/*
 * rpmem_fip_wq_size -- returns the submission (transmit) queue size for the
 * given persist method and node type; a zero table entry maps to 1
 */
size_t
rpmem_fip_wq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
	RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
	RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
	const struct rpmem_fip_lane_attr *attr =
		&rpmem_fip_lane_attrs[node][pm];
	/* standard C instead of the GNU "a ? : b" omitted-operand extension */
	return attr->n_per_sq ? attr->n_per_sq : 1;
}
/*
* rpmem_fip_rx_size -- returns receive queue size based
* on persist method and node type
*/
/*
 * rpmem_fip_rx_size -- returns the receive queue size for the given
 * persist method and node type; a zero table entry maps to 1
 */
size_t
rpmem_fip_rx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
	RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
	RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
	const struct rpmem_fip_lane_attr *attr =
		&rpmem_fip_lane_attrs[node][pm];
	/* standard C instead of the GNU "a ? : b" omitted-operand extension */
	return attr->n_per_rq ? attr->n_per_rq : 1;
}
/*
* rpmem_fip_max_nlanes -- returns maximum number of lanes
*/
size_t
rpmem_fip_max_nlanes(struct fi_info *fi)
{
	/* the number of lanes is limited by the scarcest of three resources */
	size_t tx_cnt = fi->domain_attr->tx_ctx_cnt;
	size_t rx_cnt = fi->domain_attr->rx_ctx_cnt;
	size_t cq_cnt = fi->domain_attr->cq_cnt;

	size_t ctx_min = min(tx_cnt, rx_cnt);
	return min(ctx_min, cq_cnt);
}
/*
* rpmem_fip_print_info -- print some useful info about fabric interface
*/
void
rpmem_fip_print_info(struct fi_info *fi)
{
	RPMEMC_LOG(INFO, "libfabric version: %s",
			fi_tostr(fi, FI_TYPE_VERSION));

	/* duplicate the text so it can be split into lines in place */
	char *buff = strdup(fi_tostr(fi, FI_TYPE_INFO));
	if (!buff) {
		RPMEMC_LOG(ERR, "!allocating string buffer for "
				"libfabric interface information");
		return;
	}

	RPMEMC_LOG(INFO, "libfabric interface info:");

	/* log the multi-line description one line at a time */
	char *line = buff;
	while (line != NULL) {
		char *next = strchr(line, '\n');
		if (next) {
			*next = '\0';
			next++;
		}
		RPMEMC_LOG(INFO, "%s", line);
		line = next;
	}

	free(buff);
}
| 7,550 | 21.675676 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_common_log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmem_common_log.h -- common log macros for librpmem and rpmemd
*/
#if defined(RPMEMC_LOG_RPMEM) && defined(RPMEMC_LOG_RPMEMD)
#error Both RPMEMC_LOG_RPMEM and RPMEMC_LOG_RPMEMD defined
#elif !defined(RPMEMC_LOG_RPMEM) && !defined(RPMEMC_LOG_RPMEMD)
#define RPMEMC_LOG(level, fmt, args...) do {} while (0)
#define RPMEMC_DBG(level, fmt, args...) do {} while (0)
#define RPMEMC_FATAL(fmt, args...) do {} while (0)
#define RPMEMC_ASSERT(cond) do {} while (0)
#elif defined(RPMEMC_LOG_RPMEM)
#include "out.h"
#include "rpmem_util.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEM_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEM_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEM_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEM_ASSERT(cond)
#else
#include "rpmemd_log.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEMD_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEMD_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEMD_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEMD_ASSERT(cond)
#endif
| 1,160 | 28.769231 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_COMMON_H
#define RPMEM_COMMON_H 1
/*
* Values for SO_KEEPALIVE socket option
*/
#define RPMEM_CMD_ENV "RPMEM_CMD"
#define RPMEM_SSH_ENV "RPMEM_SSH"
#define RPMEM_DEF_CMD "rpmemd"
#define RPMEM_DEF_SSH "ssh"
#define RPMEM_PROV_SOCKET_ENV "RPMEM_ENABLE_SOCKETS"
#define RPMEM_PROV_VERBS_ENV "RPMEM_ENABLE_VERBS"
#define RPMEM_MAX_NLANES_ENV "RPMEM_MAX_NLANES"
#define RPMEM_WQ_SIZE_ENV "RPMEM_WORK_QUEUE_SIZE"
#define RPMEM_ACCEPT_TIMEOUT 30000
#define RPMEM_CONNECT_TIMEOUT 30000
#define RPMEM_MONITOR_TIMEOUT 1000
#include <stdint.h>
#include <sys/socket.h>
#include <netdb.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_err -- error codes
*/
enum rpmem_err {
	/*
	 * NOTE(review): these values appear to be exchanged between
	 * librpmem and rpmemd -- do not renumber (verify against the
	 * protocol handling in rpmemd).
	 */
	RPMEM_SUCCESS = 0,
	RPMEM_ERR_BADPROTO = 1,
	RPMEM_ERR_BADNAME = 2,
	RPMEM_ERR_BADSIZE = 3,
	RPMEM_ERR_BADNLANES = 4,
	RPMEM_ERR_BADPROVIDER = 5,
	RPMEM_ERR_FATAL = 6,
	RPMEM_ERR_FATAL_CONN = 7,
	RPMEM_ERR_BUSY = 8,
	RPMEM_ERR_EXISTS = 9,
	RPMEM_ERR_PROVNOSUP = 10,
	RPMEM_ERR_NOEXIST = 11,
	RPMEM_ERR_NOACCESS = 12,
	RPMEM_ERR_POOL_CFG = 13,
	MAX_RPMEM_ERR,
};
/*
* rpmem_persist_method -- remote persist operation method
*/
enum rpmem_persist_method {
	RPMEM_PM_GPSPM = 1, /* General Purpose Server Persistency Method */
	RPMEM_PM_APM = 2, /* Appliance Persistency Method */
	MAX_RPMEM_PM,
};
/* human-readable name of a persist method */
const char *rpmem_persist_method_to_str(enum rpmem_persist_method pm);
/*
 * rpmem_provider -- supported providers
 */
enum rpmem_provider {
	RPMEM_PROV_UNKNOWN = 0,
	RPMEM_PROV_LIBFABRIC_VERBS = 1,
	RPMEM_PROV_LIBFABRIC_SOCKETS = 2,
	MAX_RPMEM_PROV,
};
/* string <-> enum mapping for provider names */
enum rpmem_provider rpmem_provider_from_str(const char *str);
const char *rpmem_provider_to_str(enum rpmem_provider provider);
/*
* rpmem_req_attr -- arguments for open/create request
*/
struct rpmem_req_attr {
	size_t pool_size;		/* requested pool size */
	unsigned nlanes;		/* requested number of lanes */
	size_t buff_size;
	enum rpmem_provider provider;	/* requested fabric provider */
	const char *pool_desc;		/* pool set descriptor */
};
/*
 * rpmem_resp_attr -- return arguments from open/create request
 */
struct rpmem_resp_attr {
	unsigned short port;		/* RDMA connection port */
	uint64_t rkey;			/* remote memory key */
	uint64_t raddr;			/* remote base address */
	unsigned nlanes;		/* granted number of lanes */
	enum rpmem_persist_method persist_method;
};
/* flag bits for struct rpmem_target_info.flags */
#define RPMEM_HAS_USER 0x1
#define RPMEM_HAS_SERVICE 0x2
#define RPMEM_FLAGS_USE_IPV4 0x4
#define RPMEM_MAX_USER (32 + 1) /* see useradd(8) + 1 for '\0' */
#define RPMEM_MAX_NODE (255 + 1) /* see gethostname(2) + 1 for '\0' */
#define RPMEM_MAX_SERVICE (NI_MAXSERV + 1) /* + 1 for '\0' */
#define RPMEM_HDR_SIZE 4096
#define RPMEM_CLOSE_FLAGS_REMOVE 0x1
#define RPMEM_DEF_BUFF_SIZE 8192
/* parsed form of a "[user@]node[:service]" target string */
struct rpmem_target_info {
	char user[RPMEM_MAX_USER];
	char node[RPMEM_MAX_NODE];
	char service[RPMEM_MAX_SERVICE];
	unsigned flags;			/* RPMEM_HAS_* / RPMEM_FLAGS_* bits */
};
extern unsigned Rpmem_max_nlanes;
extern unsigned Rpmem_wq_size;
extern int Rpmem_fork_unsafe;
int rpmem_b64_write(int sockfd, const void *buf, size_t len, int flags);
int rpmem_b64_read(int sockfd, void *buf, size_t len, int flags);
const char *rpmem_get_ip_str(const struct sockaddr *addr);
struct rpmem_target_info *rpmem_target_parse(const char *target);
void rpmem_target_free(struct rpmem_target_info *info);
int rpmem_xwrite(int fd, const void *buf, size_t len, int flags);
int rpmem_xread(int fd, void *buf, size_t len, int flags);
char *rpmem_get_ssh_conn_addr(void);
#ifdef __cplusplus
}
#endif
#endif
| 3,404 | 23.321429 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_proto.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_proto.h -- rpmem protocol definitions
*/
#ifndef RPMEM_PROTO_H
#define RPMEM_PROTO_H 1
#include <stdint.h>
#include <endian.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PACKED __attribute__((packed))
#define RPMEM_PROTO "tcp"
#define RPMEM_PROTO_MAJOR 0
#define RPMEM_PROTO_MINOR 1
#define RPMEM_SIG_SIZE 8
#define RPMEM_UUID_SIZE 16
#define RPMEM_PROV_SIZE 32
#define RPMEM_USER_SIZE 16
/*
* rpmem_msg_type -- type of messages
*/
enum rpmem_msg_type {
	/* values go on the wire -- do not renumber */
	RPMEM_MSG_TYPE_CREATE = 1, /* create request */
	RPMEM_MSG_TYPE_CREATE_RESP = 2, /* create request response */
	RPMEM_MSG_TYPE_OPEN = 3, /* open request */
	RPMEM_MSG_TYPE_OPEN_RESP = 4, /* open request response */
	RPMEM_MSG_TYPE_CLOSE = 5, /* close request */
	RPMEM_MSG_TYPE_CLOSE_RESP = 6, /* close request response */
	RPMEM_MSG_TYPE_SET_ATTR = 7, /* set attributes request */
	/* set attributes request response */
	RPMEM_MSG_TYPE_SET_ATTR_RESP = 8,
	MAX_RPMEM_MSG_TYPE,
};
/*
 * rpmem_pool_attr_packed -- a packed version
 */
struct rpmem_pool_attr_packed {
	char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
	uint32_t major; /* format major version number */
	uint32_t compat_features; /* mask: compatible "may" features */
	uint32_t incompat_features; /* mask: "must support" features */
	uint32_t ro_compat_features; /* mask: force RO if unsupported */
	unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
	unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
	unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
	unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
	unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
} PACKED;
/*
* rpmem_msg_ibc_attr -- in-band connection attributes
*
* Used by create request response and open request response.
* Contains essential information to proceed with in-band connection
* initialization.
*/
struct rpmem_msg_ibc_attr {
uint32_t port; /* RDMA connection port */
uint32_t persist_method; /* persist method */
uint64_t rkey; /* remote key */
uint64_t raddr; /* remote address */
uint32_t nlanes; /* number of lanes */
} PACKED;
/*
* rpmem_msg_pool_desc -- remote pool descriptor
*/
struct rpmem_msg_pool_desc {
uint32_t size; /* size of pool descriptor */
uint8_t desc[0]; /* pool descriptor, null-terminated string */
} PACKED;
/*
* rpmem_msg_hdr -- message header which consists of type and size of message
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr {
uint32_t type; /* type of message */
uint64_t size; /* size of message */
uint8_t body[0];
} PACKED;
/*
* rpmem_msg_hdr_resp -- message response header which consists of type, size
* and status.
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr_resp {
uint32_t status; /* response status */
uint32_t type; /* type of message */
uint64_t size; /* size of message */
} PACKED;
/*
* rpmem_msg_common -- common fields for open/create messages
*/
struct rpmem_msg_common {
uint16_t major; /* protocol version major number */
uint16_t minor; /* protocol version minor number */
uint64_t pool_size; /* minimum required size of a pool */
uint32_t nlanes; /* number of lanes used by initiator */
uint32_t provider; /* provider */
uint64_t buff_size; /* buffer size for inline persist */
} PACKED;
/*
* rpmem_msg_create -- create request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE.
* The size of message must be set to
* sizeof(struct rpmem_msg_create) + pool_desc_size
*/
struct rpmem_msg_create {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_create_resp -- create request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_create_resp).
*/
struct rpmem_msg_create_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
} PACKED;
/*
* rpmem_msg_open -- open request message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN.
* The size of message must be set to
* sizeof(struct rpmem_msg_open) + pool_desc_size
*/
struct rpmem_msg_open {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_open_resp -- open request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_open_resp)
*/
struct rpmem_msg_open_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_close -- close request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE
* The size of message must be set to sizeof(struct rpmem_msg_close)
*/
struct rpmem_msg_close {
struct rpmem_msg_hdr hdr; /* message header */
uint32_t flags; /* flags */
} PACKED;
/*
* rpmem_msg_close_resp -- close request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE_RESP
* The size of message must be set to sizeof(struct rpmem_msg_close_resp)
*/
struct rpmem_msg_close_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
/* no more fields */
} PACKED;
#define RPMEM_FLUSH_WRITE 0U /* flush / persist using RDMA WRITE */
#define RPMEM_DEEP_PERSIST 1U /* deep persist operation */
#define RPMEM_PERSIST_SEND 2U /* persist using RDMA SEND */
#define RPMEM_COMPLETION 4U /* schedule command with a completion */
/* the two least significant bits are reserved for mode of persist */
#define RPMEM_FLUSH_PERSIST_MASK 0x3U
#define RPMEM_PERSIST_MAX 2U /* maximum valid persist value */
/*
* rpmem_msg_persist -- remote persist message
*/
struct rpmem_msg_persist {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
uint64_t addr; /* remote memory address */
uint64_t size; /* remote memory size */
uint8_t data[];
};
/*
* rpmem_msg_persist_resp -- remote persist response message
*/
struct rpmem_msg_persist_resp {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
};
/*
* rpmem_msg_set_attr -- set attributes request message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr)
*/
struct rpmem_msg_set_attr {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_set_attr_resp -- set attributes request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr_resp).
*/
struct rpmem_msg_set_attr_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
} PACKED;
/*
* XXX Begin: Suppress gcc conversion warnings for FreeBSD be*toh macros.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
/*
 * rpmem_ntoh_msg_ibc_attr -- convert rpmem_msg_ibc_attr to host byte order
 *
 * Converts every multi-byte field of the in-band connection attributes.
 * Previously 'nlanes' was the only field of the wire struct left in
 * sender byte order.  Because rpmem_hton_* is implemented as the same
 * (involutive) be*toh conversion applied on the sending side, swapping
 * 'nlanes' here keeps encode/decode symmetric: it is a no-op on
 * little-endian hosts and fixes interoperability with big-endian peers.
 */
static inline void
rpmem_ntoh_msg_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
	ibc->port = be32toh(ibc->port);
	ibc->persist_method = be32toh(ibc->persist_method);
	ibc->rkey = be64toh(ibc->rkey);
	ibc->raddr = be64toh(ibc->raddr);
	ibc->nlanes = be32toh(ibc->nlanes);
}
/*
 * rpmem_ntoh_msg_pool_desc -- convert rpmem_msg_pool_desc to host byte order
 */
static inline void
rpmem_ntoh_msg_pool_desc(struct rpmem_msg_pool_desc *pool_desc)
{
	/* 'size' is the only multi-byte field; desc[] is a raw byte string */
	pool_desc->size = be32toh(pool_desc->size);
}
/*
 * rpmem_ntoh_pool_attr -- convert rpmem_pool_attr to host byte order
 *
 * Only the 32-bit feature/version fields need conversion; the signature,
 * UUIDs and user flags are opaque byte arrays with no endianness.
 */
static inline void
rpmem_ntoh_pool_attr(struct rpmem_pool_attr_packed *attr)
{
	attr->major = be32toh(attr->major);
	attr->ro_compat_features = be32toh(attr->ro_compat_features);
	attr->incompat_features = be32toh(attr->incompat_features);
	attr->compat_features = be32toh(attr->compat_features);
}
/*
* rpmem_ntoh_msg_hdr -- convert rpmem_msg_hdr to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
 * rpmem_hton_msg_hdr -- convert rpmem_msg_hdr to network byte order
 *
 * The be*toh conversions are involutions (applying them twice restores
 * the original value), so host-to-network is implemented by calling the
 * network-to-host routine.  The same idiom is used by all rpmem_hton_*
 * wrappers in this header.
 */
static inline void
rpmem_hton_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
	rpmem_ntoh_msg_hdr(hdrp);
}
/*
* rpmem_ntoh_msg_hdr_resp -- convert rpmem_msg_hdr_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
hdrp->status = be32toh(hdrp->status);
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr_resp -- convert rpmem_msg_hdr_resp to network byte order
*/
static inline void
rpmem_hton_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
rpmem_ntoh_msg_hdr_resp(hdrp);
}
/*
* rpmem_ntoh_msg_common -- convert rpmem_msg_common to host byte order
*/
static inline void
rpmem_ntoh_msg_common(struct rpmem_msg_common *msg)
{
msg->major = be16toh(msg->major);
msg->minor = be16toh(msg->minor);
msg->pool_size = be64toh(msg->pool_size);
msg->nlanes = be32toh(msg->nlanes);
msg->provider = be32toh(msg->provider);
msg->buff_size = be64toh(msg->buff_size);
}
/*
* rpmem_hton_msg_common -- convert rpmem_msg_common to network byte order
*/
static inline void
rpmem_hton_msg_common(struct rpmem_msg_common *msg)
{
rpmem_ntoh_msg_common(msg);
}
/*
* rpmem_ntoh_msg_create -- convert rpmem_msg_create to host byte order
*/
static inline void
rpmem_ntoh_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_pool_attr(&msg->pool_attr);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* rpmem_hton_msg_create -- convert rpmem_msg_create to network byte order
*/
static inline void
rpmem_hton_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_create(msg);
}
/*
* rpmem_ntoh_msg_create_resp -- convert rpmem_msg_create_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
}
/*
* rpmem_hton_msg_create_resp -- convert rpmem_msg_create_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_create_resp(msg);
}
/*
* rpmem_ntoh_msg_open -- convert rpmem_msg_open to host byte order
*/
static inline void
rpmem_ntoh_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* XXX End: Suppress gcc conversion warnings for FreeBSD be*toh macros
*/
#pragma GCC diagnostic pop
/*
* rpmem_hton_msg_open -- convert rpmem_msg_open to network byte order
*/
static inline void
rpmem_hton_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_open(msg);
}
/*
* rpmem_ntoh_msg_open_resp -- convert rpmem_msg_open_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_open_resp -- convert rpmem_msg_open_resp to network byte order
*/
static inline void
rpmem_hton_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_open_resp(msg);
}
/*
* rpmem_ntoh_msg_set_attr -- convert rpmem_msg_set_attr to host byte order
*/
static inline void
rpmem_ntoh_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_set_attr -- convert rpmem_msg_set_attr to network byte order
*/
static inline void
rpmem_hton_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_set_attr(msg);
}
/*
* rpmem_ntoh_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to network
* byte order
*/
static inline void
rpmem_hton_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_hton_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_ntoh_msg_close -- convert rpmem_msg_close to host byte order
*/
static inline void
rpmem_ntoh_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
}
/*
* rpmem_hton_msg_close -- convert rpmem_msg_close to network byte order
*/
static inline void
rpmem_hton_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_close(msg);
}
/*
* rpmem_ntoh_msg_close_resp -- convert rpmem_msg_close_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_close_resp -- convert rpmem_msg_close_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_close_resp(msg);
}
/*
 * pack_rpmem_pool_attr -- copy pool attributes to a packed structure
 *
 * Copies every field of the public rpmem_pool_attr into its packed
 * on-wire counterpart.  All memcpy bounds are taken from the destination
 * (they were previously mixed -- the signature copy was bounded by the
 * source); both are RPMEM_POOL_HDR_SIG_LEN today, but bounding by the
 * destination is consistent with the other copies and guards against
 * overflow should the types ever diverge.
 */
static inline void
pack_rpmem_pool_attr(const struct rpmem_pool_attr *src,
	struct rpmem_pool_attr_packed *dst)
{
	memcpy(dst->signature, src->signature, sizeof(dst->signature));
	dst->major = src->major;
	dst->compat_features = src->compat_features;
	dst->incompat_features = src->incompat_features;
	dst->ro_compat_features = src->ro_compat_features;
	memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
	memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
	memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
	memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
	memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
/*
 * unpack_rpmem_pool_attr -- copy pool attributes to an unpacked structure
 *
 * Inverse of pack_rpmem_pool_attr: copies every field of the packed
 * on-wire attributes into the public rpmem_pool_attr.  All memcpy bounds
 * are taken from the destination for consistency with the other copies
 * (the signature copy was previously bounded by the source; both sizes
 * are RPMEM_POOL_HDR_SIG_LEN, so behavior is unchanged).
 */
static inline void
unpack_rpmem_pool_attr(const struct rpmem_pool_attr_packed *src,
	struct rpmem_pool_attr *dst)
{
	memcpy(dst->signature, src->signature, sizeof(dst->signature));
	dst->major = src->major;
	dst->compat_features = src->compat_features;
	dst->incompat_features = src->incompat_features;
	dst->ro_compat_features = src->ro_compat_features;
	memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
	memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
	memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
	memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
	memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
#ifdef __cplusplus
}
#endif
#endif
| 15,016 | 26.503663 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_fip_lane.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_fip_lane.h -- rpmem fabric provider lane definition
*/
#include <sched.h>
#include <stdint.h>
#include "sys_util.h"
/*
* rpmem_fip_lane -- basic lane structure
*
* This structure consist of a synchronization object and a return value.
* It is possible to wait on the lane for specified event. The event can be
* signalled by another thread which can pass the return value if required.
*
* The sync variable can store up to 64 different events, each event on
* separate bit.
*/
struct rpmem_fip_lane {
os_spinlock_t lock;
int ret;
uint64_t sync;
};
/*
 * rpmem_fip_lane_init -- initialize basic lane structure
 *
 * Clears the return value and the event bitmask and initializes the
 * process-private spinlock.  Returns the result of util_spin_init
 * (0 on success).
 */
static inline int
rpmem_fip_lane_init(struct rpmem_fip_lane *lanep)
{
	lanep->ret = 0;
	lanep->sync = 0;
	return util_spin_init(&lanep->lock, PTHREAD_PROCESS_PRIVATE);
}
/*
* rpmem_fip_lane_fini -- deinitialize basic lane structure
*/
static inline void
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
util_spin_destroy(&lanep->lock);
}
/*
 * rpmem_fip_lane_busy -- return true if lane has pending events
 *
 * The event bitmask is read under the lane lock; a non-zero mask means
 * at least one event is still outstanding.
 */
static inline int
rpmem_fip_lane_busy(struct rpmem_fip_lane *lanep)
{
	int busy;

	util_spin_lock(&lanep->lock);
	busy = (lanep->sync != 0);
	util_spin_unlock(&lanep->lock);

	return busy;
}
/*
* rpmem_fip_lane_begin -- begin waiting for specified event(s)
*/
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->ret = 0;
lanep->sync |= sig;
util_spin_unlock(&lanep->lock);
}
/*
 * rpmem_fip_lane_is_busy -- return true if any of the specified event
 * bits is still pending on the lane (checked under the lane lock)
 */
static inline int
rpmem_fip_lane_is_busy(struct rpmem_fip_lane *lanep, uint64_t sig)
{
	util_spin_lock(&lanep->lock);
	int ret = (lanep->sync & sig) != 0;
	util_spin_unlock(&lanep->lock);
	return ret;
}
static inline int
rpmem_fip_lane_ret(struct rpmem_fip_lane *lanep)
{
util_spin_lock(&lanep->lock);
int ret = lanep->ret;
util_spin_unlock(&lanep->lock);
return ret;
}
/*
 * rpmem_fip_lane_wait -- spin (yielding the CPU between probes) until
 * all of the requested event bits have been signalled, then return the
 * lane's stored status code
 */
static inline int
rpmem_fip_lane_wait(struct rpmem_fip_lane *lanep, uint64_t sig)
{
	for (;;) {
		if (!rpmem_fip_lane_is_busy(lanep, sig))
			return rpmem_fip_lane_ret(lanep);
		sched_yield();
	}
}
/*
* rpmem_fip_lane_signal -- signal lane about specified event
*/
static inline void
rpmem_fip_lane_signal(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->sync &= ~sig;
util_spin_unlock(&lanep->lock);
}
/*
 * rpmem_fip_lane_sigret -- signal lane about specified event and store
 * return value
 *
 * Like rpmem_fip_lane_signal, but also records 'ret' so that a thread
 * blocked in rpmem_fip_lane_wait can pick it up as its result.
 */
static inline void
rpmem_fip_lane_sigret(struct rpmem_fip_lane *lanep, uint64_t sig, int ret)
{
	util_spin_lock(&lanep->lock);
	lanep->ret = ret;
	lanep->sync &= ~sig;
	util_spin_unlock(&lanep->lock);
}
| 2,754 | 20.523438 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/rpmem_common/rpmem_fip_msg.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_fip_msg.h -- simple wrappers for fi_rma(3) and fi_msg(3) functions
*/
#ifndef RPMEM_FIP_MSG_H
#define RPMEM_FIP_MSG_H 1
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_fip_rma -- helper struct for RMA operation
*/
struct rpmem_fip_rma {
struct fi_msg_rma msg; /* message structure */
struct iovec msg_iov; /* IO vector buffer */
struct fi_rma_iov rma_iov; /* RMA IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* RMA operation flags */
};
/*
* rpmem_fip_msg -- helper struct for MSG operation
*/
struct rpmem_fip_msg {
struct fi_msg msg; /* message structure */
struct iovec iov; /* IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* MSG operation flags */
};
/*
 * rpmem_fip_rma_init -- initialize RMA helper struct
 *
 * Fills in the fields that stay constant across operations (descriptor,
 * flags, remote key, destination address, completion context).  The
 * per-operation fields (local buffer, remote address, length) are set by
 * rpmem_fip_writemsg/rpmem_fip_readmsg.
 *
 * NOTE: msg.desc, msg.rma_iov and msg.msg_iov point into this very
 * structure, so it must not be copied or moved after initialization.
 */
static inline void
rpmem_fip_rma_init(struct rpmem_fip_rma *rma, void *desc,
	fi_addr_t addr, uint64_t rkey, void *context, uint64_t flags)
{
	memset(rma, 0, sizeof(*rma));
	rma->desc = desc;
	rma->flags = flags;
	rma->rma_iov.key = rkey;
	rma->msg.context = context;
	rma->msg.addr = addr;
	/* the message references the embedded iovecs and descriptor */
	rma->msg.desc = &rma->desc;
	rma->msg.rma_iov = &rma->rma_iov;
	rma->msg.rma_iov_count = 1;
	rma->msg.msg_iov = &rma->msg_iov;
	rma->msg.iov_count = 1;
}
/*
 * rpmem_fip_msg_init -- initialize MSG helper struct
 *
 * Binds a fixed local buffer of 'len' bytes to the message; send
 * operations may later shrink the effective length per call (see
 * rpmem_fip_sendmsg).
 *
 * NOTE: msg.desc and msg.msg_iov point into this very structure, so it
 * must not be copied or moved after initialization.
 */
static inline void
rpmem_fip_msg_init(struct rpmem_fip_msg *msg, void *desc, fi_addr_t addr,
	void *context, void *buff, size_t len, uint64_t flags)
{
	memset(msg, 0, sizeof(*msg));
	msg->desc = desc;
	msg->flags = flags;
	msg->iov.iov_base = buff;
	msg->iov.iov_len = len;
	msg->msg.context = context;
	msg->msg.addr = addr;
	/* the message references the embedded iovec and descriptor */
	msg->msg.desc = &msg->desc;
	msg->msg.msg_iov = &msg->iov;
	msg->msg.iov_count = 1;
}
/*
 * rpmem_fip_writemsg -- wrapper for fi_writemsg
 *
 * Writes 'len' bytes from the local buffer 'buff' to the remote address
 * 'addr' (interpreted under the rkey set in rpmem_fip_rma_init).
 * Returns whatever fi_writemsg returns (0 on success).
 */
static inline int
rpmem_fip_writemsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
	const void *buff, size_t len, uint64_t addr)
{
	rma->rma_iov.addr = addr;
	rma->rma_iov.len = len;
	/* iov_base is non-const by definition; the buffer is only read */
	rma->msg_iov.iov_base = (void *)buff;
	rma->msg_iov.iov_len = len;
	return (int)fi_writemsg(ep, &rma->msg, rma->flags);
}
/*
 * rpmem_fip_readmsg -- wrapper for fi_readmsg
 *
 * Reads 'len' bytes from the remote address 'addr' into the local buffer
 * 'buff', reusing the iovecs embedded in the RMA helper.  Returns
 * whatever fi_readmsg returns (0 on success).
 */
static inline int
rpmem_fip_readmsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
	void *buff, size_t len, uint64_t addr)
{
	/* local destination */
	rma->msg_iov.iov_base = buff;
	rma->msg_iov.iov_len = len;

	/* remote source */
	rma->rma_iov.addr = addr;
	rma->rma_iov.len = len;

	return (int)fi_readmsg(ep, &rma->msg, rma->flags);
}
/*
* rpmem_fip_sendmsg -- wrapper for fi_sendmsg
*/
static inline int
rpmem_fip_sendmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg, size_t len)
{
msg->iov.iov_len = len;
return (int)fi_sendmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_recvmsg -- wrapper for fi_recvmsg
*/
static inline int
rpmem_fip_recvmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg)
{
return (int)fi_recvmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_msg_get_pmsg -- returns message buffer as a persist message
*/
static inline struct rpmem_msg_persist *
rpmem_fip_msg_get_pmsg(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist *)msg->iov.iov_base;
}
/*
* rpmem_fip_msg_get_pres -- returns message buffer as a persist response
*/
static inline struct rpmem_msg_persist_resp *
rpmem_fip_msg_get_pres(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist_resp *)msg->iov.iov_base;
}
#ifdef __cplusplus
}
#endif
#endif
| 3,494 | 22.77551 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/libpmempool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool.c -- entry points for libpmempool
*/
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/param.h>
#include "pmemcommon.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check.h"
#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_util.h"
#endif
#ifdef _WIN32
#define ANSWER_BUFFSIZE 256
#endif
/*
 * libpmempool_init -- load-time initialization for libpmempool
 *
 * Called automatically by the run-time loader.
 * Initializes the common logging/out subsystem first so that the LOG
 * call below (and everything after it) can emit messages.  The remote
 * replication helpers are set up only when built with USE_RPMEM.
 */
ATTR_CONSTRUCTOR
void
libpmempool_init(void)
{
	common_init(PMEMPOOL_LOG_PREFIX, PMEMPOOL_LOG_LEVEL_VAR,
		PMEMPOOL_LOG_FILE_VAR, PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION);
	LOG(3, NULL);
#ifdef USE_RPMEM
	util_remote_init();
	rpmem_util_cmds_init();
#endif
}
/*
 * libpmempool_fini -- libpmempool cleanup routine
 *
 * Called automatically when the process terminates.
 * Tears down the optional remote replication helpers first (mirroring
 * libpmempool_init), then the common logging/out subsystem.
 */
ATTR_DESTRUCTOR
void
libpmempool_fini(void)
{
	LOG(3, NULL);
#ifdef USE_RPMEM
	util_remote_unload();
	util_remote_fini();
	rpmem_util_cmds_fini();
#endif
	common_fini();
}
/*
 * pmempool_check_versionU -- see if library meets application version
 * requirements
 *
 * Returns NULL when the library is compatible (major matches exactly,
 * library minor >= required minor); otherwise returns a pointer to a
 * static error message (do not free).
 */
#ifndef _WIN32
static inline
#endif
const char *
pmempool_check_versionU(unsigned major_required, unsigned minor_required)
{
	LOG(3, "major_required %u minor_required %u",
		major_required, minor_required);
	/* the major version must match exactly */
	if (major_required != PMEMPOOL_MAJOR_VERSION) {
		ERR("libpmempool major version mismatch (need %u, found %u)",
			major_required, PMEMPOOL_MAJOR_VERSION);
		return out_get_errormsg();
	}
	/* the library may provide a newer minor version than required */
	if (minor_required > PMEMPOOL_MINOR_VERSION) {
		ERR("libpmempool minor version mismatch (need %u, found %u)",
			minor_required, PMEMPOOL_MINOR_VERSION);
		return out_get_errormsg();
	}
	return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_version -- see if lib meets application version requirements
*/
const char *
pmempool_check_version(unsigned major_required, unsigned minor_required)
{
return pmempool_check_versionU(major_required, minor_required);
}
#else
/*
* pmempool_check_versionW -- see if library meets application version
* requirements as widechar
*/
const wchar_t *
pmempool_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmempool_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmempool_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmempool_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmempool_errormsg -- return last error message
*/
const char *
pmempool_errormsg(void)
{
return pmempool_errormsgU();
}
#else
/*
* pmempool_errormsgW -- return last error message as widechar
*/
const wchar_t *
pmempool_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
/*
 * pmempool_ppc_set_default -- (internal) set default values of check context
 *
 * Zeroes the whole context and then sets the only two non-zero defaults:
 * pool type auto-detection and a "consistent" initial result.
 */
static void
pmempool_ppc_set_default(PMEMpoolcheck *ppc)
{
	/* all fields not mentioned below start zeroed */
	*ppc = (PMEMpoolcheck) {
		.args = {
			.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		},
		.result = CHECK_RESULT_CONSISTENT,
	};
}
/*
 * pmempool_check_initU -- initialize check context
 *
 * Validates the caller-supplied arguments, allocates a PMEMpoolcheck
 * context, duplicates the path strings (so the caller's args may be
 * freed), and runs check_init.  On failure returns NULL with errno set
 * (EINVAL for invalid arguments; otherwise whatever the failing call
 * set).  The returned context must be released with pmempool_check_end.
 */
#ifndef _WIN32
static inline
#endif
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size)
{
	LOG(3, "path %s backup_path %s pool_type %u flags %x", args->path,
		args->backup_path, args->pool_type, args->flags);
	/*
	 * Currently only one size of the args structure is supported.  The
	 * version of the pmempool_check_args structure can be distinguished
	 * based on the provided args_size.
	 */
	if (args_size < sizeof(struct pmempool_check_args)) {
		ERR("provided args_size is not supported");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * A dry run makes no changes, advanced enables more complex repairs,
	 * and questions are asked only when repairs are made -- so dry_run,
	 * advanced and always_yes are only meaningful when repair is set.
	 */
	if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_REPAIR) &&
			util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN |
			PMEMPOOL_CHECK_ADVANCED | PMEMPOOL_CHECK_ALWAYS_YES)) {
		ERR("dry_run, advanced and always_yes are applicable only if "
			"repair is set");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * A dry run does not modify anything, so performing a backup would be
	 * redundant.
	 */
	if (util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN) &&
			args->backup_path != NULL) {
		ERR("dry run does not allow one to perform backup");
		errno = EINVAL;
		return NULL;
	}
	/*
	 * libpmempool communicates statuses as strings, so this format flag
	 * is mandatory.
	 */
	if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_FORMAT_STR)) {
		ERR("PMEMPOOL_CHECK_FORMAT_STR flag must be set");
		errno = EINVAL;
		return NULL;
	}
	PMEMpoolcheck *ppc = calloc(1, sizeof(*ppc));
	if (ppc == NULL) {
		ERR("!calloc");
		return NULL;
	}
	pmempool_ppc_set_default(ppc);
	memcpy(&ppc->args, args, sizeof(ppc->args));
	/* duplicate the paths so the context owns its own copies */
	ppc->path = strdup(args->path);
	if (!ppc->path) {
		ERR("!strdup");
		goto error_path_malloc;
	}
	ppc->args.path = ppc->path;
	if (args->backup_path != NULL) {
		ppc->backup_path = strdup(args->backup_path);
		if (!ppc->backup_path) {
			ERR("!strdup");
			goto error_backup_path_malloc;
		}
		ppc->args.backup_path = ppc->backup_path;
	}
	if (check_init(ppc) != 0)
		goto error_check_init;
	return ppc;
/* unwind in reverse order of acquisition; free(NULL) is a no-op */
error_check_init:
	/* in case errno was not set by any of the used functions, set it */
	if (errno == 0)
		errno = EINVAL;
	free(ppc->backup_path);
error_backup_path_malloc:
	free(ppc->path);
error_path_malloc:
	free(ppc);
	return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_init -- initialize check context
*/
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size)
{
return pmempool_check_initU(args, args_size);
}
#else
/*
* pmempool_check_initW -- initialize check context as widechar
*/
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size)
{
char *upath = util_toUTF8(args->path);
if (upath == NULL)
return NULL;
char *ubackup_path = NULL;
if (args->backup_path != NULL) {
ubackup_path = util_toUTF8(args->backup_path);
if (ubackup_path == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
struct pmempool_check_argsU uargs = {
.path = upath,
.backup_path = ubackup_path,
.pool_type = args->pool_type,
.flags = args->flags
};
PMEMpoolcheck *ret = pmempool_check_initU(&uargs, args_size);
util_free_UTF8(ubackup_path);
util_free_UTF8(upath);
return ret;
}
#endif
/*
 * pmempool_checkU -- run check steps until one produces a status for the
 * caller to consume; returns NULL once the whole check has completed
 */
#ifndef _WIN32
static inline
#endif
struct pmempool_check_statusU *
pmempool_checkU(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	ASSERTne(ppc, NULL);

	for (;;) {
		struct check_status *status = check_step(ppc);

		if (status != NULL)
			return check_status_get(status);

		/* no status produced and no steps left -- check finished */
		if (check_is_end(ppc->data))
			return NULL;
	}
}
#ifndef _WIN32
/*
* pmempool_check -- continue check till produce status to consume for caller
*/
struct pmempool_check_status *
pmempool_check(PMEMpoolcheck *ppc)
{
return pmempool_checkU(ppc);
}
#else
/*
* pmempool_checkW -- continue check till produce status to consume for caller
*/
struct pmempool_check_statusW *
pmempool_checkW(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
ASSERTne(ppc, NULL);
/* check the cache and convert msg and answer */
char buf[ANSWER_BUFFSIZE];
memset(buf, 0, ANSWER_BUFFSIZE);
convert_status_cache(ppc, buf, ANSWER_BUFFSIZE);
struct check_status *uresult;
do {
uresult = check_step(ppc);
if (check_is_end(ppc->data) && uresult == NULL)
return NULL;
} while (uresult == NULL);
struct pmempool_check_statusU *uret_res = check_status_get(uresult);
const wchar_t *wmsg = util_toUTF16(uret_res->str.msg);
if (wmsg == NULL)
FATAL("!malloc");
struct pmempool_check_statusW *wret_res =
(struct pmempool_check_statusW *)uret_res;
/* pointer to old message is freed in next check step */
wret_res->str.msg = wmsg;
return wret_res;
}
#endif
/*
 * pmempool_check_end -- end check and release check context
 *
 * Translates the internal check result into the public enum and frees
 * every resource owned by the context.
 */
enum pmempool_check_result
pmempool_check_end(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);

	/* capture the verdict before the context is torn down */
	const enum check_result result = ppc->result;
	const unsigned sync_required = ppc->sync_required;

	check_fini(ppc);
	free(ppc->path);
	free(ppc->backup_path);
	free(ppc);

	/*
	 * A consistent or repaired pool that still needs synchronization is
	 * reported as SYNC_REQ; any other result must be fixed prior to sync.
	 */
	if (sync_required && (result == CHECK_RESULT_CONSISTENT ||
			result == CHECK_RESULT_REPAIRED))
		return PMEMPOOL_CHECK_RESULT_SYNC_REQ;

	switch (result) {
	case CHECK_RESULT_CONSISTENT:
		return PMEMPOOL_CHECK_RESULT_CONSISTENT;
	case CHECK_RESULT_NOT_CONSISTENT:
		return PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT;
	case CHECK_RESULT_REPAIRED:
		return PMEMPOOL_CHECK_RESULT_REPAIRED;
	case CHECK_RESULT_CANNOT_REPAIR:
		return PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR;
	default:
		return PMEMPOOL_CHECK_RESULT_ERROR;
	}
}
| 9,142 | 20.873206 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/replica.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* replica.h -- module for synchronizing and transforming poolset
*/
#ifndef REPLICA_H
#define REPLICA_H
#include "libpmempool.h"
#include "pool.h"
#include "badblocks.h"
#ifdef __cplusplus
extern "C" {
#endif
#define UNDEF_REPLICA UINT_MAX
#define UNDEF_PART UINT_MAX
/*
* A part marked as broken does not exist or is damaged so that
* it cannot be opened and has to be recreated.
*/
#define IS_BROKEN (1U << 0)
/*
* A replica marked as inconsistent exists but has inconsistent metadata
* (e.g. inconsistent parts or replicas linkage)
*/
#define IS_INCONSISTENT (1U << 1)
/*
* A part or replica marked in this way has bad blocks inside.
*/
#define HAS_BAD_BLOCKS (1U << 2)
/*
* A part marked in this way has bad blocks in the header
*/
#define HAS_CORRUPTED_HEADER (1U << 3)
/*
* A flag which can be passed to sync_replica() to indicate that the function is
* called by pmempool_transform
*/
#define IS_TRANSFORMED (1U << 10)
/*
* Number of lanes utilized when working with remote replicas
*/
#define REMOTE_NLANES 1
/*
* Helping structures for storing part's health status
*/
struct part_health_status {
unsigned flags;
struct badblocks bbs; /* structure with bad blocks */
char *recovery_file_name; /* name of bad block recovery file */
int recovery_file_exists; /* bad block recovery file exists */
};
/*
* Helping structures for storing replica and poolset's health status
*/
struct replica_health_status {
unsigned nparts;
unsigned nhdrs;
/* a flag for the replica */
unsigned flags;
/* effective size of a pool, valid only for healthy replica */
size_t pool_size;
/* flags for each part */
struct part_health_status part[];
};
struct poolset_health_status {
unsigned nreplicas;
/* a flag for the poolset */
unsigned flags;
/* health statuses for each replica */
struct replica_health_status *replica[];
};
/*
 * get index of the (r)th replica health status
 *
 * The modulo wraps indices >= nreplicas back into range, so callers may
 * pass the result of "current + offset" arithmetic directly.  The assert
 * guards against division by zero on an empty poolset.
 */
static inline unsigned
REP_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);
	return (set->nreplicas + r) % set->nreplicas;
}
/* get index of the (r + 1)th replica health status */
static inline unsigned
REPN_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r + 1) % set->nreplicas;
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTHidx(struct replica_health_status *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (rep->nparts + p) % rep->nparts;
}
/* get (r)th replica health status */
static inline struct replica_health_status *
REP_HEALTH(struct poolset_health_status *set, unsigned r)
{
return set->replica[REP_HEALTHidx(set, r)];
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTH(struct replica_health_status *rep, unsigned p)
{
return rep->part[PART_HEALTHidx(rep, p)].flags;
}
uint64_t replica_get_part_offset(struct pool_set *set,
unsigned repn, unsigned partn);
void replica_align_badblock_offset_length(size_t *offset, size_t *length,
struct pool_set *set_in, unsigned repn, unsigned partn);
size_t replica_get_part_data_len(struct pool_set *set_in, unsigned repn,
unsigned partn);
uint64_t replica_get_part_data_offset(struct pool_set *set_in, unsigned repn,
unsigned part);
/*
 * is_dry_run -- (internal) check whether only verification mode is enabled
 *
 * Returns nonzero when the dry-run flag is set, i.e. no changes should be
 * written.
 */
static inline bool
is_dry_run(unsigned flags)
{
	/*
	 * PMEMPOOL_SYNC_DRY_RUN and PMEMPOOL_TRANSFORM_DRY_RUN
	 * have to have the same value in order to use this common function.
	 */
	ASSERT_COMPILE_ERROR_ON(PMEMPOOL_SYNC_DRY_RUN !=
		PMEMPOOL_TRANSFORM_DRY_RUN);
	return flags & PMEMPOOL_SYNC_DRY_RUN;
}
/*
 * fix_bad_blocks -- (internal) check whether bad-block fixing was requested;
 *                   when set, bad-block recovery files are read if they
 *                   already exist or created otherwise
 */
static inline bool
fix_bad_blocks(unsigned flags)
{
	return (flags & PMEMPOOL_SYNC_FIX_BAD_BLOCKS) != 0;
}
/* bad-block recovery file management */
int replica_remove_all_recovery_files(struct poolset_health_status *set_hs);
int replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
		int fix_bad_blocks);
/* poolset health status lifecycle */
int replica_create_poolset_health_status(struct pool_set *set,
		struct poolset_health_status **set_hsp);
void replica_free_poolset_health_status(struct poolset_health_status *set_s);
int replica_check_poolset_health(struct pool_set *set,
		struct poolset_health_status **set_hs,
		int called_from_sync, unsigned flags);
/* health status predicates and queries */
int replica_is_part_broken(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
int replica_has_bad_blocks(unsigned repn, struct poolset_health_status *set_hs);
int replica_part_has_bad_blocks(struct part_health_status *phs);
int replica_part_has_corrupted_header(unsigned repn, unsigned partn,
		struct poolset_health_status *set_hs);
unsigned replica_find_unbroken_part(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_broken(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_consistent(unsigned repn,
		struct poolset_health_status *set_hs);
int replica_is_replica_healthy(unsigned repn,
		struct poolset_health_status *set_hs);
unsigned replica_find_healthy_replica(
		struct poolset_health_status *set_hs);
unsigned replica_find_replica_healthy_header(
		struct poolset_health_status *set_hs);
int replica_is_poolset_healthy(struct poolset_health_status *set_hs);
int replica_is_poolset_transformed(unsigned flags);
/* poolset/part validation helpers */
ssize_t replica_get_pool_size(struct pool_set *set, unsigned repn);
int replica_check_part_sizes(struct pool_set *set, size_t min_size);
int replica_check_part_dirs(struct pool_set *set);
int replica_check_local_part_dir(struct pool_set *set, unsigned repn,
		unsigned partn);
int replica_open_replica_part_files(struct pool_set *set, unsigned repn);
int replica_open_poolset_part_files(struct pool_set *set);
/* top-level sync/transform entry points */
int replica_sync(struct pool_set *set_in, struct poolset_health_status *set_hs,
		unsigned flags);
int replica_transform(struct pool_set *set_in, struct pool_set *set_out,
		unsigned flags);
#ifdef __cplusplus
}
#endif
#endif
| 6,216 | 28.325472 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_blk.c -- check pmemblk
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* repair questions this module can ask the user */
enum question {
	Q_BLK_BSIZE,
};
/*
 * blk_get_max_bsize -- (internal) return maximum size of block for given file
 *	size
 *
 * Derives the largest internal LBA size that still allows the minimum BTT
 * layout (2 * nfree blocks) to fit in the arena.  Returns 0 for an empty
 * file.
 */
static inline uint32_t
blk_get_max_bsize(uint64_t fsize)
{
	LOG(3, NULL);
	if (fsize == 0)
		return 0;
	/* default nfree */
	uint32_t nfree = BTT_DEFAULT_NFREE;
	/* number of blocks must be at least 2 * nfree */
	uint32_t internal_nlba = 2 * nfree;
	/* compute arena size from file size without pmemblk structure */
	/* NOTE(review): assumes fsize > sizeof(struct pmemblk) -- confirm */
	uint64_t arena_size = fsize - sizeof(struct pmemblk);
	if (arena_size > BTT_MAX_ARENA)
		arena_size = BTT_MAX_ARENA;
	arena_size = btt_arena_datasize(arena_size, nfree);
	/* compute maximum internal LBA size (data + one map entry per block) */
	uint64_t internal_lbasize = (arena_size - BTT_ALIGNMENT) /
		internal_nlba - BTT_MAP_ENTRY_SIZE;
	ASSERT(internal_lbasize <= UINT32_MAX);
	if (internal_lbasize < BTT_MIN_LBA_SIZE)
		internal_lbasize = BTT_MIN_LBA_SIZE;
	/* round down to the previous aligned value (roundup then subtract) */
	internal_lbasize = roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT)
		- BTT_INTERNAL_LBA_ALIGNMENT;
	return (uint32_t)internal_lbasize;
}
/*
 * blk_read -- (internal) read pmemblk header
 *
 * Returns 0 on success; on read failure reports an error status and
 * returns its (nonzero) result.
 */
static int
blk_read(PMEMpoolcheck *ppc)
{
	/*
	 * Here we want to read the pmemblk header without the pool_hdr as we've
	 * already done it before.
	 *
	 * Take the pointer to fields right after pool_hdr, compute the size and
	 * offset of remaining fields.
	 */
	uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.blk;
	ptr += sizeof(ppc->pool->hdr.blk.hdr);
	size_t size = sizeof(ppc->pool->hdr.blk) -
		sizeof(ppc->pool->hdr.blk.hdr);
	uint64_t offset = sizeof(ppc->pool->hdr.blk.hdr);
	if (pool_read(ppc->pool, ptr, size, offset)) {
		return CHECK_ERR(ppc, "cannot read pmemblk structure");
	}
	/* endianness conversion */
	ppc->pool->hdr.blk.bsize = le32toh(ppc->pool->hdr.blk.bsize);
	return 0;
}
/*
 * blk_bsize_valid -- (internal) check if block size is valid for given file
 *	size
 *
 * NOTE(review): returns nonzero when bsize >= the computed maximum; the
 * caller in blk_hdr_check treats a nonzero result as *invalid* bsize, so
 * the function name reads inverted -- confirm intended semantics.
 */
static int
blk_bsize_valid(uint32_t bsize, uint64_t fsize)
{
	uint32_t max_bsize = blk_get_max_bsize(fsize);
	return (bsize >= max_bsize);
}
/*
 * blk_hdr_check -- (internal) check pmemblk header
 *
 * Validates pmemblk.bsize either against a valid BTT Info arena (preferred
 * source of truth) or, when none exists, against size limits derived from
 * the file size.  May queue a repair question; returns nonzero on error or
 * when processing cannot continue.
 */
static int
blk_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	CHECK_INFO(ppc, "checking pmemblk header");
	if (blk_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	/* check for valid BTT Info arena as we can take bsize from it */
	if (!ppc->pool->bttc.valid)
		pool_blk_get_first_valid_arena(ppc->pool, &ppc->pool->bttc);
	if (ppc->pool->bttc.valid) {
		/* trust the BTT Info's external LBA size over the header */
		const uint32_t btt_bsize =
			ppc->pool->bttc.btt_info.external_lbasize;
		if (ppc->pool->hdr.blk.bsize != btt_bsize) {
			CHECK_ASK(ppc, Q_BLK_BSIZE,
				"invalid pmemblk.bsize.|Do you want to set "
				"pmemblk.bsize to %u from BTT Info?",
				btt_bsize);
		}
	} else if (!ppc->pool->bttc.zeroed) {
		/* no valid arena: bsize must at least be range-plausible */
		if (ppc->pool->hdr.blk.bsize < BTT_MIN_LBA_SIZE ||
			blk_bsize_valid(ppc->pool->hdr.blk.bsize,
			ppc->pool->set_file->size)) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			return CHECK_ERR(ppc, "invalid pmemblk.bsize");
		}
	}
	if (ppc->result == CHECK_RESULT_CONSISTENT ||
		ppc->result == CHECK_RESULT_REPAIRED)
		CHECK_INFO(ppc, "pmemblk header correct");
	return check_questions_sequence_validate(ppc);
}
/*
 * blk_hdr_fix -- (internal) fix pmemblk header
 *
 * Applies the answer to a previously asked repair question; currently only
 * Q_BLK_BSIZE is supported (overwrite pmemblk.bsize with the external LBA
 * size taken from a valid BTT Info arena).  Always returns 0.
 */
static int
blk_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
	LOG(3, NULL);
	uint32_t btt_bsize;
	switch (question) {
	case Q_BLK_BSIZE:
		/*
		 * check for valid BTT Info arena as we can take bsize from it
		 */
		if (!ppc->pool->bttc.valid)
			pool_blk_get_first_valid_arena(ppc->pool,
				&ppc->pool->bttc);
		btt_bsize = ppc->pool->bttc.btt_info.external_lbasize;
		/* message fixed to match the actual field name "bsize" */
		CHECK_INFO(ppc, "setting pmemblk.bsize to 0x%x", btt_bsize);
		ppc->pool->hdr.blk.bsize = btt_bsize;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/* one check step: either a check callback or a fix callback, never both */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	enum pool_type type;
};
/* check/fix pairs executed in order; NULL/NULL entry terminates the list */
static const struct step steps[] = {
	{
		.check	= blk_hdr_check,
		.type	= POOL_TYPE_BLK
	},
	{
		.fix	= blk_hdr_fix,
		.type	= POOL_TYPE_BLK
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Advances loc->step; fix steps re-read the pmemblk header first so the
 * fix callback operates on fresh data.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	ASSERTeq(ppc->pool->params.type, POOL_TYPE_BLK);
	const struct step *step = &steps[loc->step++];
	if (!(step->type & ppc->pool->params.type))
		return 0;
	if (!step->fix)
		return step->check(ppc, loc);
	if (blk_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
 * check_blk -- entry point for pmemblk checks
 *
 * Runs the configured check/fix steps in sequence, stopping early as soon
 * as any step reports a failure.
 */
void
check_blk(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	for (;;) {
		if (!CHECK_NOT_COMPLETE(loc, steps))
			break;
		if (step_exe(ppc, loc) != 0)
			break;
	}
}
| 5,277 | 21.176471 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_sds.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* check_shutdown_state.c -- shutdown state check
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <endian.h>
#include "out.h"
#include "util_pmem.h"
#include "libpmempool.h"
#include "libpmem.h"
#include "pmempool.h"
#include "pool.h"
#include "set.h"
#include "check_util.h"
/* repair questions this module can ask the user */
enum question {
	Q_RESET_SDS,
};
/* user-visible message fragments for shutdown-state (SDS) diagnostics */
#define SDS_CHECK_STR "checking shutdown state"
#define SDS_OK_STR "shutdown state correct"
#define SDS_DIRTY_STR "shutdown state is dirty"
#define ADR_FAILURE_STR \
	"an ADR failure was detected - your pool might be corrupted"
#define ZERO_SDS_STR \
	"Do you want to zero shutdown state?"
#define RESET_SDS_STR \
	"Do you want to reset shutdown state at your own risk? " \
	"If you have more then one replica you will have to " \
	"synchronize your pool after this operation."
/* pick a message depending on whether SDS is ignored for this header */
#define SDS_FAIL_MSG(hdrp) \
	IGNORE_SDS(hdrp) ? SDS_DIRTY_STR : ADR_FAILURE_STR
#define SDS_REPAIR_MSG(hdrp) \
	IGNORE_SDS(hdrp) \
		? SDS_DIRTY_STR ".|" ZERO_SDS_STR \
		: ADR_FAILURE_STR ".|" RESET_SDS_STR
/*
 * sds_check_replica -- (internal) check if replica is healthy
 *
 * Returns 0 when the replica's recorded shutdown state matches the current
 * device state (or the replica is remote); nonzero otherwise.
 */
static int
sds_check_replica(location *loc)
{
	LOG(3, NULL);
	struct pool_replica *rep = REP(loc->set, loc->replica);
	/* remote replicas have no local shutdown state to verify */
	if (rep->remote)
		return 0;
	/* make a copy of sds as we shouldn't modify a pool */
	struct shutdown_state old_sds = loc->hdr.sds;
	struct shutdown_state curr_sds;
	/* when SDS is ignored, the stored state must be all-zero */
	if (IGNORE_SDS(&loc->hdr))
		return util_is_zeroed(&old_sds, sizeof(old_sds)) ? 0 : -1;
	shutdown_state_init(&curr_sds, NULL);
	/* get current shutdown state */
	for (unsigned p = 0; p < rep->nparts; ++p) {
		if (shutdown_state_add_part(&curr_sds,
				PART(rep, p)->fd, NULL))
			return -1;
	}
	/* compare current and old shutdown state */
	return shutdown_state_check(&curr_sds, &old_sds, NULL);
}
/*
 * sds_check -- (internal) check shutdown_state
 *
 * When the state is invalid: without REPAIR the check ends with
 * CHECK_RESULT_NOT_CONSISTENT; with REPAIR a reset question is queued.
 */
static int
sds_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	CHECK_INFO(ppc, "%s" SDS_CHECK_STR, loc->prefix);
	/* shutdown state is valid */
	if (!sds_check_replica(loc)) {
		CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}
	/* shutdown state is NOT valid and can NOT be repaired */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		check_end(ppc->data);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		return CHECK_ERR(ppc, "%s%s", loc->prefix,
			SDS_FAIL_MSG(&loc->hdr));
	}
	/* shutdown state is NOT valid but can be repaired */
	CHECK_ASK(ppc, Q_RESET_SDS, "%s%s", loc->prefix,
		SDS_REPAIR_MSG(&loc->hdr));
	return check_questions_sequence_validate(ppc);
}
/*
 * sds_fix -- (internal) fix shutdown state
 *
 * Q_RESET_SDS zeroes the in-memory copy of pool_hdr.sds (persisted later by
 * step_exe) and counts this replica as healthy.  Always returns 0.
 */
static int
sds_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *context)
{
	LOG(3, NULL);
	switch (question) {
	case Q_RESET_SDS:
		CHECK_INFO(ppc, "%sresetting pool_hdr.sds", loc->prefix);
		memset(&loc->hdr.sds, 0, sizeof(loc->hdr.sds));
		++loc->healthy_replicas;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/* one check step: either a check callback or a fix callback, never both */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
/* executed in order; the NULL/NULL entry terminates the list */
static const struct step steps[] = {
	{
		.check	= sds_check,
	},
	{
		.fix	= sds_fix,
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * NOTE: the `steps` parameter intentionally shadows the file-scope array.
 * After a successful fix, the modified header copy is converted back to
 * little-endian, written to the mapped header and persisted.
 */
static int
step_exe(PMEMpoolcheck *ppc, const struct step *steps, location *loc)
{
	const struct step *step = &steps[loc->step++];
	if (!step->fix)
		return step->check(ppc, loc);
	if (!check_has_answer(ppc->data))
		return 0;
	if (check_answer_loop(ppc, loc, NULL, 0 /* fail on no */, step->fix))
		return -1;
	util_convert2le_hdr(&loc->hdr);
	memcpy(loc->hdrp, &loc->hdr, sizeof(loc->hdr));
	util_persist_auto(loc->is_dev_dax, loc->hdrp, sizeof(*loc->hdrp));
	util_convert2h_hdr_nocheck(&loc->hdr);
	loc->pool_hdr_modified = 1;
	return 0;
}
/*
 * init_prefix -- prepare prefix for messages
 *
 * With multiple replicas every message is prefixed with "replica N: ";
 * with a single replica the prefix is empty.  Also resets the step index.
 */
static void
init_prefix(location *loc)
{
	if (loc->set->nreplicas > 1) {
		int ret = util_snprintf(loc->prefix, PREFIX_MAX_SIZE,
			"replica %u: ",
			loc->replica);
		if (ret < 0)
			FATAL("!snprintf");
	} else
		loc->prefix[0] = '\0';
	loc->step = 0;
}
/*
 * init_location_data -- (internal) prepare location information
 *
 * Points loc at the current replica's first-part header and caches a
 * host-byte-order copy of it in loc->hdr.
 */
static void
init_location_data(PMEMpoolcheck *ppc, location *loc)
{
	ASSERTeq(loc->part, 0);
	loc->set = ppc->pool->set_file->poolset;
	/* while answers are being processed the prefix is already set up */
	if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS)
		init_prefix(loc);
	struct pool_replica *rep = REP(loc->set, loc->replica);
	loc->hdrp = HDR(rep, loc->part);
	memcpy(&loc->hdr, loc->hdrp, sizeof(loc->hdr));
	util_convert2h_hdr_nocheck(&loc->hdr);
	loc->is_dev_dax = PART(rep, 0)->is_dev_dax;
}
/*
 * sds_get_healthy_replicas_num -- (internal) get number of healthy replicas
 *
 * Scans all replicas, storing the count in loc->healthy_replicas, and
 * leaves loc->replica reset to 0 for the subsequent repair pass.
 */
static void
sds_get_healthy_replicas_num(PMEMpoolcheck *ppc, location *loc)
{
	const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
	loc->healthy_replicas = 0;
	loc->part = 0;
	for (; loc->replica < nreplicas; loc->replica++) {
		init_location_data(ppc, loc);
		if (!sds_check_replica(loc)) {
			++loc->healthy_replicas; /* healthy replica found */
		}
	}
	loc->replica = 0; /* reset replica index */
}
/*
 * check_sds -- entry point for shutdown state checks
 *
 * First pass counts healthy replicas: if all are healthy only a summary is
 * printed; if some (but not all) are healthy, a pool sync is requested.
 * Otherwise a repair pass tries to produce at least one healthy replica.
 */
void
check_sds(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
	location *loc = check_get_step_data(ppc->data);
	if (!loc->init_done) {
		sds_get_healthy_replicas_num(ppc, loc);
		if (loc->healthy_replicas == nreplicas) {
			/* all replicas have healthy shutdown state */
			/* print summary */
			for (; loc->replica < nreplicas; loc->replica++) {
				init_prefix(loc);
				CHECK_INFO(ppc, "%s" SDS_CHECK_STR,
					loc->prefix);
				CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
			}
			return;
		} else if (loc->healthy_replicas > 0) {
			/* mixed state -- let sync reconcile the replicas */
			ppc->sync_required = true;
			return;
		}
		loc->init_done = true;
	}
	/* produce single healthy replica */
	loc->part = 0;
	for (; loc->replica < nreplicas; loc->replica++) {
		init_location_data(ppc, loc);
		while (CHECK_NOT_COMPLETE(loc, steps)) {
			ASSERT(loc->step < ARRAY_SIZE(steps));
			if (step_exe(ppc, steps, loc))
				return;
		}
		/* one healthy replica is enough; stop the repair pass */
		if (loc->healthy_replicas)
			break;
	}
	if (loc->healthy_replicas == 0) {
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		CHECK_ERR(ppc, "cannot complete repair, reverting changes");
	} else if (loc->healthy_replicas < nreplicas) {
		ppc->sync_required = true;
	}
}
| 6,571 | 21.662069 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_log.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_log.c -- check pmemlog
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* repair questions this module can ask the user */
enum question {
	Q_LOG_START_OFFSET,
	Q_LOG_END_OFFSET,
	Q_LOG_WRITE_OFFSET,
};
/*
 * log_read -- (internal) read pmemlog header
 *
 * Returns 0 on success; on read failure reports an error status and
 * returns its (nonzero) result.
 */
static int
log_read(PMEMpoolcheck *ppc)
{
	/*
	 * Here we want to read the pmemlog header without the pool_hdr as we've
	 * already done it before.
	 *
	 * Take the pointer to fields right after pool_hdr, compute the size and
	 * offset of remaining fields.
	 */
	uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.log;
	ptr += sizeof(ppc->pool->hdr.log.hdr);
	size_t size = sizeof(ppc->pool->hdr.log) -
		sizeof(ppc->pool->hdr.log.hdr);
	uint64_t offset = sizeof(ppc->pool->hdr.log.hdr);
	if (pool_read(ppc->pool, ptr, size, offset))
		return CHECK_ERR(ppc, "cannot read pmemlog structure");
	/* endianness conversion */
	log_convert2h(&ppc->pool->hdr.log);
	return 0;
}
/*
 * log_hdr_check -- (internal) check pmemlog header
 *
 * Validates start_offset, end_offset and write_offset against values
 * derived from the header size and the file size; each mismatch queues a
 * repair question.  Returns nonzero on error or when repair is declined.
 */
static int
log_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	CHECK_INFO(ppc, "checking pmemlog header");
	if (log_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	/* determine constant values for pmemlog */
	const uint64_t d_start_offset =
		roundup(sizeof(ppc->pool->hdr.log), LOG_FORMAT_DATA_ALIGN);
	if (ppc->pool->hdr.log.start_offset != d_start_offset) {
		if (CHECK_ASK(ppc, Q_LOG_START_OFFSET,
				"invalid pmemlog.start_offset: 0x%jx.|Do you "
				"want to set pmemlog.start_offset to default "
				"0x%jx?",
				ppc->pool->hdr.log.start_offset,
				d_start_offset))
			goto error;
	}
	if (ppc->pool->hdr.log.end_offset != ppc->pool->set_file->size) {
		if (CHECK_ASK(ppc, Q_LOG_END_OFFSET,
				"invalid pmemlog.end_offset: 0x%jx.|Do you "
				"want to set pmemlog.end_offset to 0x%jx?",
				ppc->pool->hdr.log.end_offset,
				ppc->pool->set_file->size))
			goto error;
	}
	/* write_offset must lie within [start_offset, file size] */
	if (ppc->pool->hdr.log.write_offset < d_start_offset ||
		ppc->pool->hdr.log.write_offset > ppc->pool->set_file->size) {
		if (CHECK_ASK(ppc, Q_LOG_WRITE_OFFSET,
				"invalid pmemlog.write_offset: 0x%jx.|Do you "
				"want to set pmemlog.write_offset to "
				"pmemlog.end_offset?",
				ppc->pool->hdr.log.write_offset))
			goto error;
	}
	if (ppc->result == CHECK_RESULT_CONSISTENT ||
		ppc->result == CHECK_RESULT_REPAIRED)
		CHECK_INFO(ppc, "pmemlog header correct");
	return check_questions_sequence_validate(ppc);
error:
	ppc->result = CHECK_RESULT_NOT_CONSISTENT;
	check_end(ppc->data);
	return -1;
}
/*
 * log_hdr_fix -- (internal) fix pmemlog header
 *
 * Applies the answer to one of the Q_LOG_* questions by overwriting the
 * corresponding offset field.  Always returns 0.
 */
static int
log_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
	LOG(3, NULL);
	uint64_t d_start_offset;
	switch (question) {
	case Q_LOG_START_OFFSET:
		/* determine constant values for pmemlog */
		d_start_offset = roundup(sizeof(ppc->pool->hdr.log),
			LOG_FORMAT_DATA_ALIGN);
		CHECK_INFO(ppc, "setting pmemlog.start_offset to 0x%jx",
			d_start_offset);
		ppc->pool->hdr.log.start_offset = d_start_offset;
		break;
	case Q_LOG_END_OFFSET:
		CHECK_INFO(ppc, "setting pmemlog.end_offset to 0x%jx",
			ppc->pool->set_file->size);
		ppc->pool->hdr.log.end_offset = ppc->pool->set_file->size;
		break;
	case Q_LOG_WRITE_OFFSET:
		CHECK_INFO(ppc, "setting pmemlog.write_offset to "
			"pmemlog.end_offset");
		ppc->pool->hdr.log.write_offset = ppc->pool->set_file->size;
		break;
	default:
		ERR("not implemented question id: %u", question);
	}
	return 0;
}
/* one check step: either a check callback or a fix callback, never both */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	enum pool_type type;
};
/* check/fix pairs executed in order; NULL/NULL entry terminates the list */
static const struct step steps[] = {
	{
		.check	= log_hdr_check,
		.type	= POOL_TYPE_LOG
	},
	{
		.fix	= log_hdr_fix,
		.type	= POOL_TYPE_LOG
	},
	{
		.check	= NULL,
		.fix	= NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Advances loc->step; fix steps re-read the pmemlog header first so the
 * fix callback operates on fresh data.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	ASSERTeq(ppc->pool->params.type, POOL_TYPE_LOG);
	const struct step *step = &steps[loc->step++];
	if (!(step->type & ppc->pool->params.type))
		return 0;
	if (!step->fix)
		return step->check(ppc, loc);
	if (log_read(ppc)) {
		ppc->result = CHECK_RESULT_ERROR;
		return -1;
	}
	return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
 * check_log -- entry point for pmemlog checks
 *
 * Runs the configured check/fix steps in sequence, stopping early as soon
 * as any step reports a failure.
 */
void
check_log(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	for (;;) {
		if (!CHECK_NOT_COMPLETE(loc, steps))
			break;
		if (step_exe(ppc, loc) != 0)
			break;
	}
}
| 4,760 | 21.671429 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_util.h -- internal definitions check util
*/
#ifndef CHECK_UTIL_H
#define CHECK_UTIL_H
#include <time.h>
#include <limits.h>
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
#define CHECK_STEP_COMPLETE UINT_MAX
#define CHECK_INVALID_QUESTION UINT_MAX
#define REQUIRE_ADVANCED "the following error can be fixed using " \
"PMEMPOOL_CHECK_ADVANCED flag"
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
/* check control context */
struct check_data;
struct arena;
/* queue of check statuses */
struct check_status;
/* container storing state of all check steps */
#define PREFIX_MAX_SIZE 30
typedef struct {
	unsigned init_done;	/* nonzero once one-time setup finished */
	unsigned step;		/* index of the next step to execute */
	unsigned replica;	/* currently processed replica */
	unsigned part;		/* currently processed part */
	int single_repl;
	int single_part;
	struct pool_set *set;
	int is_dev_dax;		/* first part resides on device DAX */
	struct pool_hdr *hdrp;	/* mapped (on-media) pool header */
	/* copy of the pool header in host byte order */
	struct pool_hdr hdr;
	int hdr_valid;
	/*
	 * If pool header has been modified this field indicates that
	 * the pool parameters structure requires refresh.
	 */
	int pool_hdr_modified;
	unsigned healthy_replicas;	/* replicas with valid state */
	/* neighboring part/replica headers, used for uuid cross-checks */
	struct pool_hdr *next_part_hdrp;
	struct pool_hdr *prev_part_hdrp;
	struct pool_hdr *next_repl_hdrp;
	struct pool_hdr *prev_repl_hdrp;
	int next_part_hdr_valid;
	int prev_part_hdr_valid;
	int next_repl_hdr_valid;
	int prev_repl_hdr_valid;
	/* valid poolset uuid */
	uuid_t *valid_puuid;
	/* valid part uuid */
	uuid_t *valid_uuid;
	/* valid part pool header */
	struct pool_hdr *valid_part_hdrp;
	int valid_part_done;
	unsigned valid_part_replica;
	char prefix[PREFIX_MAX_SIZE];	/* message prefix, e.g. "replica 1: " */
	/* BTT-related state; presumably used by map/flog checks -- confirm */
	struct arena *arenap;
	uint64_t offset;
	uint32_t narena;
	uint8_t *bitmap;
	uint8_t *dup_bitmap;
	uint8_t *fbitmap;
	struct list *list_inval;
	struct list *list_flog_inval;
	struct list *list_unmap;
	struct {
		int btti_header;
		int btti_backup;
	} valid;
	struct {
		struct btt_info btti;
		uint64_t btti_offset;
	} pool_valid;
} location;
/* check steps -- each is one stage of the overall pool check pipeline */
void check_bad_blocks(PMEMpoolcheck *ppc);
void check_backup(PMEMpoolcheck *ppc);
void check_pool_hdr(PMEMpoolcheck *ppc);
void check_pool_hdr_uuids(PMEMpoolcheck *ppc);
void check_sds(PMEMpoolcheck *ppc);
void check_log(PMEMpoolcheck *ppc);
void check_blk(PMEMpoolcheck *ppc);
void check_btt_info(PMEMpoolcheck *ppc);
void check_btt_map_flog(PMEMpoolcheck *ppc);
void check_write(PMEMpoolcheck *ppc);
/* check-data container lifecycle and step bookkeeping */
struct check_data *check_data_alloc(void);
void check_data_free(struct check_data *data);
uint32_t check_step_get(struct check_data *data);
void check_step_inc(struct check_data *data);
location *check_get_step_data(struct check_data *data);
void check_end(struct check_data *data);
int check_is_end_util(struct check_data *data);
/* status queue management (info/error/question messages) */
int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
	uint32_t arg, const char *fmt, ...) FORMAT_PRINTF(4, 5);
void check_status_release(PMEMpoolcheck *ppc, struct check_status *status);
void check_clear_status_cache(struct check_data *data);
struct check_status *check_pop_question(struct check_data *data);
struct check_status *check_pop_error(struct check_data *data);
struct check_status *check_pop_info(struct check_data *data);
bool check_has_error(struct check_data *data);
bool check_has_answer(struct check_data *data);
int check_push_answer(PMEMpoolcheck *ppc);
struct pmempool_check_status *check_status_get_util(
	struct check_status *status);
int check_status_is(struct check_status *status,
	enum pmempool_check_msg_type type);
/* create info status */
#define CHECK_INFO(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO, 0, __VA_ARGS__)
/* create info status and append error message based on errno */
#define CHECK_INFO_ERRNO(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO,\
		(uint32_t)errno, __VA_ARGS__)
/* create error status */
#define CHECK_ERR(ppc, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_ERROR, 0, __VA_ARGS__)
/* create question status */
#define CHECK_ASK(ppc, question, ...)\
	check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, question,\
		__VA_ARGS__)
/*
 * true while there is another step to run; NOTE: evaluates loc/steps more
 * than once, so arguments must be side-effect free
 */
#define CHECK_NOT_COMPLETE(loc, steps)\
	((loc)->step != CHECK_STEP_COMPLETE &&\
	((steps)[(loc)->step].check != NULL ||\
	(steps)[(loc)->step].fix != NULL))
int check_answer_loop(PMEMpoolcheck *ppc, location *data,
	void *ctx, int fail_on_no,
	int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx));
int check_questions_sequence_validate(PMEMpoolcheck *ppc);
/* formatting helpers for human-readable output */
const char *check_get_time_str(time_t time);
const char *check_get_uuid_str(uuid_t uuid);
const char *check_get_pool_type_str(enum pool_type type);
void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap);
#ifdef _WIN32
void cache_to_utf8(struct check_data *data, char *buf, size_t size);
#endif
/* test/clear individual PMEMPOOL_CHECK_* flags in the check arguments */
#define CHECK_IS(ppc, flag)\
	util_flag_isset((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_IS_NOT(ppc, flag)\
	util_flag_isclr((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_WITHOUT_FIXING(ppc)\
	CHECK_IS_NOT(ppc, REPAIR) || CHECK_IS(ppc, DRY_RUN)
#ifdef __cplusplus
}
#endif
#endif
| 5,143 | 25.111675 | 78 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.