repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_has_auto_flush_win/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of EnumSystemFirmwareTables and
* GetSystemFirmwareTable
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_has_auto_flush_win test.
* It would replace default implementation with mocked functions defined
* in mocks_windows.c
*
* This WRAP_REAL define could be also passed as preprocessor definition.
*/
#include <windows.h>
#ifndef WRAP_REAL
#define EnumSystemFirmwareTables __wrap_EnumSystemFirmwareTables
#define GetSystemFirmwareTable __wrap_GetSystemFirmwareTable
UINT
__wrap_EnumSystemFirmwareTables(DWORD FirmwareTableProviderSignature,
PVOID pFirmwareTableEnumBuffer, DWORD BufferSize);
UINT
__wrap_GetSystemFirmwareTable(DWORD FirmwareTableProviderSignature,
DWORD FirmwareTableID, PVOID pFirmwareTableBuffer, DWORD BufferSize);
#endif
| 988 | 33.103448 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_has_auto_flush_win/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in auto_flush_windows.c
*/
#include "util.h"
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include "auto_flush_windows.h"
#include "pmem_has_auto_flush_win.h"
#include <errno.h>
extern size_t Is_nfit;
extern size_t Pc_type;
extern size_t Pc_capabilities;
/*
 * Mock of EnumSystemFirmwareTables -- pretends an NFIT table exists
 * (controlled by the global Is_nfit) for ACPI queries; anything else
 * is forwarded to the real Windows API.
 */
FUNC_MOCK_DLLIMPORT(EnumSystemFirmwareTables, UINT,
DWORD FirmwareTableProviderSignature,
PVOID pFirmwareTableBuffer,
DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
/* non-ACPI providers fall through to the real implementation */
if (FirmwareTableProviderSignature != ACPI_SIGNATURE)
return _FUNC_REAL(EnumSystemFirmwareTables)
(FirmwareTableProviderSignature,
pFirmwareTableBuffer, BufferSize);
if (Is_nfit == 1 && pFirmwareTableBuffer != NULL &&
BufferSize != 0) {
UT_OUT("Mock NFIT available");
/*
 * NOTE(review): strncpy will not NUL-terminate when BufferSize <=
 * strlen(NFIT_STR_SIGNATURE) -- presumably the caller compares raw
 * signature bytes only; verify against auto_flush_windows.c.
 */
strncpy(pFirmwareTableBuffer, NFIT_STR_SIGNATURE, BufferSize);
}
/* returned size tells the caller how big a buffer to allocate */
return NFIT_SIGNATURE_LEN + sizeof(struct nfit_header);
}
FUNC_MOCK_END
/*
 * Mock of GetSystemFirmwareTable -- fabricates an NFIT table followed by
 * a platform_capabilities structure whose type/capabilities fields come
 * from the test-controlled globals Pc_type and Pc_capabilities.
 */
FUNC_MOCK_DLLIMPORT(GetSystemFirmwareTable, UINT,
DWORD FirmwareTableProviderSignature,
DWORD FirmwareTableID,
PVOID pFirmwareTableBuffer,
DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
/* only the ACPI/NFIT query is mocked; everything else is real */
if (FirmwareTableProviderSignature != ACPI_SIGNATURE ||
FirmwareTableID != NFIT_REV_SIGNATURE)
return _FUNC_REAL(GetSystemFirmwareTable)
(FirmwareTableProviderSignature, FirmwareTableID,
pFirmwareTableBuffer, BufferSize);
/* size-probing call (NULL buffer): report the required buffer size */
if (pFirmwareTableBuffer == NULL && BufferSize == 0) {
UT_OUT("GetSystemFirmwareTable mock");
return sizeof(struct platform_capabilities) +
sizeof(struct nfit_header);
}
struct nfit_header nfit;
struct platform_capabilities pc;
/* fill nfit */
char sig[NFIT_SIGNATURE_LEN] = NFIT_STR_SIGNATURE;
strncpy(nfit.signature, sig, NFIT_SIGNATURE_LEN);
nfit.length = sizeof(nfit);
/*
 * NOTE(review): neither memcpy below checks BufferSize -- assumes the
 * caller allocated at least the size reported by the probing call.
 */
memcpy(pFirmwareTableBuffer, &nfit, nfit.length);
/* fill platform_capabilities */
pc.length = sizeof(pc);
/* [...] 0000 0011 - proper capabilities bits combination */
pc.capabilities = (uint32_t)Pc_capabilities;
pc.type = (uint16_t)Pc_type;
memcpy((char *)pFirmwareTableBuffer + nfit.length, &pc, pc.length);
return BufferSize;
}
FUNC_MOCK_END
| 2,173 | 28.378378 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_has_auto_flush_win/pmem_has_auto_flush_win.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* pmem_has_auto_flush_win.c -- unit test for pmem_has_auto_flush_win()
*
* usage: pmem_has_auto_flush_win <option>
* options:
* n - is nfit available or not (y or n)
* type: number of platform capabilities structure
* capabilities: platform capabilities bits
*/
#include <stdbool.h>
#include <errno.h>
#include "unittest.h"
#include "pmem.h"
#include "pmemcommon.h"
#include "set.h"
#include "mocks_windows.h"
#include "pmem_has_auto_flush_win.h"
#include "util.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
size_t Is_nfit = 0;
size_t Pc_type = 0;
size_t Pc_capabilities = 3;
/*
 * main -- parse the test arguments, configure the firmware-table mocks
 * (Is_nfit / Pc_type / Pc_capabilities) and run pmem_has_auto_flush()
 * against them, printing the result for the test harness to compare.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_has_auto_flush_win");
	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
		MAJOR_VERSION, MINOR_VERSION);
	if (argc < 4)
		/*
		 * bug fix: the message passed argv[0] but contained no %s
		 * conversion, so the program name was silently dropped
		 */
		UT_FATAL("usage: %s <option> <type> <capabilities>",
			argv[0]);
	pmem_init();
	Pc_type = (size_t)atoi(argv[2]);
	Pc_capabilities = (size_t)atoi(argv[3]);
	/* 'y' means "NFIT table present"; anything else means absent */
	Is_nfit = argv[1][0] == 'y';
	int eADR = pmem_has_auto_flush();
	UT_OUT("pmem_has_auto_flush ret: %d", eADR);
	common_fini();
	DONE(NULL);
}
| 1,305 | 21.517241 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_alloc/obj_tx_alloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_alloc.c -- unit test for pmemobj_tx_alloc and pmemobj_tx_zalloc
*/
#include <assert.h>
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "libpmemobj.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_alloc"
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2
#define OBJ_SIZE (200 * 1024)
enum type_number {
TYPE_NO_TX,
TYPE_COMMIT,
TYPE_ABORT,
TYPE_ZEROED_COMMIT,
TYPE_ZEROED_ABORT,
TYPE_XCOMMIT,
TYPE_XABORT,
TYPE_XZEROED_COMMIT,
TYPE_XZEROED_ABORT,
TYPE_XNOFLUSHED_COMMIT,
TYPE_COMMIT_NESTED1,
TYPE_COMMIT_NESTED2,
TYPE_ABORT_NESTED1,
TYPE_ABORT_NESTED2,
TYPE_ABORT_AFTER_NESTED1,
TYPE_ABORT_AFTER_NESTED2,
TYPE_OOM,
};
TOID_DECLARE(struct object, TYPE_OOM);
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
/*
 * do_tx_alloc_oom -- allocates objects until OOM
 */
static void
do_tx_alloc_oom(PMEMobjpool *pop)
{
int do_alloc = 1;
size_t alloc_cnt = 0;
do {
TX_BEGIN(pop) {
TOID(struct object) obj = TX_NEW(struct object);
D_RW(obj)->value = alloc_cnt;
} TX_ONCOMMIT {
alloc_cnt++;
} TX_ONABORT {
/* the pool is full -- the failed tx aborted; stop allocating */
do_alloc = 0;
} TX_END
} while (do_alloc);
/* one bit per object: detect missing or duplicated 'value' fields */
size_t bitmap_size = howmany(alloc_cnt, 8);
char *bitmap = (char *)MALLOC(bitmap_size);
memset(bitmap, 0, bitmap_size);
size_t obj_cnt = 0;
TOID(struct object) i;
POBJ_FOREACH_TYPE(pop, i) {
UT_ASSERT(D_RO(i)->value < alloc_cnt);
UT_ASSERT(!isset(bitmap, D_RO(i)->value));
setbit(bitmap, D_RO(i)->value);
obj_cnt++;
}
FREE(bitmap);
UT_ASSERTeq(obj_cnt, alloc_cnt);
/* free everything so later tests start from an empty pool */
TOID(struct object) o = POBJ_FIRST(pop, struct object);
while (!TOID_IS_NULL(o)) {
TOID(struct object) next = POBJ_NEXT(o);
POBJ_FREE(&o);
o = next;
}
}
/*
 * do_tx_alloc_abort_after_nested -- aborts transaction after allocation
 * in nested transaction
 */
static void
do_tx_alloc_abort_after_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT_AFTER_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_ABORT_AFTER_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* abort the outer tx -- must roll back BOTH allocations,
 * including the one made in the already-committed inner tx */
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj1, OID_NULL);
TOID_ASSIGN(obj2, OID_NULL);
} TX_END
TOID(struct object) first;
/* check the obj1 object */
UT_ASSERT(TOID_IS_NULL(obj1));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1);
UT_ASSERT(TOID_IS_NULL(first));
/* check the obj2 object */
UT_ASSERT(TOID_IS_NULL(obj2));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2);
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_abort_nested -- aborts transaction in nested transaction
 */
static void
do_tx_alloc_abort_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_ABORT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
/* aborting the inner tx must propagate to the outer tx */
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj2, OID_NULL);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj1, OID_NULL);
} TX_END
TOID(struct object) first;
/* check the obj1 object */
UT_ASSERT(TOID_IS_NULL(obj1));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1);
UT_ASSERT(TOID_IS_NULL(first));
/* check the obj2 object */
UT_ASSERT(TOID_IS_NULL(obj2));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2);
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_commit_nested -- allocates two objects, one in nested transaction
 */
static void
do_tx_alloc_commit_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_COMMIT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_COMMIT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* verify each type-number list contains exactly its one object */
TOID(struct object) first;
TOID(struct object) next;
/* check the obj1 object */
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
UT_ASSERT(TOID_EQUALS(first, obj1));
UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_1);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
/* check the obj2 object */
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
UT_ASSERT(TOID_EQUALS(first, obj2));
UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_2);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_alloc_abort -- allocates an object and aborts the transaction
 */
static void
do_tx_alloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
/* the aborted allocation must not appear on the type list */
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_zerolen -- allocates an object of zero size to trigger tx abort
 */
static void
do_tx_alloc_zerolen(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
/* a zero-sized tx_alloc aborts the tx: control jumps straight
 * to TX_ONABORT, so the next line must never execute */
TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_huge -- allocates a huge object to trigger tx abort
 */
static void
do_tx_alloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
/* exceeding PMEMOBJ_MAX_ALLOC_SIZE aborts the transaction */
TOID_ASSIGN(obj, pmemobj_tx_alloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_commit -- allocates an object and commits the transaction
 */
static void
do_tx_alloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object),
TYPE_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* committed object must be the sole entry on its type list */
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
next = POBJ_NEXT(first);
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_zalloc_abort -- allocates a zeroed object and aborts the transaction
 */
static void
do_tx_zalloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object),
TYPE_ZEROED_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
/* zalloc must hand out zero-filled memory */
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_zalloc_zerolen -- allocate an object of zero size to trigger tx abort
 */
static void
do_tx_zalloc_zerolen(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
/* zero-sized zalloc aborts the tx immediately */
TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_ZEROED_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_zalloc_huge -- allocates a huge object to trigger tx abort
 */
static void
do_tx_zalloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
/* over-max zalloc aborts the tx immediately */
TOID_ASSIGN(obj, pmemobj_tx_zalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_ZEROED_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_zalloc_commit -- allocates zeroed object
 */
static void
do_tx_zalloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object),
TYPE_ZEROED_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
/* zalloc must hand out zero-filled memory */
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* committed object must be the sole entry on its type list */
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
next = POBJ_NEXT(first);
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_xalloc_abort -- allocates a zeroed object and aborts the transaction
 */
static void
do_tx_xalloc_abort(PMEMobjpool *pop)
{
/* xalloc 0 -- plain (non-zeroing) variant */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XABORT, 0));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO -- POBJ_XALLOC_ZERO must behave like zalloc */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_xalloc_zerolen -- allocate an object of zero size to trigger tx abort
 * (and verify the NO_ABORT flag / POBJ_TX_FAILURE_RETURN variants, which
 * must fail the allocation WITHOUT aborting the transaction)
 */
static void
do_tx_xalloc_zerolen(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* xalloc 0 with POBJ_XALLOC_NO_ABORT flag */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT,
POBJ_XALLOC_NO_ABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* alloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_XABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* xalloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* zalloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_XABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XZEROED_ABORT,
POBJ_XALLOC_ZERO));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_xalloc_huge -- allocates a huge object to trigger tx abort
 */
static void
do_tx_xalloc_huge(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_XABORT, 0));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
/*
 * do xalloc until overfilled and then
 * free last successful allocation
 */
uint64_t tot_allocated = 0, alloc_size = (5 * 1024 *1024);
int rc = 0;
/* start from an empty pool so the fill loop is deterministic */
PMEMoid oid, prev_oid;
POBJ_FOREACH_SAFE(pop, oid, prev_oid) {
pmemobj_free(&oid);
}
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
TX_BEGIN(pop) {
/* NO_ABORT keeps the tx alive; a zero offset signals OOM */
while (rc == 0) {
oid = pmemobj_tx_xalloc(alloc_size, 0,
POBJ_XALLOC_NO_ABORT);
if (oid.off == 0)
rc = -1;
else {
tot_allocated += alloc_size;
prev_oid = oid;
}
}
/* freeing the last allocation must still succeed after OOM */
rc = pmemobj_tx_free(prev_oid);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERTeq(rc, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
 * do_tx_xalloc_commit -- allocates zeroed object
 */
static void
do_tx_xalloc_commit(PMEMobjpool *pop)
{
/* xalloc 0 -- plain variant, no flags */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XCOMMIT, 0));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XCOMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
/* xalloc ZERO -- POBJ_XALLOC_ZERO must hand out zeroed memory */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XZEROED_COMMIT, POBJ_XALLOC_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_xalloc_noflush -- allocates zeroed object
 * (POBJ_XALLOC_NO_FLUSH: the store below is intentionally not flushed)
 */
static void
do_tx_xalloc_noflush(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XNOFLUSHED_COMMIT, POBJ_XALLOC_NO_FLUSH));
UT_ASSERT(!TOID_IS_NULL(obj));
/* write the very last byte of the object's data area */
D_RW(obj)->data[OBJ_SIZE - sizeof(size_t) - 1] = TEST_VALUE_1;
/* let pmemcheck find we didn't flush it */
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1],
TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XNOFLUSHED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->data[OBJ_SIZE - sizeof(size_t) - 1],
D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1]);
TOID(struct object) next;
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_root -- retrieve root inside of transaction
 */
static void
do_tx_root(PMEMobjpool *pop)
{
size_t root_size = 24;
TX_BEGIN(pop) {
PMEMoid root = pmemobj_root(pop, root_size);
UT_ASSERT(!OID_IS_NULL(root));
/* a freshly created root object must be zero-filled */
UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
root_size));
UT_ASSERTeq(root_size, pmemobj_root_size(pop));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
 * do_tx_alloc_many -- allocates many objects inside of a single transaction
 */
static void
do_tx_alloc_many(PMEMobjpool *pop)
{
#define TX_ALLOC_COUNT 70 /* bigger than max reservations */
/* empty the pool first so the final "no leftovers" check is valid */
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2) {
pmemobj_free(&oid);
}
TOID(struct object) first;
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
PMEMoid oids[TX_ALLOC_COUNT];
TX_BEGIN(pop) {
for (int i = 0; i < TX_ALLOC_COUNT; ++i) {
oids[i] = pmemobj_tx_alloc(1, 0);
UT_ASSERT(!OID_IS_NULL(oids[i]));
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
/* empty tx to make sure there's no leftover state */
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
for (int i = 0; i < TX_ALLOC_COUNT; ++i) {
pmemobj_tx_free(oids[i]);
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
#undef TX_ALLOC_COUNT
}
/*
 * main -- creates a fresh pool at argv[1] and runs every tx-alloc
 * scenario in sequence; VALGRIND_WRITE_STATS separates the scenarios
 * in the valgrind output.
 */
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_alloc");
util_init();
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
/* size 0 -- use the file's existing size */
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_root(pop);
VALGRIND_WRITE_STATS;
/* alloc */
do_tx_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_huge(pop);
VALGRIND_WRITE_STATS;
/* zalloc */
do_tx_zalloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_huge(pop);
VALGRIND_WRITE_STATS;
/* xalloc */
do_tx_xalloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_huge(pop);
VALGRIND_WRITE_STATS;
/* alloc */
do_tx_alloc_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_oom(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_many(pop);
VALGRIND_WRITE_STATS;
/* runs last: it deliberately leaves an unflushed store for pmemcheck */
do_tx_xalloc_noflush(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 20,667 | 21.862832 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/blk_pool/blk_pool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* blk_pool.c -- unit test for pmemblk_create() and pmemblk_open()
*
* usage: blk_pool op path bsize [poolsize mode]
*
* op can be:
* c - create
* o - open
* f - do fault injection
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#include "../libpmemblk/blk.h"
#define MB ((size_t)1 << 20)
/*
 * do_fault_injection -- inject an allocation failure into blk_runtime_init
 * and verify pmemblk_create() fails cleanly with ENOMEM
 */
static void
do_fault_injection(const char *path, size_t bsize,
size_t poolsize, unsigned mode)
{
/* no-op on builds without fault-injection support */
if (!pmemblk_fault_injection_enabled())
return;
pmemblk_inject_fault_at(PMEM_MALLOC, 1, "blk_runtime_init");
PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode);
UT_ASSERTeq(pbp, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
/*
 * pool_create -- create a blk pool, report its geometry, then verify it
 * passes pmemblk_check() with the right bsize and fails with a wrong one
 */
static void
pool_create(const char *path, size_t bsize, size_t poolsize, unsigned mode)
{
PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode);
if (pbp == NULL)
UT_OUT("!%s: pmemblk_create", path);
else {
os_stat_t stbuf;
STAT(path, &stbuf);
UT_OUT("%s: file size %zu usable blocks %zu mode 0%o",
path, stbuf.st_size,
pmemblk_nblock(pbp),
stbuf.st_mode & 0777);
pmemblk_close(pbp);
int result = pmemblk_check(path, bsize);
if (result < 0)
UT_OUT("!%s: pmemblk_check", path);
else if (result == 0)
UT_OUT("%s: pmemblk_check: not consistent", path);
else
/* checking with the wrong block size must fail */
UT_ASSERTeq(pmemblk_check(path, bsize * 2), -1);
}
}
/*
 * pool_open -- open an existing blk pool and report success/failure
 */
static void
pool_open(const char *path, size_t bsize)
{
PMEMblkpool *pbp = pmemblk_open(path, bsize);
if (pbp == NULL)
UT_OUT("!%s: pmemblk_open", path);
else {
UT_OUT("%s: pmemblk_open: Success", path);
pmemblk_close(pbp);
}
}
/*
 * main -- dispatch the requested operation:
 *   c - create a pool (requires poolsize and mode)
 *   o - open an existing pool
 *   f - fault-injection create (requires poolsize and mode)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_pool");
	if (argc < 4)
		UT_FATAL("usage: %s op path bsize [poolsize mode]", argv[0]);
	size_t bsize = strtoul(argv[3], NULL, 0);
	size_t poolsize;
	unsigned mode;
	switch (argv[1][0]) {
	case 'c':
		/*
		 * robustness fix: 'c' and 'f' read argv[4] and argv[5],
		 * but only argc < 4 was checked -- running with the
		 * optional arguments omitted read past the argv array
		 */
		if (argc < 6)
			UT_FATAL("usage: %s c path bsize poolsize mode",
				argv[0]);
		poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */
		mode = (unsigned)strtoul(argv[5], NULL, 8);
		pool_create(argv[2], bsize, poolsize, mode);
		break;
	case 'o':
		pool_open(argv[2], bsize);
		break;
	case 'f':
		if (argc < 6)
			UT_FATAL("usage: %s f path bsize poolsize mode",
				argv[0]);
		poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */
		mode = (unsigned)strtoul(argv[5], NULL, 8);
		do_fault_injection(argv[2], bsize, poolsize, mode);
		break;
	default:
		UT_FATAL("unknown operation");
	}
	DONE(NULL);
}
| 2,377 | 20.423423 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_memcpy/pmem_memcpy.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memcpy.c -- unit test for doing a memcpy
*
* usage: pmem_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memcpy_common.h"
/*
 * pmem_memcpy_persist_wrapper -- adapt pmem_memcpy_persist() to the
 * common memcpy_fn signature used by do_memcpy() (flags are ignored)
 */
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_persist(pmemdest, src, len);
}
/*
 * pmem_memcpy_nodrain_wrapper -- adapt pmem_memcpy_nodrain() to the
 * common memcpy_fn signature used by do_memcpy() (flags are ignored)
 */
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_nodrain(pmemdest, src, len);
}
/*
 * do_persist_ddax -- persist helper for device-DAX mappings
 * (first argument of util_persist_auto selects the device-DAX path)
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
util_persist_auto(1, ptr, size);
}
/*
 * do_persist -- persist helper for regular (non-device-DAX) mappings
 */
static void
do_persist(const void *ptr, size_t size)
{
util_persist_auto(0, ptr, size);
}
/*
 * swap_mappings - swap given two mapped regions.
 *
 * Try swapping src and dest by unmapping src, mapping a new dest with
 * the original src address as a hint. If successful, unmap original dest.
 * Map a new src with the original dest as a hint.
 *
 * NOTE(review): mmap hints are best-effort -- the caller re-checks the
 * resulting address order and bails out if the swap did not take effect.
 */
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
char *d = *dest;
char *s = *src;
char *td, *ts;
MUNMAP(*src, size);
/* mmap destination using src addr as a hint */
td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
MUNMAP(*dest, size);
*dest = td;
/* mmap src using original destination addr as a hint */
ts = MMAP(d, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS,
-1, 0);
*src = ts;
}
/*
 * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
 * of memcpy functions: persist, nodrain, and pmem_memcpy with every
 * flag combination from the Flags table
 */
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name,
persist_fn p)
{
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy_persist_wrapper, 0, p);
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy_nodrain_wrapper, 0, p);
/* NOTE(review): 'int i' vs ARRAY_SIZE() may warn on sign-compare */
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy, Flags[i], p);
}
}
/*
 * main -- map the test file plus an anonymous region, run the memcpy
 * variants with src above dest and again with dest above src, so both
 * copy directions are exercised.
 */
int
main(int argc, char *argv[])
{
int fd;
char *dest;
char *src;
char *dest_orig;
char *src_orig;
size_t mapped_len;
if (argc != 5)
UT_FATAL("usage: %s file srcoff destoff length", argv[0]);
/* environment knobs are echoed into the test name for the matcher */
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_memcpy %s %s %s %s %savx %savx512f",
argv[2], argv[3], argv[4], thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
int dest_off = atoi(argv[2]);
int src_off = atoi(argv[3]);
size_t bytes = strtoul(argv[4], NULL, 0);
/* src > dst */
dest_orig = dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
if (dest == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
src_orig = src = MMAP(dest + mapped_len, mapped_len,
PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
/*
 * Its very unlikely that src would not be > dest. pmem_map_file
 * chooses the first unused address >= 1TB, large
 * enough to hold the give range, and 1GB aligned. If the
 * addresses did not get swapped to allow src > dst, log error
 * and allow test to continue.
 */
if (src <= dest) {
swap_mappings(&dest, &src, mapped_len, fd);
if (src <= dest)
UT_FATAL("cannot map files in memory order");
}
/* pick the persist helper matching the file type */
enum file_type type = util_fd_get_type(fd);
if (type < 0)
UT_FATAL("cannot check type of file with fd %d", fd);
persist_fn persist;
persist = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
memset(dest, 0, (2 * bytes));
persist(dest, 2 * bytes);
memset(src, 0, (2 * bytes));
do_memcpy_variants(fd, dest, dest_off, src, src_off,
bytes, 0, argv[1], persist);
/* dest > src */
swap_mappings(&dest, &src, mapped_len, fd);
if (dest <= src)
UT_FATAL("cannot map files in memory order");
do_memcpy_variants(fd, dest, dest_off, src, src_off,
bytes, 0, argv[1], persist);
int ret = pmem_unmap(dest_orig, mapped_len);
UT_ASSERTeq(ret, 0);
MUNMAP(src_orig, mapped_len);
CLOSE(fd);
DONE(NULL);
}
| 4,249 | 23.853801 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_heap_interrupt/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of memops functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_heap_interrupt test.
* It would replace default implementation with mocked functions defined
* in obj_heap_interrupt.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define operation_finish __wrap_operation_finish
#endif
| 578 | 27.95 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_pmalloc_mt.c -- multithreaded test of allocator
*/
#include <stdint.h>
#include "file.h"
#include "obj.h"
#include "pmalloc.h"
#include "sys_util.h"
#include "unittest.h"
#define MAX_THREADS 32
#define MAX_OPS_PER_THREAD 1000
#define ALLOC_SIZE 104
#define REALLOC_SIZE (ALLOC_SIZE * 3)
#define MIX_RERUNS 2
#define CHUNKSIZE (1 << 18)
#define CHUNKS_PER_THREAD 3
static unsigned Threads;
static unsigned Ops_per_thread;
static unsigned Tx_per_thread;
struct action {
struct pobj_action pact;
os_mutex_t lock;
os_cond_t cond;
};
struct root {
uint64_t offs[MAX_THREADS][MAX_OPS_PER_THREAD];
struct action actions[MAX_THREADS][MAX_OPS_PER_THREAD];
};
struct worker_args {
PMEMobjpool *pop;
struct root *r;
unsigned idx;
};
static void *
alloc_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
realloc_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
free_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pfree(a->pop, &a->r->offs[a->idx][i]);
UT_ASSERTeq(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
mix_worker(void *arg)
{
struct worker_args *a = arg;
/*
* The mix scenario is ran twice to increase the chances of run
* contention.
*/
for (unsigned j = 0; j < MIX_RERUNS; ++j) {
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmalloc(a->pop, &a->r->offs[a->idx][i],
ALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pfree(a->pop, &a->r->offs[a->idx][i]);
UT_ASSERTeq(a->r->offs[a->idx][i], 0);
}
}
return NULL;
}
static void *
tx_worker(void *arg)
{
	struct worker_args *a = arg;
	/*
	 * Allocate objects until exhaustion, once that happens the transaction
	 * will automatically abort and all of the objects will be freed.
	 */
	TX_BEGIN(a->pop) {
		for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */
			/*
			 * When the pool fills up, pmemobj_tx_alloc aborts the
			 * transaction, which exits this loop via longjmp into
			 * TX_END.
			 */
			pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
			if (Ops_per_thread != MAX_OPS_PER_THREAD &&
				n == Ops_per_thread) {
				/* short test run: abort explicitly instead */
				pmemobj_tx_abort(0);
			}
		}
	} TX_END
	return NULL;
}
static void *
tx3_worker(void *arg)
{
	struct worker_args *a = arg;
	/*
	 * Allocate N objects, abort, repeat M times. Should reveal issues in
	 * transaction abort handling.
	 */
	for (unsigned n = 0; n < Tx_per_thread; ++n) {
		TX_BEGIN(a->pop) {
			for (unsigned i = 0; i < Ops_per_thread; ++i) {
				pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
			}
			/* rolls back every allocation made above */
			pmemobj_tx_abort(EINVAL);
		} TX_END
	}
	return NULL;
}
/*
 * alloc_free_worker -- (worker thread) alloc/free pairs via the public API
 */
static void *
alloc_free_worker(void *arg)
{
	struct worker_args *args = arg;

	/* allocate and immediately free, one pair per op */
	for (unsigned op = 0; op < Ops_per_thread; ++op) {
		PMEMoid oid;
		int ret = pmemobj_alloc(args->pop, &oid, ALLOC_SIZE,
			0, NULL, NULL);
		UT_ASSERTeq(ret, 0);
		pmemobj_free(&oid);
	}
	return NULL;
}
#define OPS_PER_TX 10
#define STEP 8
#define TEST_LANES 4
static void *
tx2_worker(void *arg)
{
	struct worker_args *a = arg;
	for (unsigned n = 0; n < Tx_per_thread; ++n) {
		PMEMoid oids[OPS_PER_TX];
		/*
		 * First transaction: allocate a batch of objects and snapshot
		 * each one in STEP-byte ranges (stresses the undo log).
		 */
		TX_BEGIN(a->pop) {
			for (int i = 0; i < OPS_PER_TX; ++i) {
				oids[i] = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
				for (unsigned j = 0; j < ALLOC_SIZE;
					j += STEP) {
					pmemobj_tx_add_range(oids[i], j, STEP);
				}
			}
		} TX_END
		/* second transaction: free the whole batch; must not abort */
		TX_BEGIN(a->pop) {
			for (int i = 0; i < OPS_PER_TX; ++i)
				pmemobj_tx_free(oids[i]);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	}
	return NULL;
}
static void *
action_cancel_worker(void *arg)
{
	struct worker_args *a = arg;
	PMEMoid oid;
	for (unsigned i = 0; i < Ops_per_thread; ++i) {
		/* threads are paired: even idx reserves, odd idx cancels */
		unsigned arr_id = a->idx / 2;
		struct action *act = &a->r->actions[arr_id][i];
		if (a->idx % 2 == 0) {
			/* producer: reserve, then wake the paired thread */
			os_mutex_lock(&act->lock);
			oid = pmemobj_reserve(a->pop,
				&act->pact, ALLOC_SIZE, 0);
			UT_ASSERT(!OID_IS_NULL(oid));
			os_cond_signal(&act->cond);
			os_mutex_unlock(&act->lock);
		} else {
			/* consumer: wait until the reservation exists */
			os_mutex_lock(&act->lock);
			while (act->pact.heap.offset == 0)
				os_cond_wait(&act->cond, &act->lock);
			pmemobj_cancel(a->pop, &act->pact, 1);
			os_mutex_unlock(&act->lock);
		}
	}
	return NULL;
}
static void *
action_publish_worker(void *arg)
{
	struct worker_args *a = arg;
	PMEMoid oid;
	for (unsigned i = 0; i < Ops_per_thread; ++i) {
		/* threads are paired: even idx reserves, odd idx publishes */
		unsigned arr_id = a->idx / 2;
		struct action *act = &a->r->actions[arr_id][i];
		if (a->idx % 2 == 0) {
			/* producer: reserve, then wake the paired thread */
			os_mutex_lock(&act->lock);
			oid = pmemobj_reserve(a->pop,
				&act->pact, ALLOC_SIZE, 0);
			UT_ASSERT(!OID_IS_NULL(oid));
			os_cond_signal(&act->cond);
			os_mutex_unlock(&act->lock);
		} else {
			/* consumer: wait until the reservation exists */
			os_mutex_lock(&act->lock);
			while (act->pact.heap.offset == 0)
				os_cond_wait(&act->cond, &act->lock);
			pmemobj_publish(a->pop, &act->pact, 1);
			os_mutex_unlock(&act->lock);
		}
	}
	return NULL;
}
static void *
action_mix_worker(void *arg)
{
	struct worker_args *a = arg;
	PMEMoid oid;
	for (unsigned i = 0; i < Ops_per_thread; ++i) {
		/* threads are paired: even idx reserves, odd idx resolves */
		unsigned arr_id = a->idx / 2;
		/* alternate between publishing and cancelling per op */
		unsigned publish = i % 2;
		struct action *act = &a->r->actions[arr_id][i];
		if (a->idx % 2 == 0) {
			/* producer: reserve, then wake the paired thread */
			os_mutex_lock(&act->lock);
			oid = pmemobj_reserve(a->pop,
				&act->pact, ALLOC_SIZE, 0);
			UT_ASSERT(!OID_IS_NULL(oid));
			os_cond_signal(&act->cond);
			os_mutex_unlock(&act->lock);
		} else {
			/* consumer: wait, then publish or cancel */
			os_mutex_lock(&act->lock);
			while (act->pact.heap.offset == 0)
				os_cond_wait(&act->cond, &act->lock);
			if (publish)
				pmemobj_publish(a->pop, &act->pact, 1);
			else
				pmemobj_cancel(a->pop, &act->pact, 1);
			os_mutex_unlock(&act->lock);
		}
		pmemobj_persist(a->pop, act, sizeof(*act));
	}
	return NULL;
}
/*
 * actions_clear -- reset every per-op action so workers start from scratch
 */
static void
actions_clear(PMEMobjpool *pop, struct root *r)
{
	for (unsigned t = 0; t < Threads; ++t) {
		for (unsigned op = 0; op < Ops_per_thread; ++op) {
			struct action *act = &r->actions[t][op];
			/* recreate the synchronization objects */
			util_mutex_destroy(&act->lock);
			util_mutex_init(&act->lock);
			util_cond_destroy(&act->cond);
			util_cond_init(&act->cond);
			/* wipe the reservation and make the wipe durable */
			memset(&act->pact, 0, sizeof(act->pact));
			pmemobj_persist(pop, act, sizeof(*act));
		}
	}
}
static void
run_worker(void *(worker_func)(void *arg), struct worker_args args[])
{
os_thread_t t[MAX_THREADS];
for (unsigned i = 0; i < Threads; ++i)
THREAD_CREATE(&t[i], NULL, worker_func, &args[i]);
for (unsigned i = 0; i < Threads; ++i)
THREAD_JOIN(&t[i], NULL);
}
/*
 * main -- drives all multithreaded allocator scenarios
 *
 * usage: obj_pmalloc_mt <threads> <ops/t> <tx/t> [file]
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmalloc_mt");
	if (argc != 5)
		UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]);
	PMEMobjpool *pop;
	Threads = ATOU(argv[1]);
	if (Threads > MAX_THREADS)
		UT_FATAL("Threads %u > %d", Threads, MAX_THREADS);
	Ops_per_thread = ATOU(argv[2]);
	if (Ops_per_thread > MAX_OPS_PER_THREAD)
		/*
		 * Report the actual offending values (the original message
		 * printed Threads/MAX_THREADS by mistake).
		 */
		UT_FATAL("Ops per thread %u > %d", Ops_per_thread,
			MAX_OPS_PER_THREAD);
	Tx_per_thread = ATOU(argv[3]);
	int exists = util_file_exists(argv[4]);
	if (exists < 0)
		UT_FATAL("!util_file_exists");
	if (!exists) {
		/* size the pool so each thread has its own chunks to work */
		pop = pmemobj_create(argv[4], "TEST", (PMEMOBJ_MIN_POOL) +
			(MAX_THREADS * CHUNKSIZE * CHUNKS_PER_THREAD),
			0666);
		if (pop == NULL)
			UT_FATAL("!pmemobj_create");
	} else {
		pop = pmemobj_open(argv[4], "TEST");
		if (pop == NULL)
			UT_FATAL("!pmemobj_open");
	}
	PMEMoid oid = pmemobj_root(pop, sizeof(struct root));
	struct root *r = pmemobj_direct(oid);
	UT_ASSERTne(r, NULL);
	/* per-thread arguments plus per-op synchronization objects */
	struct worker_args args[MAX_THREADS];
	for (unsigned i = 0; i < Threads; ++i) {
		args[i].pop = pop;
		args[i].r = r;
		args[i].idx = i;
		for (unsigned j = 0; j < Ops_per_thread; ++j) {
			struct action *a = &r->actions[i][j];
			util_mutex_init(&a->lock);
			util_cond_init(&a->cond);
		}
	}
	run_worker(alloc_worker, args);
	run_worker(realloc_worker, args);
	run_worker(free_worker, args);
	run_worker(mix_worker, args);
	run_worker(alloc_free_worker, args);
	run_worker(action_cancel_worker, args);
	actions_clear(pop, r);
	run_worker(action_publish_worker, args);
	actions_clear(pop, r);
	run_worker(action_mix_worker, args);
	/*
	 * Reduce the number of lanes to a value smaller than the number of
	 * threads. This will ensure that at least some of the state of the lane
	 * will be shared between threads. Doing this might reveal bugs related
	 * to runtime race detection instrumentation.
	 */
	unsigned old_nlanes = pop->lanes_desc.runtime_nlanes;
	pop->lanes_desc.runtime_nlanes = TEST_LANES;
	run_worker(tx2_worker, args);
	pop->lanes_desc.runtime_nlanes = old_nlanes;
	/*
	 * This workload might create many allocation classes due to pvector,
	 * keep it last.
	 */
	if (Threads == MAX_THREADS) /* don't run for short tests */
		run_worker(tx_worker, args);
	run_worker(tx3_worker, args);
	pmemobj_close(pop);
	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 9,123 | 21.09201 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_alignment/obj_ctl_alignment.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_ctl_alignment.c -- tests for the alloc class alignment
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_alignment"
static PMEMobjpool *pop;
static void
test_fail(void)
{
struct pobj_alloc_class_desc ac;
ac.header_type = POBJ_HEADER_NONE;
ac.unit_size = 1024 - 1;
ac.units_per_block = 100;
ac.alignment = 512;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac);
UT_ASSERTeq(ret, -1); /* unit_size must be multiple of alignment */
}
/*
 * test_aligned_allocs -- register an aligned alloc class, verify that
 * consecutive allocations honor the alignment and that the class can be
 * read back with the alignment intact
 */
static void
test_aligned_allocs(size_t size, size_t alignment, enum pobj_header_type htype)
{
	struct pobj_alloc_class_desc desc;
	desc.header_type = htype;
	desc.unit_size = size;
	desc.units_per_block = 100;
	desc.alignment = alignment;

	int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc);
	UT_ASSERTeq(ret, 0);

	/* two allocations from the new class; both must be aligned */
	for (int i = 0; i < 2; ++i) {
		PMEMoid oid;
		ret = pmemobj_xalloc(pop, &oid, 1, 0,
			POBJ_CLASS_ID(desc.class_id), NULL, NULL);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(oid.off % alignment, 0);
		UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0);
	}

	/* read the class back and verify the alignment survived */
	char query[1024];
	SNPRINTF(query, 1024, "heap.alloc_class.%u.desc", desc.class_id);
	struct pobj_alloc_class_desc read_desc;
	ret = pmemobj_ctl_get(pop, query, &read_desc);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(desc.alignment, read_desc.alignment);
}
/*
 * main -- create the pool and run all alignment scenarios
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alignment");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *file = argv[1];
	pop = pmemobj_create(file, LAYOUT, PMEMOBJ_MIN_POOL * 10,
		S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", file);

	test_fail();
	test_aligned_allocs(1024, 512, POBJ_HEADER_NONE);
	test_aligned_allocs(1024, 512, POBJ_HEADER_COMPACT);
	test_aligned_allocs(64, 64, POBJ_HEADER_COMPACT);

	pmemobj_close(pop);
	DONE(NULL);
}
| 2,055 | 23.47619 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_list/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of obj list functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_list test.
* It would replace default implementation with mocked functions defined
* in obj_list.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#if defined(__cplusplus)
extern "C" {
#endif
#ifdef WRAP_REAL
#define WRAP_REAL_PMALLOC
#define WRAP_REAL_ULOG
#define WRAP_REAL_LANE
#define WRAP_REAL_HEAP
#define WRAP_REAL_PMEMOBJ
#endif
#ifndef WRAP_REAL_PMALLOC
#define pmalloc __wrap_pmalloc
#define pfree __wrap_pfree
#define pmalloc_construct __wrap_pmalloc_construct
#define prealloc __wrap_prealloc
#define prealloc_construct __wrap_prealloc_construct
#define palloc_usable_size __wrap_palloc_usable_size
#define palloc_reserve __wrap_palloc_reserve
#define palloc_publish __wrap_palloc_publish
#define palloc_defer_free __wrap_palloc_defer_free
#endif
#ifndef WRAP_REAL_ULOG
#define ulog_store __wrap_ulog_store
#define ulog_process __wrap_ulog_process
#endif
#ifndef WRAP_REAL_LANE
#define lane_hold __wrap_lane_hold
#define lane_release __wrap_lane_release
#define lane_recover_and_section_boot __wrap_lane_recover_and_section_boot
#define lane_section_cleanup __wrap_lane_section_cleanup
#endif
#ifndef WRAP_REAL_HEAP
#define heap_boot __wrap_heap_boot
#endif
#ifndef WRAP_REAL_PMEMOBJ
#define pmemobj_alloc __wrap_pmemobj_alloc
#define pmemobj_alloc_usable_size __wrap_pmemobj_alloc_usable_size
#define pmemobj_openU __wrap_pmemobj_open
#define pmemobj_close __wrap_pmemobj_close
#define pmemobj_direct __wrap_pmemobj_direct
#define pmemobj_pool_by_oid __wrap_pmemobj_pool_by_oid
#define pmemobj_pool_by_ptr __wrap_pmemobj_pool_by_ptr
#endif
#if defined(__cplusplus)
}
#endif
| 1,933 | 26.628571 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_list/obj_list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_list.h -- unit tests for list module
*/
#include <stddef.h>
#include <sys/param.h>
#include "list.h"
#include "obj.h"
#include "lane.h"
#include "unittest.h"
#include "util.h"
/* offset to "in band" item */
#define OOB_OFF (sizeof(struct oob_header))
/* pmemobj initial heap offset */
#define HEAP_OFFSET 8192
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
TOID_DECLARE(struct oob_list, 2);
TOID_DECLARE(struct oob_item, 3);
struct item {
int id;
POBJ_LIST_ENTRY(struct item) next;
};
struct oob_header {
char data[48];
};
struct oob_item {
struct oob_header oob;
struct item item;
};
struct oob_list {
struct list_head head;
};
struct list {
POBJ_LIST_HEAD(listhead, struct item) head;
};
enum ulog_fail
{
/* don't fail at all */
NO_FAIL,
/* fail after ulog_store */
FAIL_AFTER_FINISH,
/* fail before ulog_store */
FAIL_BEFORE_FINISH,
/* fail after process */
FAIL_AFTER_PROCESS
};
/* global handle to pmemobj pool */
extern PMEMobjpool *Pop;
/* pointer to heap offset */
extern uint64_t *Heap_offset;
/* list lane section */
extern struct lane Lane;
/* actual item id */
extern int *Id;
/* fail event */
extern enum ulog_fail Ulog_fail;
/* global "in band" lists */
extern TOID(struct list) List;
extern TOID(struct list) List_sec;
/* global "out of band" lists */
extern TOID(struct oob_list) List_oob;
extern TOID(struct oob_list) List_oob_sec;
extern TOID(struct oob_item) *Item;
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list <file> i:<where>:<num>")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list <file> n:<where>:<num>:<value>")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list <file> f:<list>:<num>:<from>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list <file> r:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list <file> m:<num>:<where>:<num>")
#define FATAL_USAGE_FAIL()\
UT_FATAL("usage: obj_list <file> "\
"F:<after_finish|before_finish|after_process>")
| 2,314 | 21.475728 | 59 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_list/obj_list_mocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_list_mocks.c -- mocks for redo/lane/heap/obj modules
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "obj_list.h"
#include "set.h"
/*
* pmem_drain_nop -- no operation for drain on non-pmem memory
*/
static void
pmem_drain_nop(void)
{
	/* NOP -- msync-based (non-pmem) persistence needs no drain step */
}
/*
* obj_persist -- pmemobj version of pmem_persist w/o replication
*/
/*
 * obj_persist -- pmemobj version of pmem_persist w/o replication;
 * forwards to the pool-local persist routine, always succeeds
 */
static int
obj_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pool = (PMEMobjpool *)ctx;

	pool->persist_local(addr, len);
	return 0;
}
/*
* obj_flush -- pmemobj version of pmem_flush w/o replication
*/
/*
 * obj_flush -- pmemobj version of pmem_flush w/o replication;
 * forwards to the pool-local flush routine, always succeeds
 */
static int
obj_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
	PMEMobjpool *pool = (PMEMobjpool *)ctx;

	pool->flush_local(addr, len);
	return 0;
}
static uintptr_t Pool_addr;
static size_t Pool_size;
static void
obj_msync_nofail(const void *addr, size_t size)
{
	uintptr_t addr_ptrt = (uintptr_t)addr;
	/*
	 * Verify msynced range is in the last mapped file range. Useful for
	 * catching errors which normally would be caught only on Windows by
	 * win_mmap.c.
	 */
	/*
	 * NOTE(review): the last `>=` rejects a range ending exactly at
	 * Pool_addr + Pool_size; looks like an off-by-one (`>` expected) --
	 * confirm whether flushing the final byte of the pool is intended
	 * to be valid.
	 */
	if (addr_ptrt < Pool_addr || addr_ptrt >= Pool_addr + Pool_size ||
		addr_ptrt + size >= Pool_addr + Pool_size)
		UT_FATAL("<0x%" PRIxPTR ",0x%" PRIxPTR "> "
			"not in <0x%" PRIxPTR ",0x%" PRIxPTR "> range",
			addr_ptrt, addr_ptrt + size, Pool_addr,
			Pool_addr + Pool_size);
	if (pmem_msync(addr, size))
		UT_FATAL("!pmem_msync");
}
/*
* obj_drain -- pmemobj version of pmem_drain w/o replication
*/
/*
 * obj_drain -- pmemobj version of pmem_drain w/o replication;
 * forwards to the pool-local drain routine
 */
static void
obj_drain(void *ctx)
{
	PMEMobjpool *pool = (PMEMobjpool *)ctx;

	pool->drain_local();
}
/*
 * obj_memcpy -- pmem_ops memcpy callback; the pool context is unused,
 * the call is forwarded straight to libpmem
 */
static void *
obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
	return pmem_memcpy(dest, src, len, flags);
}
/*
 * obj_memset -- pmem_ops memset callback; the pool context is unused,
 * the call is forwarded straight to libpmem
 */
static void *
obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	return pmem_memset(ptr, c, sz, flags);
}
/*
* linear_alloc -- allocates `size` bytes (rounded up to 8 bytes) and returns
* offset to the allocated object
*/
/*
 * linear_alloc -- bump-pointer allocator: returns the current offset and
 * advances the cursor by `size` rounded up to 8-byte granularity
 */
static uint64_t
linear_alloc(uint64_t *cur_offset, size_t size)
{
	uint64_t alloc_off = *cur_offset;

	*cur_offset += roundup(size, sizeof(uint64_t));
	return alloc_off;
}
/*
* pmemobj_open -- pmemobj_open mock
*
* This function initializes the pmemobj pool for purposes of this
* unittest.
*/
FUNC_MOCK(pmemobj_open, PMEMobjpool *, const char *fname, const char *layout)
FUNC_MOCK_RUN_DEFAULT
{
	size_t size;
	int is_pmem;
	/* map the whole file; the mapping itself becomes the pool */
	void *addr = pmem_map_file(fname, 0, 0, 0, &size, &is_pmem);
	if (!addr) {
		UT_OUT("!%s: pmem_map_file", fname);
		return NULL;
	}
	/* remember the mapping so obj_msync_nofail can range-check */
	Pool_addr = (uintptr_t)addr;
	Pool_size = size;
	Pop = (PMEMobjpool *)addr;
	Pop->addr = Pop;
	Pop->is_pmem = is_pmem;
	Pop->rdonly = 0;
	Pop->uuid_lo = 0x12345678;
	/* the lock lists live in DRAM only; hide them from pmemcheck */
	VALGRIND_REMOVE_PMEM_MAPPING(&Pop->mutex_head,
		sizeof(Pop->mutex_head));
	VALGRIND_REMOVE_PMEM_MAPPING(&Pop->rwlock_head,
		sizeof(Pop->rwlock_head));
	VALGRIND_REMOVE_PMEM_MAPPING(&Pop->cond_head,
		sizeof(Pop->cond_head));
	Pop->mutex_head = NULL;
	Pop->rwlock_head = NULL;
	Pop->cond_head = NULL;
	/* pick persistence primitives depending on the backing medium */
	if (Pop->is_pmem) {
		Pop->persist_local = pmem_persist;
		Pop->flush_local = pmem_flush;
		Pop->drain_local = pmem_drain;
		Pop->memcpy_local = pmem_memcpy;
		Pop->memset_local = pmem_memset;
	} else {
		Pop->persist_local = obj_msync_nofail;
		Pop->flush_local = obj_msync_nofail;
		Pop->drain_local = pmem_drain_nop;
		Pop->memcpy_local = pmem_memcpy;
		Pop->memset_local = pmem_memset;
	}
	Pop->p_ops.persist = obj_persist;
	Pop->p_ops.flush = obj_flush;
	Pop->p_ops.drain = obj_drain;
	Pop->p_ops.memcpy = obj_memcpy;
	Pop->p_ops.memset = obj_memset;
	Pop->p_ops.base = Pop;
	struct pmem_ops *p_ops = &Pop->p_ops;
	Pop->heap_offset = HEAP_OFFSET;
	Pop->heap_size = size - Pop->heap_offset;
	/* carve the fixed test structures out of the heap linearly */
	uint64_t heap_offset = HEAP_OFFSET;
	Heap_offset = (uint64_t *)((uintptr_t)Pop +
		linear_alloc(&heap_offset, sizeof(*Heap_offset)));
	Id = (int *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Id)));
	/* Alloc lane layout */
	Lane.layout = (struct lane_layout *)((uintptr_t)Pop +
		linear_alloc(&heap_offset, LANE_TOTAL_SIZE));
	/* Alloc in band lists */
	List.oid.pool_uuid_lo = Pop->uuid_lo;
	List.oid.off = linear_alloc(&heap_offset, sizeof(struct list));
	List_sec.oid.pool_uuid_lo = Pop->uuid_lo;
	List_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct list));
	/* Alloc out of band lists */
	List_oob.oid.pool_uuid_lo = Pop->uuid_lo;
	List_oob.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list));
	List_oob_sec.oid.pool_uuid_lo = Pop->uuid_lo;
	List_oob_sec.oid.off =
		linear_alloc(&heap_offset, sizeof(struct oob_list));
	Item = (union oob_item_toid *)((uintptr_t)Pop +
		linear_alloc(&heap_offset, sizeof(*Item)));
	Item->oid.pool_uuid_lo = Pop->uuid_lo;
	Item->oid.off = linear_alloc(&heap_offset, sizeof(struct oob_item));
	pmemops_persist(p_ops, Item, sizeof(*Item));
	/* persist the heap cursor only on first open of this file */
	if (*Heap_offset == 0) {
		*Heap_offset = heap_offset;
		pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
	}
	pmemops_persist(p_ops, Pop, HEAP_OFFSET);
	Pop->run_id += 2;
	pmemops_persist(p_ops, &Pop->run_id, sizeof(Pop->run_id));
	Lane.external = operation_new((struct ulog *)&Lane.layout->external,
		LANE_REDO_EXTERNAL_SIZE, NULL, NULL, p_ops, LOG_TYPE_REDO);
	return Pop;
}
FUNC_MOCK_END
/*
* pmemobj_close -- pmemobj_close mock
*
* Just unmap the mapped area.
*/
FUNC_MOCK(pmemobj_close, void, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
	/* tear down the lane's redo-log context created in the open mock */
	operation_delete(Lane.external);
	UT_ASSERTeq(pmem_unmap(Pop,
		Pop->heap_size + Pop->heap_offset), 0);
	/* reset the globals so a stale pool cannot be used afterwards */
	Pop = NULL;
	Pool_addr = 0;
	Pool_size = 0;
}
FUNC_MOCK_END
/*
* pmemobj_pool_by_ptr -- pmemobj_pool_by_ptr mock
*
* Just return Pop.
*/
FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_ptr, PMEMobjpool *, Pop, const void *ptr);
/*
* pmemobj_direct -- pmemobj_direct mock
*/
FUNC_MOCK(pmemobj_direct, void *, PMEMoid oid)
FUNC_MOCK_RUN_DEFAULT {
	/* single-pool mock: an OID is just an offset from the pool base */
	return (void *)((uintptr_t)Pop + oid.off);
}
FUNC_MOCK_END
FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_oid, PMEMobjpool *, Pop, PMEMoid oid);
/*
* pmemobj_alloc_usable_size -- pmemobj_alloc_usable_size mock
*/
FUNC_MOCK(pmemobj_alloc_usable_size, size_t, PMEMoid oid)
FUNC_MOCK_RUN_DEFAULT {
	/* strip the out-of-band header from both the offset and the size */
	size_t size = palloc_usable_size(
		&Pop->heap, oid.off - OOB_OFF);
	return size - OOB_OFF;
}
FUNC_MOCK_END
/*
* pmemobj_alloc -- pmemobj_alloc mock
*
* Allocates an object using pmalloc and return PMEMoid.
*/
FUNC_MOCK(pmemobj_alloc, int, PMEMobjpool *pop, PMEMoid *oidp,
	size_t size, uint64_t type_num,
	pmemobj_constr constructor, void *arg)
FUNC_MOCK_RUN_DEFAULT {
	/* constructor/type_num are ignored; only the raw pmalloc happens */
	PMEMoid oid = {0, 0};
	oid.pool_uuid_lo = 0;
	pmalloc(pop, &oid.off, size, 0, 0);
	if (oidp) {
		*oidp = oid;
		/* persist the OID only when it lives inside the pool */
		if (OBJ_PTR_FROM_POOL(pop, oidp))
			pmemops_persist(&Pop->p_ops, oidp,
				sizeof(*oidp));
	}
	return 0;
}
FUNC_MOCK_END
/*
* lane_hold -- lane_hold mock
*
* Returns pointer to list lane section.
*/
FUNC_MOCK(lane_hold, unsigned, PMEMobjpool *pop, struct lane **lane)
FUNC_MOCK_RUN_DEFAULT {
	/* the test uses a single global lane; always hand it out */
	*lane = &Lane;
	return 0;
}
FUNC_MOCK_END
/*
* lane_release -- lane_release mock
*
* Always returns success.
*/
FUNC_MOCK_RET_ALWAYS_VOID(lane_release, PMEMobjpool *pop);
/*
* lane_recover_and_section_boot -- lane_recover_and_section_boot mock
*/
FUNC_MOCK(lane_recover_and_section_boot, int, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
	/* replay the external redo log of the single test lane */
	ulog_recover((struct ulog *)&Lane.layout->external,
		OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
	return 0;
}
FUNC_MOCK_END
/*
* lane_section_cleanup -- lane_section_cleanup mock
*/
FUNC_MOCK(lane_section_cleanup, int, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
	/* nothing to clean up in the mocked single-lane setup */
	return 0;
}
FUNC_MOCK_END
/*
* ulog_store_last -- ulog_store_last mock
*/
FUNC_MOCK(ulog_store, void,
	struct ulog *dest,
	struct ulog *src, size_t nbytes, size_t redo_base_nbytes,
	size_t ulog_base_capacity,
	struct ulog_next *next, const struct pmem_ops *p_ops)
FUNC_MOCK_RUN_DEFAULT {
	/*
	 * Fault-injection point: depending on Ulog_fail, terminate the
	 * test (DONEW) before or after the real store to simulate a crash
	 * at that exact moment.
	 */
	switch (Ulog_fail) {
	case FAIL_AFTER_FINISH:
		/* crash right after the log is stored */
		_FUNC_REAL(ulog_store)(dest, src,
			nbytes, redo_base_nbytes,
			ulog_base_capacity,
			next, p_ops);
		DONEW(NULL);
		break;
	case FAIL_BEFORE_FINISH:
		/* crash before the log is stored at all */
		DONEW(NULL);
		break;
	default:
		_FUNC_REAL(ulog_store)(dest, src,
			nbytes, redo_base_nbytes,
			ulog_base_capacity,
			next, p_ops);
		break;
	}
}
FUNC_MOCK_END
/*
* ulog_process -- ulog_process mock
*/
FUNC_MOCK(ulog_process, void, struct ulog *ulog,
	ulog_check_offset_fn check, const struct pmem_ops *p_ops)
FUNC_MOCK_RUN_DEFAULT {
	_FUNC_REAL(ulog_process)(ulog, check, p_ops);
	/* fault-injection: simulate a crash right after log processing */
	if (Ulog_fail == FAIL_AFTER_PROCESS) {
		DONEW(NULL);
	}
}
FUNC_MOCK_END
/*
* heap_boot -- heap_boot mock
*
* Always returns success.
*/
FUNC_MOCK_RET_ALWAYS(heap_boot, int, 0, PMEMobjpool *pop);
| 8,765 | 22.691892 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_list/obj_list_mocks_palloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_mocks_palloc.c -- mocks for palloc/pmalloc modules
*/
#include "obj_list.h"
/*
* pmalloc -- pmalloc mock
*
* Allocates the memory using linear allocator.
* Prints the id of allocated struct oob_item for tracking purposes.
*/
FUNC_MOCK(pmalloc, int, PMEMobjpool *pop, uint64_t *ptr,
	size_t size, uint64_t extra_field, uint16_t flags)
FUNC_MOCK_RUN_DEFAULT {
	struct pmem_ops *p_ops = &Pop->p_ops;
	/* layout: [size header][oob header][payload]; room for two u64s */
	size = size + OOB_OFF + sizeof(uint64_t) * 2;
	uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop
		+ *Heap_offset);
	*alloc_size = size;
	pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
	/* first point at the oob header, then advance past it */
	*ptr = *Heap_offset + sizeof(uint64_t);
	if (OBJ_PTR_FROM_POOL(pop, ptr))
		pmemops_persist(p_ops, ptr, sizeof(*ptr));
	struct oob_item *item =
		(struct oob_item *)((uintptr_t)Pop + *ptr);
	*ptr += OOB_OFF;
	if (OBJ_PTR_FROM_POOL(pop, ptr))
		pmemops_persist(p_ops, ptr, sizeof(*ptr));
	/* tag the object with a monotonically increasing id for tracing */
	item->item.id = *Id;
	pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id));
	(*Id)++;
	pmemops_persist(p_ops, Id, sizeof(*Id));
	*Heap_offset = *Heap_offset + sizeof(uint64_t) +
		size + OOB_OFF;
	pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
	UT_OUT("pmalloc(id = %d)", item->item.id);
	return 0;
}
FUNC_MOCK_END
/*
* pfree -- pfree mock
*
* Just prints freeing struct oob_item id. Doesn't free the memory.
*/
FUNC_MOCK(pfree, void, PMEMobjpool *pop, uint64_t *ptr)
FUNC_MOCK_RUN_DEFAULT {
	/* the linear allocator never reclaims; just trace and zero the ptr */
	struct oob_item *item =
		(struct oob_item *)((uintptr_t)Pop + *ptr - OOB_OFF);
	UT_OUT("pfree(id = %d)", item->item.id);
	*ptr = 0;
	if (OBJ_PTR_FROM_POOL(pop, ptr))
		pmemops_persist(&Pop->p_ops, ptr, sizeof(*ptr));
	return;
}
FUNC_MOCK_END
/*
* pmalloc_construct -- pmalloc_construct mock
*
* Allocates the memory using linear allocator and invokes the constructor.
* Prints the id of allocated struct oob_item for tracking purposes.
*/
FUNC_MOCK(pmalloc_construct, int, PMEMobjpool *pop, uint64_t *off,
	size_t size, palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t flags, uint16_t class_id)
FUNC_MOCK_RUN_DEFAULT {
	struct pmem_ops *p_ops = &Pop->p_ops;
	/* same linear layout as the pmalloc mock */
	size = size + OOB_OFF + sizeof(uint64_t) * 2;
	uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
		*Heap_offset);
	*alloc_size = size;
	pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
	*off = *Heap_offset + sizeof(uint64_t) + OOB_OFF;
	if (OBJ_PTR_FROM_POOL(pop, off))
		pmemops_persist(p_ops, off, sizeof(*off));
	*Heap_offset = *Heap_offset + sizeof(uint64_t) + size;
	pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
	/* unlike pmalloc, run the caller-supplied constructor */
	void *ptr = (void *)((uintptr_t)Pop + *off);
	constructor(pop, ptr, size, arg);
	return 0;
}
FUNC_MOCK_END
/*
* prealloc -- prealloc mock
*/
FUNC_MOCK(prealloc, int, PMEMobjpool *pop, uint64_t *off, size_t size,
	uint64_t extra_field, uint16_t flags)
FUNC_MOCK_RUN_DEFAULT {
	/* the stored size header sits one u64 before the object */
	uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
		*off - sizeof(uint64_t));
	struct item *item = (struct item *)((uintptr_t)Pop +
		*off + OOB_OFF);
	/* can only shrink in place; growing is not supported by the mock */
	if (*alloc_size >= size) {
		*alloc_size = size;
		pmemops_persist(&Pop->p_ops, alloc_size,
			sizeof(*alloc_size));
		UT_OUT("prealloc(id = %d, size = %zu) = true",
			item->id,
			(size - OOB_OFF) / sizeof(struct item));
		return 0;
	} else {
		UT_OUT("prealloc(id = %d, size = %zu) = false",
			item->id,
			(size - OOB_OFF) / sizeof(struct item));
		return -1;
	}
}
FUNC_MOCK_END
/*
* prealloc_construct -- prealloc_construct mock
*/
FUNC_MOCK(prealloc_construct, int, PMEMobjpool *pop, uint64_t *off,
	size_t size, palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t flags, uint16_t class_id)
FUNC_MOCK_RUN_DEFAULT {
	/* delegate to the prealloc mock; run constructor only on success */
	int ret = __wrap_prealloc(pop, off, size, 0, 0);
	if (!ret) {
		void *ptr = (void *)((uintptr_t)Pop + *off + OOB_OFF);
		constructor(pop, ptr, size, arg);
	}
	return ret;
}
FUNC_MOCK_END
/*
* palloc_reserve -- palloc_reserve mock
*/
FUNC_MOCK(palloc_reserve, int, struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	uint16_t arena_id, struct pobj_action *act)
FUNC_MOCK_RUN_DEFAULT {
	struct pmem_ops *p_ops = &Pop->p_ops;
	/* same linear layout as the pmalloc mock */
	size = size + OOB_OFF + sizeof(uint64_t) * 2;
	uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop
		+ *Heap_offset);
	*alloc_size = size;
	pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
	/* record the reservation in the action instead of an out param */
	act->heap.offset = *Heap_offset + sizeof(uint64_t);
	struct oob_item *item =
		(struct oob_item *)((uintptr_t)Pop + act->heap.offset);
	act->heap.offset += OOB_OFF;
	item->item.id = *Id;
	pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id));
	(*Id)++;
	pmemops_persist(p_ops, Id, sizeof(*Id));
	*Heap_offset += sizeof(uint64_t) + size + OOB_OFF;
	pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
	UT_OUT("pmalloc(id = %d)", item->item.id);
	return 0;
}
FUNC_MOCK_END
/*
* palloc_publish -- mock publish, must process operation
*/
FUNC_MOCK(palloc_publish, void, struct palloc_heap *heap,
	struct pobj_action *actv, size_t actvcnt,
	struct operation_context *ctx)
FUNC_MOCK_RUN_DEFAULT {
	/* the actions are already "allocated"; just run the redo log */
	operation_process(ctx);
	operation_finish(ctx, 0);
}
FUNC_MOCK_END
/*
* palloc_defer_free -- pfree mock
*
* Just prints freeing struct oob_item id. Doesn't free the memory.
*/
FUNC_MOCK(palloc_defer_free, void, struct palloc_heap *heap, uint64_t off,
	struct pobj_action *act)
FUNC_MOCK_RUN_DEFAULT {
	/* trace the free; memory itself is never reclaimed by the mock */
	struct oob_item *item =
		(struct oob_item *)((uintptr_t)Pop + off - OOB_OFF);
	UT_OUT("pfree(id = %d)", item->item.id);
	act->heap.offset = off;
	return;
}
FUNC_MOCK_END
/*
* pmalloc_usable_size -- pmalloc_usable_size mock
*/
FUNC_MOCK(palloc_usable_size, size_t, struct palloc_heap *heap, uint64_t off)
FUNC_MOCK_RUN_DEFAULT {
	/* the size header is stored one u64 before the object */
	uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
		off - sizeof(uint64_t));
	return (size_t)*alloc_size;
}
FUNC_MOCK_END
| 6,050 | 26.756881 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/blk_rw_mt/blk_rw_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_rw_mt.c -- unit test for multi-threaded random I/O
*
* usage: blk_rw_mt bsize file seed nthread nops
*
*/
#include "unittest.h"
#include "rand.h"
static size_t Bsize;
/* all I/O below this LBA (increases collisions) */
static const unsigned Nblock = 100;
static unsigned Seed;
static unsigned Nthread;
static unsigned Nops;
static PMEMblkpool *Handle;
/*
* construct -- build a buffer for writing
*/
static void
construct(int *ordp, unsigned char *buf)
{
for (int i = 0; i < Bsize; i++)
buf[i] = *ordp;
(*ordp)++;
if (*ordp > 255)
*ordp = 1;
}
/*
* check -- check for torn buffers
*/
static void
check(unsigned char *buf)
{
unsigned val = *buf;
for (int i = 1; i < Bsize; i++)
if (buf[i] != val) {
UT_OUT("{%u} TORN at byte %d", val, i);
break;
}
}
/*
* worker -- the work each thread performs
*/
static void *
worker(void *arg)
{
	uintptr_t mytid = (uintptr_t)arg;
	unsigned char *buf = MALLOC(Bsize);
	int ord = 1;
	rng_t rng;
	/* per-thread RNG seeded with Seed + tid for reproducible runs */
	randomize_r(&rng, Seed + mytid);
	for (unsigned i = 0; i < Nops; i++) {
		/* LBAs stay below Nblock to force inter-thread collisions */
		os_off_t lba = (os_off_t)(rnd64_r(&rng) % Nblock);
		if (rnd64_r(&rng) % 2) {
			/* read */
			if (pmemblk_read(Handle, buf, lba) < 0)
				UT_OUT("!read lba %zu", lba);
			else
				check(buf);
		} else {
			/* write */
			construct(&ord, buf);
			if (pmemblk_write(Handle, buf, lba) < 0)
				UT_OUT("!write lba %zu", lba);
		}
	}
	FREE(buf);
	return NULL;
}
/*
 * main -- parse arguments, create a pmemblk pool, run Nthread workers
 * doing random mixed I/O, then close and consistency-check the pool.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_rw_mt");
	if (argc != 6)
		UT_FATAL("usage: %s bsize file seed nthread nops", argv[0]);
	/* block size for the pool; also the unit of each read/write */
	Bsize = strtoul(argv[1], NULL, 0);
	const char *path = argv[2];
	if ((Handle = pmemblk_create(path, Bsize, 0,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!%s: pmemblk_create", path);
	Seed = strtoul(argv[3], NULL, 0);
	Nthread = strtoul(argv[4], NULL, 0);
	Nops = strtoul(argv[5], NULL, 0);
	UT_OUT("%s block size %zu usable blocks %u", argv[1], Bsize, Nblock);
	os_thread_t *threads = MALLOC(Nthread * sizeof(os_thread_t));
	/* kick off nthread threads */
	for (unsigned i = 0; i < Nthread; i++)
		THREAD_CREATE(&threads[i], NULL, worker, (void *)(intptr_t)i);
	/* wait for all the threads to complete */
	for (unsigned i = 0; i < Nthread; i++)
		THREAD_JOIN(&threads[i], NULL);
	FREE(threads);
	pmemblk_close(Handle);
	/* XXX not ready to pass this part of the test yet */
	int result = pmemblk_check(path, Bsize);
	if (result < 0)
		UT_OUT("!%s: pmemblk_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemblk_check: not consistent", path);
	DONE(NULL);
}
| 4,260 | 25.302469 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_stats/obj_ctl_stats.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* obj_ctl_stats.c -- tests for the libpmemobj statistics module
*/
#include "unittest.h"
/*
 * main -- exercise the stats.* ctl namespace: stats are off by default,
 * persistent stats survive a pool close/reopen, and transient (run)
 * stats are rebuilt lazily on the first allocation after reopen.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_stats");
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, "ctl", PMEMOBJ_MIN_POOL,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/* statistics are disabled by default */
	int enabled;
	int ret = pmemobj_ctl_get(pop, "stats.enabled", &enabled);
	UT_ASSERTeq(enabled, 0);
	UT_ASSERTeq(ret, 0);
	/* an allocation made while stats are off must not be counted */
	ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	size_t allocated;
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
	UT_ASSERTeq(ret, 0); /* was previously left unchecked */
	UT_ASSERTeq(allocated, 0);
	enabled = 1;
	ret = pmemobj_ctl_set(pop, "stats.enabled", &enabled);
	UT_ASSERTeq(ret, 0);
	PMEMoid oid;
	ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	/* usable size plus the 16-byte compact allocation header */
	size_t oid_size = pmemobj_alloc_usable_size(oid) + 16;
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(allocated, oid_size);
	size_t run_allocated = 0;
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */);
	pmemobj_free(&oid);
	/* curr_allocated drops on free; run_allocated is cumulative */
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(allocated, 0);
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */);
	TX_BEGIN(pop) {
		oid = pmemobj_tx_alloc(1, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	oid_size = pmemobj_alloc_usable_size(oid) + 16;
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(allocated, oid_size);
	enum pobj_stats_enabled enum_enabled;
	ret = pmemobj_ctl_get(pop, "stats.enabled", &enum_enabled);
	/* bugfix: assert the freshly read enum, not the stale int */
	UT_ASSERTeq(enum_enabled, POBJ_STATS_ENABLED_BOTH);
	UT_ASSERTeq(ret, 0);
	run_allocated = 0;
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
	UT_ASSERTeq(ret, 0);
	enum_enabled = POBJ_STATS_ENABLED_PERSISTENT; /* transient disabled */
	ret = pmemobj_ctl_set(pop, "stats.enabled", &enum_enabled);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	size_t tmp = 0;
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(tmp, run_allocated); /* shouldn't change */
	/* the deallocated object shouldn't be reflected in rebuilt stats */
	pmemobj_free(&oid);
	pmemobj_close(pop);
	pop = pmemobj_open(path, "ctl");
	UT_ASSERTne(pop, NULL);
	/* stats are rebuilt lazily, so initially this should be 0 */
	tmp = 0;
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(tmp, 0);
	ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	/* after first alloc, the previously allocated object will be found */
	tmp = 0;
	ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(tmp, run_allocated + oid_size);
	pmemobj_close(pop);
	DONE(NULL);
}
| 3,299 | 25.829268 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_poolset_foreach/util_poolset_foreach.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* util_poolset_foreach.c -- unit test for util_poolset_foreach_part()
*
* usage: util_poolset_foreach file...
*/
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
/*
 * cb -- util_poolset_foreach_part() callback; prints one line per part
 *
 * arg carries the poolset file name that is prefixed to every line.
 * Always returns 0 so the iteration continues over all parts.
 */
static int
cb(struct part_file *pf, void *arg)
{
	const char *set_name = (const char *)arg;
	if (pf->is_remote)
		/* remote replica: print target node and pool descriptor */
		UT_OUT("%s: %s %s", set_name, pf->remote->node_addr,
			pf->remote->pool_desc);
	else
		/* local part: print its path */
		UT_OUT("%s: %s", set_name, pf->part->path);
	return 0;
}
/*
 * main -- run util_poolset_foreach_part() on every file given on the
 * command line, printing each part via cb() and the returned status.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_poolset_foreach");
	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
			MAJOR_VERSION, MINOR_VERSION);
	if (argc < 2)
		UT_FATAL("usage: %s file...",
			argv[0]);
	for (int i = 1; i < argc; i++) {
		char *fname = argv[i];
		/* fname doubles as the callback argument (line prefix) */
		int ret = util_poolset_foreach_part(fname, cb, fname);
		UT_OUT("util_poolset_foreach_part(%s): %d", fname, ret);
	}
	common_fini();
	DONE(NULL);
}
| 1,293 | 20.213115 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_map_prot/pmem2_map_prot.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_map_prot.c -- pmem2_map_prot unit tests
*/
#include <stdbool.h>
#include <signal.h>
#include <setjmp.h>
#include "config.h"
#include "source.h"
#include "map.h"
#include "out.h"
#include "pmem2.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup.h"
#include "ut_fh.h"
/* per-test bundle of resources: file handle, pmem2 config and source */
struct res {
	struct FHandle *fh;		/* test-framework file handle */
	struct pmem2_config cfg;	/* mapping configuration */
	struct pmem2_source *src;	/* data source built from fh */
};
/*
 * res_prepare -- open the file with the given access mode and build a
 * pmem2 config/source pair with the requested protection flags
 */
static void
res_prepare(const char *file, struct res *res, int access, unsigned proto)
{
#ifdef _WIN32
	/* Windows uses HANDLEs, everything else uses file descriptors */
	enum file_handle_type fh_type = FH_HANDLE;
#else
	enum file_handle_type fh_type = FH_FD;
#endif
	ut_pmem2_prepare_config(&res->cfg, &res->src, &res->fh, fh_type, file,
		0, 0, access);
	pmem2_config_set_protection(&res->cfg, proto);
}
/*
 * res_cleanup -- free resources acquired by res_prepare()
 */
static void
res_cleanup(struct res *res)
{
	PMEM2_SOURCE_DELETE(&res->src);
	UT_FH_CLOSE(res->fh);
}
/* test payload written through the mappings */
static const char *word1 = "Persistent or nonpersistent: this is the question.";
/* jump target used to recover from an expected SIGSEGV */
static ut_jmp_buf_t Jmp;
/*
 * signal_handler -- called on SIGSEGV; jumps back to the test body
 */
static void
signal_handler(int sig)
{
	ut_siglongjmp(Jmp);
}
/*
 * test_rw_mode_rw_prot -- test R/W protection
 * pmem2_map() - should succeed
 * memcpy() - should succeed
 */
static int
test_rw_mode_rw_prot(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rw_mode_rw_prot <file>");
	struct res res;
	/* read/write on file opened in read/write mode - should succeed */
	res_prepare(argv[0], &res, FH_RDWR,
			PMEM2_PROT_READ | PMEM2_PROT_WRITE);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	void *addr_map = pmem2_map_get_address(map);
	/* write through the mapping and read the data back */
	memcpy_fn(addr_map, word1, strlen(word1), 0);
	UT_ASSERTeq(memcmp(addr_map, word1, strlen(word1)), 0);
	pmem2_unmap(&map);
	res_cleanup(&res);
	return 1;
}
/*
 * template_mode_prot_mismatch - try to map file with mutually exclusive FD
 * access and map protection
 */
static void
template_mode_prot_mismatch(char *file, int access, unsigned prot)
{
	struct res res;
	/* requested protection exceeds the handle's access mode - must fail */
	res_prepare(file, &res, access, prot);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_NO_ACCESS);
	res_cleanup(&res);
}
/*
 * test_r_mode_rw_prot -- mapping a read-only file with R/W protection
 * must fail with PMEM2_E_NO_ACCESS
 */
static int
test_r_mode_rw_prot(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_r_mode_rw_prot <file>");
	template_mode_prot_mismatch(argv[0], FH_READ,
		PMEM2_PROT_READ | PMEM2_PROT_WRITE);
	return 1;
}
/*
 * test_rw_modex_rwx_prot - test R/W/X protection on R/W file
 * pmem2_map() - should fail
 */
static int
test_rw_modex_rwx_prot(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rw_modex_rwx_prot <file>");
	char *file = argv[0];
	/* EXEC protection without FH_EXEC access - must be rejected */
	template_mode_prot_mismatch(file, FH_RDWR,
		PMEM2_PROT_EXEC |PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	return 1;
}
/*
 * test_rw_modex_rx_prot -- mapping a R/W file (no exec access) with R/X
 * protection must fail with PMEM2_E_NO_ACCESS
 */
static int
test_rw_modex_rx_prot(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rw_modex_rx_prot <file>");
	template_mode_prot_mismatch(argv[0], FH_RDWR,
		PMEM2_PROT_READ | PMEM2_PROT_EXEC);
	return 1;
}
/*
 * test_rw_mode_r_prot -- test read-only protection
 * pmem2_map() - should succeed
 * memcpy() - should fail (SIGSEGV is expected and caught)
 */
static int
test_rw_mode_r_prot(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rw_mode_r_prot <file>");
	/* arrange to catch SIGSEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	struct res res;
	/* read-only on file opened in read/write mode - should succeed */
	res_prepare(argv[0], &res, FH_RDWR, PMEM2_PROT_READ);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	void *addr_map = pmem2_map_get_address(map);
	if (!ut_sigsetjmp(Jmp)) {
		/* memcpy should now fail */
		memcpy_fn(addr_map, word1, strlen(word1), 0);
		UT_FATAL("memcpy successful");
	}
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* restore the default SIGSEGV disposition */
	signal(SIGSEGV, SIG_DFL);
	return 1;
}
/*
 * test_r_mode_r_prot -- test read-only protection on a read-only file
 * pmem2_map() - should succeed
 * memcpy() - should fail (SIGSEGV is expected and caught)
 */
static int
test_r_mode_r_prot(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_r_mode_r_prot <file>");
	/* arrange to catch SIGSEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	struct res res;
	/* read-only on file opened in read-only mode - should succeed */
	res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_READ);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	void *addr_map = pmem2_map_get_address(map);
	if (!ut_sigsetjmp(Jmp)) {
		/* memcpy should now fail */
		memcpy_fn(addr_map, word1, strlen(word1), 0);
		UT_FATAL("memcpy successful");
	}
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* restore the default SIGSEGV disposition */
	signal(SIGSEGV, SIG_DFL);
	return 1;
}
/*
 * test_rw_mode_none_prot -- test PROT_NONE protection
 * pmem2_map() - should succeed
 * memcpy() - should fail (SIGSEGV is expected and caught)
 */
static int
test_rw_mode_none_prot(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rw_mode_none_prot <file>");
	/* arrange to catch SIGSEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	struct res res;
	/* PROT_NONE on file opened in read-only mode - should succeed */
	res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_NONE);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	void *addr_map = pmem2_map_get_address(map);
	if (!ut_sigsetjmp(Jmp)) {
		/* memcpy should now fail */
		memcpy_fn(addr_map, word1, strlen(word1), 0);
		UT_FATAL("memcpy successful");
	}
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* restore the default SIGSEGV disposition */
	signal(SIGSEGV, SIG_DFL);
	return 1;
}
/*
 * sum_asm[] --> simple program in assembly which calculates '2 + 2' and
 * returns the result
 *
 * x86-64 machine code; copied into a mapping and then called through a
 * function pointer to probe whether the mapping is executable.
 */
static unsigned char sum_asm[] = {
0x55,				/* push %rbp */
0x48, 0x89, 0xe5,		/* mov %rsp,%rbp */
0xc7, 0x45, 0xf8, 0x02, 0x00, 0x00, 0x00, /* movl $0x2,-0x8(%rbp) */
0x8b, 0x45, 0xf8,		/* mov -0x8(%rbp),%eax */
0x01, 0xc0,			/* add %eax,%eax */
0x89, 0x45, 0xfc,		/* mov %eax,-0x4(%rbp) */
0x8b, 0x45, 0xfc,		/* mov -0x4(%rbp),%eax */
0x5d,				/* pop %rbp */
0xc3,				/* retq */
};
/* signature of the machine-code routine above: no args, returns int */
typedef int (*sum_fn)(void);
/*
 * test_rx_mode_rx_prot_do_execute -- copy the program into a R/W mapping,
 * remap the file with R/X protection, execute the program and verify
 * the result
 */
static int
test_rx_mode_rx_prot_do_execute(const struct test_case *tc, int argc,
	char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_rx_mode_rx_prot_do_execute <file>");
	char *file = argv[0];
	struct res res;
	/* Windows does not support PMEM2_PROT_WRITE combination */
	res_prepare(file, &res, FH_EXEC | FH_RDWR,
			PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	char *addr_map = pmem2_map_get_address(map);
	map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
	pmem2_unmap(&map);
	/* Windows does not support PMEM2_PROT_EXEC combination */
	pmem2_config_set_protection(&res.cfg,
			PMEM2_PROT_READ | PMEM2_PROT_EXEC);
	ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	/*
	 * bugfix: the new mapping is not guaranteed to land at the old
	 * address, so re-query it instead of executing a stale pointer
	 */
	addr_map = pmem2_map_get_address(map);
	sum_fn sum = (sum_fn)addr_map;
	int sum_result = sum();
	UT_ASSERTeq(sum_result, 4);
	pmem2_unmap(&map);
	res_cleanup(&res);
	return 1;
}
/*
 * test_rwx_mode_rx_prot_do_write -- try to copy the string into mapped memory,
 * expect failure (mapping is R/X only; SIGSEGV is expected and caught)
 */
static int
test_rwx_mode_rx_prot_do_write(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL(
		"usage: test_rwx_mode_rx_prot_do_write <file> <if_sharing>");
	/* arrange to catch SIGSEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	char *file = argv[0];
	unsigned if_sharing = ATOU(argv[1]);
	struct res res;
	/* Windows does not support PMEM2_PROT_EXEC combination */
	res_prepare(file, &res, FH_EXEC | FH_RDWR,
			PMEM2_PROT_READ | PMEM2_PROT_EXEC);
	if (if_sharing)
		/* second variant: private (copy-on-write) mapping */
		pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	char *addr_map = pmem2_map_get_address(map);
	if (!ut_sigsetjmp(Jmp)) {
		/* memcpy_fn should fail */
		map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
	}
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* restore the default SIGSEGV disposition */
	signal(SIGSEGV, SIG_DFL);
	return 2;
}
/*
 * test_rwx_mode_rwx_prot_do_execute -- copy string with the program to mapped
 * memory to prepare memory, execute the program and verify result
 */
static int
test_rwx_mode_rwx_prot_do_execute(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL(
		"usage: test_rwx_mode_rwx_prot_do_execute <file> <if_sharing>");
	char *file = argv[0];
	unsigned if_sharing = ATOU(argv[1]);
	struct res res;
	/* full R/W/X mapping: write the code and run it in place */
	res_prepare(file, &res, FH_EXEC | FH_RDWR,
		PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	if (if_sharing)
		/* second variant: private (copy-on-write) mapping */
		pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	char *addr_map = pmem2_map_get_address(map);
	map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
	sum_fn sum = (sum_fn)addr_map;
	int sum_result = sum();
	UT_ASSERTeq(sum_result, 4);
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* no handler was installed here; this just (re)sets the default */
	signal(SIGSEGV, SIG_DFL);
	return 2;
}
/*
 * test_rw_mode_rw_prot_do_execute -- copy string with the program to mapped
 * memory to prepare memory, and execute the program - should fail
 * (the mapping is R/W only; SIGSEGV is expected and caught)
 */
static int
test_rw_mode_rw_prot_do_execute(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL(
		"usage: test_rw_mode_rw_prot_do_execute <file> <if_sharing>");
	/* arrange to catch SIGSEGV */
	struct sigaction v;
	sigemptyset(&v.sa_mask);
	v.sa_flags = 0;
	v.sa_handler = signal_handler;
	SIGACTION(SIGSEGV, &v, NULL);
	char *file = argv[0];
	unsigned if_sharing = ATOU(argv[1]);
	struct res res;
	res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	if (if_sharing)
		/* second variant: private (copy-on-write) mapping */
		pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	void *addr_map = pmem2_map_get_address(map);
	map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
	sum_fn sum = (sum_fn)addr_map;
	if (!ut_sigsetjmp(Jmp)) {
		sum(); /* sum function should now fail */
	}
	pmem2_unmap(&map);
	res_cleanup(&res);
	/* bugfix: restore the default handler, as the sibling tests do */
	signal(SIGSEGV, SIG_DFL);
	return 2;
}
static const char *initial_state = "No code.";
/*
 * test_rwx_prot_map_priv_do_execute -- copy string with the program to
 * the mapped memory with MAP_PRIVATE to prepare memory, execute the program
 * and verify the result; the shared file content must stay untouched
 */
static int
test_rwx_prot_map_priv_do_execute(const struct test_case *tc,
	int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL(
		"usage: test_rwx_prot_map_priv_do_execute <file> <if_sharing>");
	char *file = argv[0];
	struct res res;
	res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	struct pmem2_map *map;
	int ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	char *addr_map = pmem2_map_get_address(map);
	/*
	 * bugfix: initial_state is a pointer, so sizeof(initial_state) is
	 * the pointer size, not the string length; copy the whole string
	 * including the terminating NUL
	 */
	map->memcpy_fn(addr_map, initial_state, strlen(initial_state) + 1, 0);
	pmem2_unmap(&map);
	res_cleanup(&res);
	res_prepare(file, &res, FH_READ | FH_EXEC,
		PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ);
	pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
	ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	addr_map = pmem2_map_get_address(map);
	/* overwrite the private copy with executable code and run it */
	map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
	sum_fn sum = (sum_fn)addr_map;
	int sum_result = sum();
	UT_ASSERTeq(sum_result, 4);
	pmem2_unmap(&map);
	ret = pmem2_map(&res.cfg, res.src, &map);
	UT_ASSERTeq(ret, 0);
	addr_map = pmem2_map_get_address(map);
	/* check if changes in private mapping affect initial state */
	UT_ASSERTeq(memcmp(addr_map, initial_state, strlen(initial_state)), 0);
	pmem2_unmap(&map);
	res_cleanup(&res);
	return 1;
}
/*
 * test_cases -- available test cases
 *
 * Dispatched by name from the command line via TEST_CASE_PROCESS().
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_rw_mode_rw_prot),
	TEST_CASE(test_r_mode_rw_prot),
	TEST_CASE(test_rw_modex_rwx_prot),
	TEST_CASE(test_rw_modex_rx_prot),
	TEST_CASE(test_rw_mode_r_prot),
	TEST_CASE(test_r_mode_r_prot),
	TEST_CASE(test_rw_mode_none_prot),
	TEST_CASE(test_rx_mode_rx_prot_do_execute),
	TEST_CASE(test_rwx_mode_rx_prot_do_write),
	TEST_CASE(test_rwx_mode_rwx_prot_do_execute),
	TEST_CASE(test_rw_mode_rw_prot_do_execute),
	TEST_CASE(test_rwx_prot_map_priv_do_execute),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize logging and dispatch the requested test cases
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_map_prot");
	util_init();
	out_init("pmem2_map_prot", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 13,698 | 22.537801 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_layout/obj_layout.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* obj_layout.c -- unit test for layout
*
* This test should be modified after every layout change. It's here to prevent
* any accidental layout changes.
*/
#include "util.h"
#include "unittest.h"
#include "sync.h"
#include "heap_layout.h"
#include "lane.h"
#include "tx.h"
#include "ulog.h"
#include "list.h"
#define SIZEOF_CHUNK_HEADER_V3 (8)
#define MAX_CHUNK_V3 (65535 - 7)
#define SIZEOF_CHUNK_V3 (1024ULL * 256)
#define SIZEOF_CHUNK_RUN_HEADER_V3 (16)
#define SIZEOF_ZONE_HEADER_V3 (64)
#define SIZEOF_ZONE_METADATA_V3 (SIZEOF_ZONE_HEADER_V3 +\
SIZEOF_CHUNK_HEADER_V3 * MAX_CHUNK_V3)
#define SIZEOF_HEAP_HDR_V3 (1024)
#define SIZEOF_LEGACY_ALLOCATION_HEADER_V3 (64)
#define SIZEOF_COMPACT_ALLOCATION_HEADER_V3 (16)
#define SIZEOF_LOCK_V3 (64)
#define SIZEOF_PMEMOID_V3 (16)
#define SIZEOF_LIST_ENTRY_V3 (SIZEOF_PMEMOID_V3 * 2)
#define SIZEOF_LIST_HEAD_V3 (SIZEOF_PMEMOID_V3 + SIZEOF_LOCK_V3)
#define SIZEOF_LANE_SECTION_V3 (1024)
#define SIZEOF_LANE_V3 (3 * SIZEOF_LANE_SECTION_V3)
#define SIZEOF_ULOG_V4 (CACHELINE_SIZE)
#define SIZEOF_ULOG_BASE_ENTRY_V4 (8)
#define SIZEOF_ULOG_VAL_ENTRY_V4 (16)
#define SIZEOF_ULOG_BUF_ENTRY_V4 (24)
#if CACHELINE_SIZE == 128
#define SIZEOF_LANE_UNDO_SIZE (1920)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (128)
#elif CACHELINE_SIZE == 64
#define SIZEOF_LANE_UNDO_SIZE (2048)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (192)
#else
#error "Unknown cacheline size"
#endif
/* minimal layout used to pin down list entry/head sizes */
POBJ_LAYOUT_BEGIN(layout);
POBJ_LAYOUT_ROOT(layout, struct foo);
POBJ_LAYOUT_END(layout);
/* a single list entry; its size must equal SIZEOF_LIST_ENTRY_V3 */
struct foo {
	POBJ_LIST_ENTRY(struct foo) f;
};
/* list head; its size must equal SIZEOF_LIST_HEAD_V3 */
POBJ_LIST_HEAD(foo_head, struct foo);
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_layout");
UT_COMPILE_ERROR_ON(CHUNKSIZE != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk);
ASSERT_ALIGNED_FIELD(struct chunk, data);
ASSERT_ALIGNED_CHECK(struct chunk);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_run_header);
ASSERT_ALIGNED_FIELD(struct chunk_run_header, block_size);
ASSERT_ALIGNED_FIELD(struct chunk_run_header, alignment);
ASSERT_ALIGNED_CHECK(struct chunk_run_header);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run_header) !=
SIZEOF_CHUNK_RUN_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_run);
ASSERT_ALIGNED_FIELD(struct chunk_run, hdr);
ASSERT_ALIGNED_FIELD(struct chunk_run, content);
ASSERT_ALIGNED_CHECK(struct chunk_run);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_header);
ASSERT_ALIGNED_FIELD(struct chunk_header, type);
ASSERT_ALIGNED_FIELD(struct chunk_header, flags);
ASSERT_ALIGNED_FIELD(struct chunk_header, size_idx);
ASSERT_ALIGNED_CHECK(struct chunk_header);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_header) !=
SIZEOF_CHUNK_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct zone_header);
ASSERT_ALIGNED_FIELD(struct zone_header, magic);
ASSERT_ALIGNED_FIELD(struct zone_header, size_idx);
ASSERT_ALIGNED_FIELD(struct zone_header, reserved);
ASSERT_ALIGNED_CHECK(struct zone_header);
UT_COMPILE_ERROR_ON(sizeof(struct zone_header) !=
SIZEOF_ZONE_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct zone);
ASSERT_ALIGNED_FIELD(struct zone, header);
ASSERT_ALIGNED_FIELD(struct zone, chunk_headers);
ASSERT_ALIGNED_CHECK(struct zone);
UT_COMPILE_ERROR_ON(sizeof(struct zone) !=
SIZEOF_ZONE_METADATA_V3);
ASSERT_ALIGNED_BEGIN(struct heap_header);
ASSERT_ALIGNED_FIELD(struct heap_header, signature);
ASSERT_ALIGNED_FIELD(struct heap_header, major);
ASSERT_ALIGNED_FIELD(struct heap_header, minor);
ASSERT_ALIGNED_FIELD(struct heap_header, unused);
ASSERT_ALIGNED_FIELD(struct heap_header, chunksize);
ASSERT_ALIGNED_FIELD(struct heap_header, chunks_per_zone);
ASSERT_ALIGNED_FIELD(struct heap_header, reserved);
ASSERT_ALIGNED_FIELD(struct heap_header, checksum);
ASSERT_ALIGNED_CHECK(struct heap_header);
UT_COMPILE_ERROR_ON(sizeof(struct heap_header) !=
SIZEOF_HEAP_HDR_V3);
ASSERT_ALIGNED_BEGIN(struct allocation_header_legacy);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, size);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused2);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, root_size);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, type_num);
ASSERT_ALIGNED_CHECK(struct allocation_header_legacy);
UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_legacy) !=
SIZEOF_LEGACY_ALLOCATION_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct allocation_header_compact);
ASSERT_ALIGNED_FIELD(struct allocation_header_compact, size);
ASSERT_ALIGNED_FIELD(struct allocation_header_compact, extra);
ASSERT_ALIGNED_CHECK(struct allocation_header_compact);
UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_compact) !=
SIZEOF_COMPACT_ALLOCATION_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct ulog);
ASSERT_ALIGNED_FIELD(struct ulog, checksum);
ASSERT_ALIGNED_FIELD(struct ulog, next);
ASSERT_ALIGNED_FIELD(struct ulog, capacity);
ASSERT_ALIGNED_FIELD(struct ulog, gen_num);
ASSERT_ALIGNED_FIELD(struct ulog, flags);
ASSERT_ALIGNED_FIELD(struct ulog, unused);
ASSERT_ALIGNED_CHECK(struct ulog);
UT_COMPILE_ERROR_ON(sizeof(struct ulog) !=
SIZEOF_ULOG_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_base, offset);
ASSERT_ALIGNED_CHECK(struct ulog_entry_base);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_base) !=
SIZEOF_ULOG_BASE_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_val);
ASSERT_ALIGNED_FIELD(struct ulog_entry_val, base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_val, value);
ASSERT_ALIGNED_CHECK(struct ulog_entry_val);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_val) !=
SIZEOF_ULOG_VAL_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_buf);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, checksum);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, size);
ASSERT_ALIGNED_CHECK(struct ulog_entry_buf);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_buf) !=
SIZEOF_ULOG_BUF_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(PMEMoid);
ASSERT_ALIGNED_FIELD(PMEMoid, pool_uuid_lo);
ASSERT_ALIGNED_FIELD(PMEMoid, off);
ASSERT_ALIGNED_CHECK(PMEMoid);
UT_COMPILE_ERROR_ON(sizeof(PMEMoid) !=
SIZEOF_PMEMOID_V3);
UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(PMEMmutex_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(os_mutex_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(PMEMrwlock) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(PMEMrwlock_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(os_rwlock_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(PMEMcond) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(PMEMcond_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(os_cond_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(struct foo) != SIZEOF_LIST_ENTRY_V3);
UT_COMPILE_ERROR_ON(sizeof(struct list_entry) != SIZEOF_LIST_ENTRY_V3);
UT_COMPILE_ERROR_ON(sizeof(struct foo_head) != SIZEOF_LIST_HEAD_V3);
UT_COMPILE_ERROR_ON(sizeof(struct list_head) != SIZEOF_LIST_HEAD_V3);
ASSERT_ALIGNED_BEGIN(struct lane_layout);
ASSERT_ALIGNED_FIELD(struct lane_layout, internal);
ASSERT_ALIGNED_FIELD(struct lane_layout, external);
ASSERT_ALIGNED_FIELD(struct lane_layout, undo);
ASSERT_ALIGNED_CHECK(struct lane_layout);
UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) !=
SIZEOF_LANE_V3);
UT_COMPILE_ERROR_ON(LANE_UNDO_SIZE != SIZEOF_LANE_UNDO_SIZE);
UT_COMPILE_ERROR_ON(LANE_REDO_EXTERNAL_SIZE !=
SIZEOF_LANE_REDO_EXTERNAL_SIZE);
UT_COMPILE_ERROR_ON(LANE_REDO_INTERNAL_SIZE !=
SIZEOF_LANE_REDO_INTERNAL_SIZE);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 8,411 | 35.103004 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_add_range_direct/obj_tx_add_range_direct.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_add_range_direct.c -- unit test for pmemobj_tx_add_range_direct
*/
#include <string.h>
#include <stddef.h>
#include "tx.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_add_range_direct"
#define OBJ_SIZE 1024
enum type_number {
TYPE_OBJ,
TYPE_OBJ_ABORT,
};
TOID_DECLARE(struct object, 0);
/* test object: a small scalar followed by a byte array filling OBJ_SIZE */
struct object {
	size_t value;
	unsigned char data[OBJ_SIZE - sizeof(size_t)];
};
/* offsets/sizes of the two ranges that get added to transactions */
#define VALUE_OFF (offsetof(struct object, value))
#define VALUE_SIZE (sizeof(size_t))
#define DATA_OFF (offsetof(struct object, data))
#define DATA_SIZE (OBJ_SIZE - sizeof(size_t))
/* marker values written by the tests */
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2
/*
 * do_tx_zalloc -- allocate a zeroed object with the given type number
 * inside its own transaction; returns OID_NULL on failure
 */
static PMEMoid
do_tx_zalloc(PMEMobjpool *pop, unsigned type_num)
{
	PMEMoid oid = OID_NULL;

	TX_BEGIN(pop) {
		oid = pmemobj_tx_zalloc(sizeof(struct object), type_num);
	} TX_END

	return oid;
}
/*
 * do_tx_alloc -- allocate an (uninitialized) object with the given type
 * number and zero only its first init_num bytes; returns OID_NULL on
 * failure
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, uint64_t type_num, uint64_t init_num)
{
	PMEMoid oid = OID_NULL;

	TX_BEGIN(pop) {
		oid = pmemobj_tx_alloc(sizeof(struct object), type_num);
		pmemobj_memset(pop, pmemobj_direct(oid), 0, init_num, 0);
	} TX_END

	return oid;
}
/*
 * do_tx_add_range_alloc_commit -- call add_range_direct on object allocated
 * within the same transaction and commit the transaction
 */
static void
do_tx_add_range_alloc_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
		UT_ASSERT(!TOID_IS_NULL(obj));
		char *ptr = (char *)pmemobj_direct(obj.oid);
		/* snapshot the value field, then modify it */
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		/* snapshot the data array, then fill it */
		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF,
				DATA_SIZE);
		UT_ASSERTeq(ret, 0);
		pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
			DATA_SIZE);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* after commit both writes must be visible */
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
}
/*
 * do_tx_add_range_alloc_abort -- call add_range_direct on object allocated
 * within the same transaction and abort the transaction
 */
static void
do_tx_add_range_alloc_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ_ABORT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF,
				DATA_SIZE);
		UT_ASSERTeq(ret, 0);
		pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
			DATA_SIZE);
		/* force an abort; the allocation must be rolled back too */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* the aborted allocation must not be discoverable */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_OBJ_ABORT));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_add_range_twice_commit -- call add_range_direct on the same area
 * twice and commit the transaction
 */
static void
do_tx_add_range_twice_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	UT_ASSERT(!TOID_IS_NULL(obj));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		/* adding the same range twice must be a harmless no-op */
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* the last write wins after commit */
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
 * do_tx_add_range_twice_abort -- call add_range_direct on the same area
 * twice and abort the transaction
 */
static void
do_tx_add_range_twice_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	UT_ASSERT(!TOID_IS_NULL(obj));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_2;
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* both writes rolled back to the zeroed state */
	UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
 * do_tx_add_range_abort_after_nested -- call add_range_direct in an outer
 * and in a nested transaction, commit the nested one and then abort the
 * outer one; both modifications must be rolled back
 */
static void
do_tx_add_range_abort_after_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj1)->value = TEST_VALUE_1;
		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);
			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
		/* abort the outer transaction after the nested commit */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* everything, including the nested changes, is rolled back */
	UT_ASSERTeq(D_RO(obj1)->value, 0);
	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
 * do_tx_add_range_abort_nested -- call add_range_direct in an outer and
 * in a nested transaction and abort from within the nested one; the
 * abort propagates outwards and rolls back both modifications
 */
static void
do_tx_add_range_abort_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj1)->value = TEST_VALUE_1;
		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);
			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);
			/* abort from inside the nested transaction */
			pmemobj_tx_abort(-1);
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* the nested abort aborts the outer transaction as well */
	UT_ASSERTeq(D_RO(obj1)->value, 0);
	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
 * do_tx_add_range_commit_nested -- call add_range_direct in an outer and
 * in a nested transaction and commit both; both modifications must persist
 */
static void
do_tx_add_range_commit_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj1)->value = TEST_VALUE_1;
		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);
			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], TEST_VALUE_2);
}
/*
 * do_tx_add_range_abort -- call add_range_direct and abort the tx;
 * the snapshotted write must be rolled back
 */
static void
do_tx_add_range_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* the object was zalloc'ed, so abort must restore zero */
	UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
 * do_tx_add_range_commit -- call add_range_direct and commit tx;
 * the snapshotted write must persist
 */
static void
do_tx_add_range_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
 * do_tx_xadd_range_no_flush_commit -- call xadd_range_direct with
 * POBJ_XADD_NO_FLUSH flag set and commit tx; the write is deliberately
 * left unflushed so tooling (pmemcheck) can detect it
 */
static void
do_tx_xadd_range_no_flush_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_NO_FLUSH);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		/* let pmemcheck find we didn't flush it */
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
 * do_tx_xadd_range_no_snapshot_commit -- call xadd_range_direct with
 * POBJ_XADD_NO_SNAPSHOT flag, commit the transaction
 */
static void
do_tx_xadd_range_no_snapshot_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		/* the range is tracked for flushing but not snapshotted */
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
 * do_tx_xadd_range_no_snapshot_abort -- call xadd_range_direct with
 * POBJ_XADD_NO_SNAPSHOT flag, modify the value, abort the transaction
 */
static void
do_tx_xadd_range_no_snapshot_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	D_RW(obj)->value = TEST_VALUE_1;
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
				POBJ_XADD_NO_SNAPSHOT);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_2;
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/*
	 * value added with NO_SNAPSHOT flag should NOT be rolled back
	 * after abort
	 */
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
 * do_tx_xadd_range_no_uninit_check_commit -- call xadd_range_direct for
 * initialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit the
 * tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
 * do_tx_xadd_range_no_uninit_check_commit_uninit -- call xadd_range_direct for
 * uninitialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit
 * the tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit_uninit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	/* do_tx_alloc() (not zalloc) leaves the memory uninitialized */
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
 * do_tx_xadd_range_no_uninit_check_commit_part_uninit -- call
 * xadd_range_direct for partially uninitialized memory with
 * POBJ_XADD_ASSUME_INITIALIZED flag set only for the uninitialized part
 * and commit the tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit_part_uninit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		/* the first VALUE_SIZE bytes are initialized -- no flag */
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
 * do_tx_add_range_no_uninit_check_commit_no_flag -- call add_range_direct
 * for partially uninitialized memory without any flag.
 */
static void
do_tx_add_range_no_uninit_check_commit_no_flag(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
 * do_tx_xadd_range_no_uninit_check_abort -- call pmemobj_tx_range with
 * POBJ_XADD_ASSUME_INITIALIZED flag, modify the value inside aborted
 * transaction
 */
static void
do_tx_xadd_range_no_uninit_check_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
				POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* no post-conditions: memory was uninitialized before the abort */
}
/*
 * do_tx_commit_and_abort -- use range cache, commit and then abort to make
 * sure that it won't affect previously modified data.
 */
static void
do_tx_commit_and_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	TX_BEGIN(pop) {
		TX_SET(obj, value, TEST_VALUE_1); /* this will land in cache */
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* an empty aborted transaction must not undo the committed write */
	TX_BEGIN(pop) {
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
 * test_add_direct_macros -- test TX_ADD_DIRECT, TX_ADD_FIELD_DIRECT and
 * TX_SET_DIRECT; each macro is exercised in its own committed transaction
 */
static void
test_add_direct_macros(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	/* TX_SET_DIRECT: snapshot a field and assign it in one step */
	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_SET_DIRECT(o, value, TEST_VALUE_1);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* TX_ADD_DIRECT: snapshot the whole object, then modify it */
	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_ADD_DIRECT(o);
		o->value = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
	/* TX_ADD_FIELD_DIRECT: snapshot a single field, then modify it */
	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_ADD_FIELD_DIRECT(o, value);
		o->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
#define MAX_CACHED_RANGES 100
/*
 * test_tx_corruption_bug -- test whether tx_adds for small objects from one
 * transaction does NOT leak to the next transaction
 */
static void
test_tx_corruption_bug(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	struct object *o = D_RW(obj);
	unsigned char i;
	/* i is an unsigned char, so the loop bound must fit in 255 */
	UT_COMPILE_ERROR_ON(1.5 * MAX_CACHED_RANGES > 255);
	/* overflow the range cache with more snapshots than it can hold */
	TX_BEGIN(pop) {
		for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) {
			TX_ADD_DIRECT(&o->data[i]);
			o->data[i] = i;
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
		UT_ASSERTeq((unsigned char)o->data[i], i);
	/* an aborted tx must restore only its own ranges, nothing more */
	TX_BEGIN(pop) {
		for (i = 0; i < 0.1 * MAX_CACHED_RANGES; ++i) {
			TX_ADD_DIRECT(&o->data[i]);
			o->data[i] = i + 10;
		}
		pmemobj_tx_abort(EINVAL);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
		UT_ASSERTeq((unsigned char)o->data[i], i);
	pmemobj_free(&obj.oid);
}
/*
 * do_tx_add_range_too_large -- request a snapshot larger than
 * PMEMOBJ_MAX_ALLOC_SIZE and verify all four failure modes: default
 * (aborting) behavior, POBJ_XADD_NO_ABORT flag, and
 * POBJ_TX_FAILURE_RETURN set on the transaction for both the plain and
 * the x-variant of add_range_direct
 */
static void
do_tx_add_range_too_large(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	int ret = 0;
	/* default behavior: the oversized request aborts the transaction */
	TX_BEGIN(pop) {
		ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, 0);
	} TX_END
	errno = 0;
	ret = 0;
	/* POBJ_XADD_NO_ABORT: the error is returned, the tx commits */
	TX_BEGIN(pop) {
		ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
				PMEMOBJ_MAX_ALLOC_SIZE + 1, POBJ_XADD_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	errno = 0;
	ret = 0;
	/* POBJ_TX_FAILURE_RETURN makes the plain variant return too */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	errno = 0;
	ret = 0;
	/* ... and the x-variant with no flags as well */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
				PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	errno = 0;
}
/*
 * do_tx_add_range_lots_of_small_snapshots -- fill a range twice the size
 * of the default range cache with many 8-byte snapshots and commit;
 * exercises the range-cache overflow path
 */
static void
do_tx_add_range_lots_of_small_snapshots(PMEMobjpool *pop)
{
	size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
	size_t snapshot_s = 8;
	PMEMoid obj;
	int ret = pmemobj_zalloc(pop, &obj, s, 0);
	UT_ASSERTeq(ret, 0);
	TX_BEGIN(pop) {
		for (size_t n = 0; n < s; n += snapshot_s) {
			void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
			/*
			 * check the return value -- every other test in this
			 * file asserts that add_range_direct succeeds
			 */
			ret = pmemobj_tx_add_range_direct(addr, snapshot_s);
			UT_ASSERTeq(ret, 0);
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_add_cache_overflowing_range -- snapshot ranges sized just below
 * the cache threshold so that one add_range is split across two caches,
 * then abort and verify the rollback restored the zeroed content
 */
static void
do_tx_add_cache_overflowing_range(PMEMobjpool *pop)
{
	/*
	 * This test adds snapshot to the cache, but in way that results in
	 * one of the add_range being split into two caches.
	 */
	size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
	size_t snapshot_s = TX_DEFAULT_RANGE_CACHE_THRESHOLD - 8;
	PMEMoid obj;
	int ret = pmemobj_zalloc(pop, &obj, s, 0);
	UT_ASSERTeq(ret, 0);
	TX_BEGIN(pop) {
		size_t n = 0;
		while (n != s) {
			/* clamp the last snapshot to the end of the object */
			if (n + snapshot_s > s)
				snapshot_s = s - n;
			void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
			/*
			 * check the return value -- every other test in this
			 * file asserts that add_range_direct succeeds
			 */
			ret = pmemobj_tx_add_range_direct(addr, snapshot_s);
			UT_ASSERTeq(ret, 0);
			memset(addr, 0xc, snapshot_s);
			n += snapshot_s;
		}
		pmemobj_tx_abort(0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* abort must have rolled the 0xc pattern back to all-zeroes */
	UT_ASSERT(util_is_zeroed(pmemobj_direct(obj), s));
	UT_ASSERTne(errno, 0);
	errno = 0;
	pmemobj_free(&obj);
}
/*
 * main -- create a fresh pool and run every obj_tx_add_range_direct test
 * case; VALGRIND_WRITE_STATS separates the cases in valgrind logs
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_add_range_direct");
	util_init();
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL * 4,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_add_range_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_commit_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort_after_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_twice_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_twice_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_alloc_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_alloc_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_commit_and_abort(pop);
	VALGRIND_WRITE_STATS;
	test_add_direct_macros(pop);
	VALGRIND_WRITE_STATS;
	test_tx_corruption_bug(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_too_large(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_lots_of_small_snapshots(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_cache_overflowing_range(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_snapshot_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_snapshot_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit_uninit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit_part_uninit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_no_uninit_check_commit_no_flag(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_flush_commit(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 20,975 | 22.177901 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_many_size_allocs/obj_many_size_allocs.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_many_size_allocs.c -- allocation of many objects with different sizes
*
*/
#include <stddef.h>
#include "unittest.h"
#include "heap.h"
#define LAYOUT_NAME "many_size_allocs"
#define TEST_ALLOC_SIZE 2048
#define LAZY_LOAD_SIZE 10
#define LAZY_LOAD_BIG_SIZE 150
struct cargs {
size_t size;
};
/*
 * test_constructor -- object constructor; fills the allocation with a
 * byte pattern derived from its size
 */
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
	struct cargs *a = args;
	/* do not use pmem_memset_persist() here */
	pmemobj_memset_persist(pop, addr, a->size % 256, a->size);
	return 0;
}
/*
 * test_allocs -- allocate objects of every size from 1 to
 * TEST_ALLOC_SIZE-1, reopen the pool, then free them all; returns the
 * reopened pool handle
 */
static PMEMobjpool *
test_allocs(PMEMobjpool *pop, const char *path)
{
	PMEMoid *oid = MALLOC(sizeof(PMEMoid) * TEST_ALLOC_SIZE);
	/* a zero-sized allocation must fail */
	if (pmemobj_alloc(pop, &oid[0], 0, 0, NULL, NULL) == 0)
		UT_FATAL("pmemobj_alloc(0) succeeded");
	for (unsigned i = 1; i < TEST_ALLOC_SIZE; ++i) {
		struct cargs args = { i };
		if (pmemobj_alloc(pop, &oid[i], i, 0,
				test_constructor, &args) != 0)
			UT_FATAL("!pmemobj_alloc");
		UT_ASSERT(!OID_IS_NULL(oid[i]));
	}
	/* close, check and reopen to verify the heap survives a restart */
	pmemobj_close(pop);
	UT_ASSERT(pmemobj_check(path, LAYOUT_NAME) == 1);
	UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);
	/* use unsigned for consistency with the allocation loop above */
	for (unsigned i = 1; i < TEST_ALLOC_SIZE; ++i) {
		pmemobj_free(&oid[i]);
		UT_ASSERT(OID_IS_NULL(oid[i]));
	}
	FREE(oid);
	return pop;
}
/*
 * test_lazy_load -- allocate three small objects, reopen the pool, then
 * free the middle one and replace it with a larger allocation; returns
 * the reopened pool handle
 */
static PMEMobjpool *
test_lazy_load(PMEMobjpool *pop, const char *path)
{
	PMEMoid oid[3];
	int ret = pmemobj_alloc(pop, &oid[0], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_alloc(pop, &oid[2], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	/* reopen so the following free/alloc run on a lazily-loaded heap */
	pmemobj_close(pop);
	UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);
	pmemobj_free(&oid[1]);
	ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_BIG_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	return pop;
}
#define ALLOC_BLOCK_SIZE 64
#define MAX_BUCKET_MAP_ENTRIES (RUN_DEFAULT_SIZE / ALLOC_BLOCK_SIZE)
/*
 * test_all_classes -- for every multiple of ALLOC_BLOCK_SIZE up to
 * MAX_BUCKET_MAP_ENTRIES blocks, allocate until the pool is exhausted,
 * then free everything
 */
static void
test_all_classes(PMEMobjpool *pop)
{
	for (unsigned i = 1; i <= MAX_BUCKET_MAP_ENTRIES; ++i) {
		int err;
		int nallocs = 0;
		/* allocate until pmemobj_alloc() reports out of space */
		while ((err = pmemobj_alloc(pop, NULL, i * ALLOC_BLOCK_SIZE, 0,
			NULL, NULL)) == 0) {
			nallocs++;
		}
		UT_ASSERT(nallocs > 0);
		/* release every object before trying the next size class */
		PMEMoid iter, niter;
		POBJ_FOREACH_SAFE(pop, iter, niter) {
			pmemobj_free(&iter);
		}
	}
}
/*
 * main -- create a pool and run the many-size allocation tests;
 * test_lazy_load() and test_allocs() reopen the pool and return the
 * fresh handle
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_many_size_allocs");
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	pop = test_lazy_load(pop, path);
	pop = test_allocs(pop, path);
	test_all_classes(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 2,837 | 20.179104 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_integration/pmem2_integration.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_integration.c -- pmem2 integration tests
*/
#include "libpmem2.h"
#include "unittest.h"
#include "rand.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"
#define N_GRANULARITIES 3 /* BYTE, CACHE_LINE, PAGE */
/*
 * map_invalid -- expect pmem2_map() to fail with the given error code
 * and to clear the caller-provided map pointer
 */
static void
map_invalid(struct pmem2_config *cfg, struct pmem2_source *src, int result)
{
	/* poison the out-pointer so we can tell pmem2_map() wrote to it */
	struct pmem2_map *out = (struct pmem2_map *)0x7;
	int err = pmem2_map(cfg, src, &out);
	UT_PMEM2_EXPECT_RETURN(err, result);
	UT_ASSERTeq(out, NULL);
}
/*
 * map_valid -- map the source, assert success and that the mapping has
 * the expected length, and hand the mapping back to the caller
 */
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
	struct pmem2_map *out = NULL;
	int err = pmem2_map(cfg, src, &out);
	UT_PMEM2_EXPECT_RETURN(err, 0);
	UT_ASSERTne(out, NULL);
	/* the whole source must be mapped */
	UT_ASSERTeq(pmem2_map_get_size(out), size);
	return out;
}
/*
 * test_reuse_cfg -- map pmem2_map twice using the same pmem2_config
 */
static int
test_reuse_cfg(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_reuse_cfg <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	/* the same config must produce two independent valid mappings */
	struct pmem2_map *map1 = map_valid(cfg, src, size);
	struct pmem2_map *map2 = map_valid(cfg, src, size);
	/* cleanup after the test */
	pmem2_unmap(&map2);
	pmem2_unmap(&map1);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_reuse_cfg_with_diff_fd -- map pmem2_map using the same pmem2_config
 * with changed file descriptor
 */
static int
test_reuse_cfg_with_diff_fd(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_reuse_cfg_with_diff_fd <file> <file2>");
	char *file1 = argv[0];
	int fd1 = OPEN(file1, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd1,
					PMEM2_GRANULARITY_PAGE);
	size_t size1;
	UT_ASSERTeq(pmem2_source_size(src, &size1), 0);
	struct pmem2_map *map1 = map_valid(cfg, src, size1);
	char *file2 = argv[1];
	int fd2 = OPEN(file2, O_RDWR);
	/* set another valid file descriptor in source */
	struct pmem2_source *src2;
	UT_ASSERTeq(pmem2_source_from_fd(&src2, fd2), 0);
	size_t size2;
	UT_ASSERTeq(pmem2_source_size(src2, &size2), 0);
	/* the old config combined with the new source must still map */
	struct pmem2_map *map2 = map_valid(cfg, src2, size2);
	/* cleanup after the test */
	pmem2_unmap(&map2);
	CLOSE(fd2);
	pmem2_unmap(&map1);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	pmem2_source_delete(&src2);
	CLOSE(fd1);
	return 2;
}
/*
 * test_register_pmem -- map, use and unmap memory
 */
static int
test_register_pmem(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_register_pmem <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	char *word = "XXXXXXXX";
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	struct pmem2_map *map = map_valid(cfg, src, size);
	char *addr = pmem2_map_get_address(map);
	size_t length = strlen(word);
	/* write some data in mapped memory without persisting data */
	memcpy(addr, word, length);
	/* cleanup after the test */
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_use_misc_lens_and_offsets -- test with multiple offsets and lengths:
 * fill the file with random data, then re-map every aligned
 * (offset, length) combination and compare against the original content
 */
static int
test_use_misc_lens_and_offsets(const struct test_case *tc,
		int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_use_misc_lens_and_offsets <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t len;
	UT_ASSERTeq(pmem2_source_size(src, &len), 0);
	struct pmem2_map *map = map_valid(cfg, src, len);
	char *base = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
	/* fill and persist random reference data */
	rng_t rng;
	randomize_r(&rng, 13); /* arbitrarily chosen value */
	for (size_t i = 0; i < len; i++)
		base[i] = (char)rnd64_r(&rng);
	persist_fn(base, len);
	UT_ASSERTeq(len % Ut_mmap_align, 0);
	/* try every mmap-aligned offset within every aligned length */
	for (size_t l = len; l > 0; l -= Ut_mmap_align) {
		for (size_t off = 0; off < l; off += Ut_mmap_align) {
			size_t len2 = l - off;
			int ret = pmem2_config_set_length(cfg, len2);
			UT_PMEM2_EXPECT_RETURN(ret, 0);
			ret = pmem2_config_set_offset(cfg, off);
			UT_PMEM2_EXPECT_RETURN(ret, 0);
			struct pmem2_map *map2 = map_valid(cfg, src, len2);
			char *ptr = pmem2_map_get_address(map2);
			UT_ASSERTeq(ret = memcmp(base + off, ptr, len2), 0);
			pmem2_unmap(&map2);
		}
	}
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
struct gran_test_ctx;
typedef void(*map_func)(struct pmem2_config *cfg,
struct pmem2_source *src, struct gran_test_ctx *ctx);
/*
* gran_test_ctx -- essential parameters used by granularity test
*/
struct gran_test_ctx {
map_func map_with_expected_gran;
enum pmem2_granularity expected_granularity;
};
/*
 * map_with_avail_gran -- map the range with valid granularity,
 * includes cleanup
 */
static void
map_with_avail_gran(struct pmem2_config *cfg,
		struct pmem2_source *src, struct gran_test_ctx *ctx)
{
	struct pmem2_map *map;
	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(map, NULL);
	/* the effective store granularity must match the expectation */
	UT_ASSERTeq(ctx->expected_granularity,
			pmem2_map_get_store_granularity(map));
	/* cleanup after the test */
	pmem2_unmap(&map);
}
/*
 * map_with_unavail_gran -- mapping with an unavailable granularity must
 * fail with PMEM2_E_GRANULARITY_NOT_SUPPORTED and produce no map
 */
static void
map_with_unavail_gran(struct pmem2_config *cfg,
		struct pmem2_source *src, struct gran_test_ctx *unused)
{
	struct pmem2_map *out;
	int err = pmem2_map(cfg, src, &out);
	UT_PMEM2_EXPECT_RETURN(err, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
	/* log the translated error message for the test trace */
	UT_ERR("%s", pmem2_errormsg());
	UT_ASSERTeq(out, NULL);
}
static const map_func map_with_gran[N_GRANULARITIES][N_GRANULARITIES] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {map_with_avail_gran, map_with_unavail_gran, map_with_unavail_gran},
/* CL */ {map_with_avail_gran, map_with_avail_gran, map_with_unavail_gran},
/* PAGE */ {map_with_avail_gran, map_with_avail_gran, map_with_avail_gran}};
static const enum pmem2_granularity gran_id2granularity[N_GRANULARITIES] = {
PMEM2_GRANULARITY_BYTE,
PMEM2_GRANULARITY_CACHE_LINE,
PMEM2_GRANULARITY_PAGE};
/*
 * str2gran_id -- parse a granularity index from the given string and
 * assert it falls inside [0, N_GRANULARITIES)
 */
static int
str2gran_id(const char *in)
{
	int id = atoi(in);
	UT_ASSERT(id >= 0);
	UT_ASSERT(id < N_GRANULARITIES);
	return id;
}
/*
 * test_granularity -- performs pmem2_map with certain expected granularity
 * in context of certain available granularity
 */
static int
test_granularity(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 3)
		UT_FATAL(
		"usage: test_granularity <file>"
			" <available_granularity> <requested_granularity>");
	struct gran_test_ctx ctx;
	int avail_gran_id = str2gran_id(argv[1]);
	int req_gran_id = str2gran_id(argv[2]);
	ctx.expected_granularity = gran_id2granularity[avail_gran_id];
	/* the table decides whether this combination maps or fails */
	ctx.map_with_expected_gran = map_with_gran[req_gran_id][avail_gran_id];
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
				gran_id2granularity[req_gran_id]);
	ctx.map_with_expected_gran(cfg, src, &ctx);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 3;
}
/*
 * test_len_not_aligned -- try to use unaligned length;
 * pmem2_map must fail with PMEM2_E_LENGTH_UNALIGNED
 */
static int
test_len_not_aligned(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_len_not_aligned <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t len, alignment;
	int ret = pmem2_source_size(src, &len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_SOURCE_ALIGNMENT(src, &alignment);
	UT_ASSERT(len > alignment);
	/* break the alignment by shortening an aligned length by one */
	size_t aligned_len = ALIGN_DOWN(len, alignment);
	size_t unaligned_len = aligned_len - 1;
	ret = pmem2_config_set_length(cfg, unaligned_len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	map_invalid(cfg, src, PMEM2_E_LENGTH_UNALIGNED);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_len_aligned -- try to use aligned length; mapping must succeed
 */
static int
test_len_aligned(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_len_aligned <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t len, alignment;
	int ret = pmem2_source_size(src, &len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_SOURCE_ALIGNMENT(src, &alignment);
	UT_ASSERT(len > alignment);
	/* round the length down to the source alignment */
	size_t aligned_len = ALIGN_DOWN(len, alignment);
	ret = pmem2_config_set_length(cfg, aligned_len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	struct pmem2_map *map = map_valid(cfg, src, aligned_len);
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_offset_not_aligned -- try to map with unaligned offset;
 * pmem2_map must fail with PMEM2_E_OFFSET_UNALIGNED
 */
static int
test_offset_not_aligned(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_offset_not_aligned <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t len, alignment;
	int ret = pmem2_source_size(src, &len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_SOURCE_ALIGNMENT(src, &alignment);
	/* break the offset */
	size_t offset = alignment - 1;
	ret = pmem2_config_set_offset(cfg, offset);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERT(len > alignment);
	/* in this case len has to be aligned, only offset will be unaligned */
	size_t aligned_len = ALIGN_DOWN(len, alignment);
	ret = pmem2_config_set_length(cfg, aligned_len - alignment);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	map_invalid(cfg, src, PMEM2_E_OFFSET_UNALIGNED);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_offset_aligned -- try to map with aligned offset;
 * mapping must succeed
 */
static int
test_offset_aligned(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: test_offset_aligned <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	size_t len, alignment;
	int ret = pmem2_source_size(src, &len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_SOURCE_ALIGNMENT(src, &alignment);
	/* set the aligned offset */
	size_t offset = alignment;
	ret = pmem2_config_set_offset(cfg, offset);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	/* offset + len must stay within the file */
	UT_ASSERT(len > alignment * 2);
	/* set the aligned len */
	size_t map_len = ALIGN_DOWN(len / 2, alignment);
	ret = pmem2_config_set_length(cfg, map_len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	struct pmem2_map *map = map_valid(cfg, src, map_len);
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
 * test_mem_move_cpy_set_with_map_private -- map O_RDONLY file and do
 * pmem2_[cpy|set|move]_fns with PMEM2_PRIVATE sharing; private (COW)
 * writes must not reach the underlying file
 */
static int
test_mem_move_cpy_set_with_map_private(const struct test_case *tc, int argc,
		char *argv[])
{
	if (argc < 1)
		UT_FATAL(
			"usage: test_mem_move_cpy_set_with_map_private <file>");
	char *file = argv[0];
	int fd = OPEN(file, O_RDONLY);
	const char *word1 = "Persistent memory...";
	const char *word2 = "Nonpersistent memory";
	const char *word3 = "XXXXXXXXXXXXXXXXXXXX";
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
					PMEM2_GRANULARITY_PAGE);
	pmem2_config_set_sharing(cfg, PMEM2_PRIVATE);
	size_t size = 0;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	struct pmem2_map *map = map_valid(cfg, src, size);
	char *addr = pmem2_map_get_address(map);
	/* copy initial state */
	char *initial_state = MALLOC(size);
	memcpy(initial_state, addr, size);
	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	/* each operation must be visible through the private mapping */
	memcpy_fn(addr, word1, strlen(word1), 0);
	UT_ASSERTeq(strcmp(addr, word1), 0);
	memmove_fn(addr, word2, strlen(word2), 0);
	UT_ASSERTeq(strcmp(addr, word2), 0);
	memset_fn(addr, 'X', strlen(word3), 0);
	UT_ASSERTeq(strcmp(addr, word3), 0);
	/* remap memory, and check that the data has not been saved */
	pmem2_unmap(&map);
	map = map_valid(cfg, src, size);
	addr = pmem2_map_get_address(map);
	UT_ASSERTeq(strcmp(addr, initial_state), 0);
	/* cleanup after the test */
	pmem2_unmap(&map);
	FREE(initial_state);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);
	return 1;
}
/*
* test_deep_flush_valid -- perform valid deep_flush for whole map
*/
static int
test_deep_flush_valid(const struct test_case *tc, int argc, char *argv[])
{
	/* validate the argument list, consistent with the other test cases */
	if (argc < 1)
		UT_FATAL("usage: test_deep_flush_valid <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t len;
	PMEM2_SOURCE_SIZE(src, &len);

	struct pmem2_map *map = map_valid(cfg, src, len);
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);

	/* dirty the whole mapping so there is something to deep-flush */
	memset(addr, 0, len);
	persist_fn(addr, len);

	/* a deep flush over the entire mapped range must succeed */
	int ret = pmem2_deep_flush(map, addr, len);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	pmem2_unmap(&map);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);

	return 1;
}
/*
* test_deep_flush_e_range_behind -- try deep_flush for range behind a map
*/
static int
test_deep_flush_e_range_behind(const struct test_case *tc,
		int argc, char *argv[])
{
	/* validate the argument list, consistent with the other test cases */
	if (argc < 1)
		UT_FATAL("usage: test_deep_flush_e_range_behind <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t len;
	PMEM2_SOURCE_SIZE(src, &len);

	struct pmem2_map *map = map_valid(cfg, src, len);
	size_t map_size = pmem2_map_get_size(map);
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);

	memset(addr, 0, len);
	persist_fn(addr, len);

	/* a range starting past the end of the mapping must be rejected */
	int ret = pmem2_deep_flush(map, addr + map_size + 1, 64);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	pmem2_unmap(&map);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);

	return 1;
}
/*
* test_deep_flush_e_range_before -- try deep_flush for range before a map
*/
static int
test_deep_flush_e_range_before(const struct test_case *tc,
		int argc, char *argv[])
{
	/* validate the argument list, consistent with the other test cases */
	if (argc < 1)
		UT_FATAL("usage: test_deep_flush_e_range_before <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t len;
	PMEM2_SOURCE_SIZE(src, &len);

	struct pmem2_map *map = map_valid(cfg, src, len);
	size_t map_size = pmem2_map_get_size(map);
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);

	memset(addr, 0, len);
	persist_fn(addr, len);

	/* a range entirely before the mapping must be rejected */
	int ret = pmem2_deep_flush(map, addr - map_size, 64);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	pmem2_unmap(&map);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);

	return 1;
}
/*
* test_deep_flush_slice -- try deep_flush for slice of a map
*/
static int
test_deep_flush_slice(const struct test_case *tc, int argc, char *argv[])
{
	/* validate the argument list, consistent with the other test cases */
	if (argc < 1)
		UT_FATAL("usage: test_deep_flush_slice <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t len;
	PMEM2_SOURCE_SIZE(src, &len);

	struct pmem2_map *map = map_valid(cfg, src, len);
	size_t map_size = pmem2_map_get_size(map);
	size_t map_part = map_size / 4;
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);

	memset(addr, 0, map_part);
	persist_fn(addr, map_part);

	/* flushing an interior slice of the mapping must succeed */
	int ret = pmem2_deep_flush(map, addr + map_part, map_part);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	pmem2_unmap(&map);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);

	return 1;
}
/*
* test_deep_flush_overlap -- try deep_flush for range overlaping map
*/
static int
test_deep_flush_overlap(const struct test_case *tc, int argc, char *argv[])
{
	/* validate the argument list, consistent with the other test cases */
	if (argc < 1)
		UT_FATAL("usage: test_deep_flush_overlap <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t len;
	PMEM2_SOURCE_SIZE(src, &len);

	struct pmem2_map *map = map_valid(cfg, src, len);
	size_t map_size = pmem2_map_get_size(map);
	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);

	memset(addr, 0, len);
	persist_fn(addr, len);

	/* a range that extends past the end of the mapping must be rejected */
	int ret = pmem2_deep_flush(map, addr + 1024, map_size);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	pmem2_unmap(&map);
	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);
	CLOSE(fd);

	return 1;
}
/*
* test_source_anon -- tests map/config/source functions in combination
* with anonymous source.
*/
static int
test_source_anon(enum pmem2_sharing_type sharing,
	enum pmem2_granularity granularity,
	size_t source_len, size_t map_len)
{
	int ret = 0;

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;
	struct pmem2_badblock_context *bbctx;

	UT_ASSERTeq(pmem2_source_from_anon(&src, source_len), 0);
	/* device queries and badblocks are unsupported for anon sources */
	UT_ASSERTeq(pmem2_source_device_id(src, NULL, NULL), PMEM2_E_NOSUPP);
	UT_ASSERTeq(pmem2_source_device_usc(src, NULL), PMEM2_E_NOSUPP);
	UT_ASSERTeq(pmem2_badblock_context_new(src, &bbctx), PMEM2_E_NOSUPP);

	size_t alignment;
	UT_ASSERTeq(pmem2_source_alignment(src, &alignment), 0);
	UT_ASSERT(alignment >= Ut_pagesize);

	/* anon source must report exactly the requested length */
	size_t size;
	UT_ASSERTeq(pmem2_source_size(src, &size), 0);
	UT_ASSERTeq(size, source_len);

	PMEM2_CONFIG_NEW(&cfg);
	UT_ASSERTeq(pmem2_config_set_length(cfg, map_len), 0);
	UT_ASSERTeq(pmem2_config_set_offset(cfg, alignment), 0); /* ignored */
	UT_ASSERTeq(pmem2_config_set_required_store_granularity(cfg,
			granularity), 0);
	UT_ASSERTeq(pmem2_config_set_sharing(cfg, sharing), 0);

	/* mapping may legitimately fail; propagate the error to the caller */
	if ((ret = pmem2_map(cfg, src, &map)) != 0)
		goto map_fail;

	void *addr = pmem2_map_get_address(map);
	UT_ASSERTne(addr, NULL);
	/* map_len == 0 means the length is inherited from the source */
	UT_ASSERTeq(pmem2_map_get_size(map), map_len ? map_len : source_len);
	UT_ASSERTeq(pmem2_map_get_store_granularity(map),
		PMEM2_GRANULARITY_BYTE);
	/* deep flush is meaningless for anonymous memory */
	UT_ASSERTeq(pmem2_deep_flush(map, addr, alignment), PMEM2_E_NOSUPP);

	UT_ASSERTeq(pmem2_unmap(&map), 0);
map_fail:
	PMEM2_CONFIG_DELETE(&cfg);
	pmem2_source_delete(&src);

	return ret;
}
/*
* test_source_anon_ok_private -- valid config /w private flag
*/
static int
test_source_anon_private(const struct test_case *tc, int argc, char *argv[])
{
	/* 1 GiB anonymous source, 1 MiB mapping, private (CoW) view */
	UT_ASSERTeq(test_source_anon(PMEM2_PRIVATE, PMEM2_GRANULARITY_BYTE,
			1 << 30ULL, 1 << 20ULL), 0);

	return 1;
}
/*
* test_source_anon_shared -- valid config /w shared flag
*/
static int
test_source_anon_shared(const struct test_case *tc, int argc, char *argv[])
{
	/* 1 GiB anonymous source, 1 MiB mapping, shared view */
	UT_ASSERTeq(test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
			1 << 30ULL, 1 << 20ULL), 0);

	return 1;
}
/*
* test_source_anon_page -- valid config /w page granularity
*/
static int
test_source_anon_page(const struct test_case *tc, int argc, char *argv[])
{
	/* page granularity is always satisfiable by anonymous memory */
	UT_ASSERTeq(test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_PAGE,
			1 << 30ULL, 1 << 20ULL), 0);

	return 1;
}
/*
* test_source_anon_zero_len -- valid config /w zero (src inherited) map length
*/
static int
test_source_anon_zero_len(const struct test_case *tc, int argc, char *argv[])
{
	/* zero map length means "inherit the length from the source" */
	UT_ASSERTeq(test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
			1 << 30ULL, 0), 0);

	return 1;
}
/*
* test_source_anon_too_small -- valid config /w small mapping length
*/
static int
test_source_anon_too_small(const struct test_case *tc, int argc, char *argv[])
{
	/* a sub-page mapping length must make pmem2_map() fail */
	UT_ASSERTne(test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
			1 << 30ULL, 1 << 10ULL), 0);

	return 1;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_reuse_cfg),
TEST_CASE(test_reuse_cfg_with_diff_fd),
TEST_CASE(test_register_pmem),
TEST_CASE(test_use_misc_lens_and_offsets),
TEST_CASE(test_granularity),
TEST_CASE(test_len_not_aligned),
TEST_CASE(test_len_aligned),
TEST_CASE(test_offset_not_aligned),
TEST_CASE(test_offset_aligned),
TEST_CASE(test_mem_move_cpy_set_with_map_private),
TEST_CASE(test_deep_flush_valid),
TEST_CASE(test_deep_flush_e_range_behind),
TEST_CASE(test_deep_flush_e_range_before),
TEST_CASE(test_deep_flush_slice),
TEST_CASE(test_deep_flush_overlap),
TEST_CASE(test_source_anon_private),
TEST_CASE(test_source_anon_shared),
TEST_CASE(test_source_anon_page),
TEST_CASE(test_source_anon_too_small),
TEST_CASE(test_source_anon_zero_len),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_integration");
	/* run the test cases selected on the command line */
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
| 22,113 | 23.736018 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_alloc_class_config/obj_ctl_alloc_class_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_alloc_class_config.c -- tests for the ctl alloc class config
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class_config"
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alloc_class_config");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/*
	 * Query each allocation class registered through the config
	 * (ids 128..130) and dump its description.
	 */
	static const char *nodes[] = {
		"heap.alloc_class.128.desc",
		"heap.alloc_class.129.desc",
		"heap.alloc_class.130.desc",
	};

	struct pobj_alloc_class_desc alloc_class;
	for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); ++i) {
		int ret = pmemobj_ctl_get(pop, nodes[i], &alloc_class);
		UT_ASSERTeq(ret, 0);

		UT_OUT("%d %lu %d", alloc_class.header_type,
			alloc_class.unit_size,
			alloc_class.units_per_block);
	}

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,242 | 22.45283 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_action/obj_action.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* obj_action.c -- test the action API
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_action"
struct macro_reserve_s {
PMEMoid oid;
uint64_t value;
};
TOID_DECLARE(struct macro_reserve_s, 1);
struct foo {
int bar;
};
struct root {
struct {
PMEMoid oid;
uint64_t value;
} reserved;
struct {
PMEMoid oid;
uint64_t value;
} published;
struct {
PMEMoid oid;
} tx_reserved;
struct {
PMEMoid oid;
} tx_reserved_fulfilled;
struct {
PMEMoid oid;
} tx_published;
};
#define HUGE_ALLOC_SIZE ((1 << 20) * 3)
#define MAX_ACTS 10
/*
 * test_resv_cancel_huge -- exhaust the pool with huge reservations, cancel
 * them, and verify the same number can be reserved again
 */
static void
test_resv_cancel_huge(PMEMobjpool *pop)
{
	PMEMoid oid;
	unsigned nallocs = 0;
	struct pobj_action *act = (struct pobj_action *)
		ZALLOC(sizeof(struct pobj_action) * MAX_ACTS);

	do {
		/* guard: the act array only has room for MAX_ACTS entries */
		UT_ASSERT(nallocs < MAX_ACTS);
		oid = pmemobj_reserve(pop, &act[nallocs++], HUGE_ALLOC_SIZE, 0);
	} while (!OID_IS_NULL(oid));
	/* the last reserve failed, so it produced no action to cancel */
	pmemobj_cancel(pop, act, nallocs - 1);

	unsigned nallocs2 = 0;
	do {
		UT_ASSERT(nallocs2 < MAX_ACTS);
		oid = pmemobj_reserve(pop, &act[nallocs2++],
			HUGE_ALLOC_SIZE, 0);
	} while (!OID_IS_NULL(oid));
	pmemobj_cancel(pop, act, nallocs2 - 1);

	/* cancel must have returned all the space to the pool */
	UT_ASSERTeq(nallocs, nallocs2);

	FREE(act);
}
/*
 * test_defer_free -- a published defer_free action actually frees the
 * object, while a canceled one leaves it allocated
 */
static void
test_defer_free(PMEMobjpool *pop)
{
	PMEMoid oid;
	int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	struct pobj_action act;
	pmemobj_defer_free(pop, oid, &act);
	pmemobj_publish(pop, &act, 1);

	/* published: the object is freed, so writing to it is invalid */
	struct foo *f = (struct foo *)pmemobj_direct(oid);
	f->bar = 5; /* should trigger memcheck error */

	ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	pmemobj_defer_free(pop, oid, &act);
	pmemobj_cancel(pop, &act, 1);

	/* canceled: the object is still allocated and writable */
	f = (struct foo *)pmemobj_direct(oid);
	f->bar = 5; /* should NOT trigger memcheck error */
}
/*
* This function tests if macros included in action.h api compile and
* allocate memory.
*/
/*
 * This function tests if macros included in action.h api compile and
 * allocate memory.
 *
 * Note: the '&macro_reserve_*' operands below were corrupted by HTML-entity
 * decoding in a previous revision ("&macr" -> the macron character); this
 * restores the intended address-of expressions.
 */
static void
test_api_macros(PMEMobjpool *pop)
{
	struct pobj_action macro_reserve_act[1];

	/* POBJ_RESERVE_NEW: typed reservation sized by the type */
	TOID(struct macro_reserve_s) macro_reserve_p = POBJ_RESERVE_NEW(pop,
		struct macro_reserve_s, &macro_reserve_act[0]);
	UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
	pmemobj_publish(pop, macro_reserve_act, 1);
	POBJ_FREE(&macro_reserve_p);

	/* POBJ_RESERVE_ALLOC: typed reservation with an explicit size */
	macro_reserve_p = POBJ_RESERVE_ALLOC(pop, struct macro_reserve_s,
		sizeof(struct macro_reserve_s), &macro_reserve_act[0]);
	UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
	pmemobj_publish(pop, macro_reserve_act, 1);
	POBJ_FREE(&macro_reserve_p);

	/* POBJ_XRESERVE_NEW: typed reservation with flags */
	macro_reserve_p = POBJ_XRESERVE_NEW(pop, struct macro_reserve_s,
		&macro_reserve_act[0], 0);
	UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
	pmemobj_publish(pop, macro_reserve_act, 1);
	POBJ_FREE(&macro_reserve_p);

	/* POBJ_XRESERVE_ALLOC: explicit size plus flags */
	macro_reserve_p = POBJ_XRESERVE_ALLOC(pop, struct macro_reserve_s,
		sizeof(struct macro_reserve_s), &macro_reserve_act[0], 0);
	UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
	pmemobj_publish(pop, macro_reserve_act, 1);
	POBJ_FREE(&macro_reserve_p);
}
#define POBJ_MAX_ACTIONS 60
/*
 * test_many -- reserve, publish, and then defer_free/publish a batch of n
 * small objects (n may exceed the internal action batch limit)
 */
static void
test_many(PMEMobjpool *pop, size_t n)
{
	struct pobj_action *act = (struct pobj_action *)
		MALLOC(sizeof(struct pobj_action) * n);
	PMEMoid *oid = (PMEMoid *)
		MALLOC(sizeof(PMEMoid) * n);

	/* size_t index: 'int i' compared against size_t n was a sign/width
	 * mismatch and would truncate for very large n */
	for (size_t i = 0; i < n; ++i) {
		oid[i] = pmemobj_reserve(pop, &act[i], 1, 0);
		UT_ASSERT(!OID_IS_NULL(oid[i]));
	}

	UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);

	for (size_t i = 0; i < n; ++i)
		pmemobj_defer_free(pop, oid[i], &act[i]);

	UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);

	FREE(oid);
	FREE(act);
}
/*
 * test_duplicate -- reserve growing batches from a single-unit custom
 * allocation class and cancel each batch in full
 */
static void
test_duplicate(PMEMobjpool *pop)
{
	/* register a custom allocation class under id 128 */
	struct pobj_alloc_class_desc alloc_class_128;
	alloc_class_128.header_type = POBJ_HEADER_COMPACT;
	alloc_class_128.unit_size = 1024 * 100;
	alloc_class_128.units_per_block = 1;
	alloc_class_128.alignment = 0;

	int ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
		&alloc_class_128);
	UT_ASSERTeq(ret, 0);

	struct pobj_action a[10];
	PMEMoid oid[10];

	/* batches of 1, 3 and 5 reservations, each canceled as a whole */
	static const size_t batches[] = {1, 3, 5};

	for (size_t b = 0; b < sizeof(batches) / sizeof(batches[0]); ++b) {
		for (size_t i = 0; i < batches[b]; ++i) {
			oid[0] = pmemobj_xreserve(pop, &a[i], 1, 0,
				POBJ_CLASS_ID(128));
			UT_ASSERT(!OID_IS_NULL(oid[0]));
		}
		pmemobj_cancel(pop, a, batches[b]);
	}
}
/*
 * test_many_sets -- publish a large batch of set_value actions at once and
 * verify every deferred store became visible
 */
static void
test_many_sets(PMEMobjpool *pop, size_t n)
{
	struct pobj_action *act = (struct pobj_action *)
		MALLOC(sizeof(struct pobj_action) * n);

	PMEMoid oid;
	pmemobj_alloc(pop, &oid, sizeof(uint64_t) * n, 0, NULL, NULL);
	UT_ASSERT(!OID_IS_NULL(oid));

	uint64_t *values = (uint64_t *)pmemobj_direct(oid);

	/* each action defers the store values[i] = i until publish */
	for (uint64_t i = 0; i < n; ++i)
		pmemobj_set_value(pop, &act[i], values + i, i);

	UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);

	for (uint64_t i = 0; i < n; ++i)
		UT_ASSERTeq(*(values + i), i);

	pmemobj_free(&oid);
	FREE(act);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_action");
if (argc < 2)
UT_FATAL("usage: %s filename", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid root = pmemobj_root(pop, sizeof(struct root));
struct root *rootp = (struct root *)pmemobj_direct(root);
struct pobj_action reserved[2];
struct pobj_action published[2];
struct pobj_action tx_reserved;
struct pobj_action tx_reserved_fulfilled;
struct pobj_action tx_published;
rootp->reserved.oid =
pmemobj_reserve(pop, &reserved[0], sizeof(struct foo), 0);
pmemobj_set_value(pop, &reserved[1], &rootp->reserved.value, 1);
rootp->tx_reserved.oid =
pmemobj_reserve(pop, &tx_reserved, sizeof(struct foo), 0);
rootp->tx_reserved_fulfilled.oid =
pmemobj_reserve(pop,
&tx_reserved_fulfilled, sizeof(struct foo), 0);
rootp->tx_published.oid =
pmemobj_reserve(pop, &tx_published, sizeof(struct foo), 0);
rootp->published.oid =
pmemobj_reserve(pop, &published[0], sizeof(struct foo), 0);
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_reserved, 1);
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_reserved_fulfilled, 1);
pmemobj_tx_publish(NULL, 0); /* this is to force resv fulfill */
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
pmemobj_set_value(pop, &published[1], &rootp->published.value, 1);
pmemobj_publish(pop, published, 2);
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_published, 1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_persist(pop, rootp, sizeof(*rootp));
pmemobj_close(pop);
UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1);
UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL);
root = pmemobj_root(pop, sizeof(struct root));
rootp = (struct root *)pmemobj_direct(root);
struct foo *reserved_foop =
(struct foo *)pmemobj_direct(rootp->reserved.oid);
reserved_foop->bar = 1; /* should trigger memcheck error */
UT_ASSERTeq(rootp->reserved.value, 0);
struct foo *published_foop =
(struct foo *)pmemobj_direct(rootp->published.oid);
published_foop->bar = 1; /* should NOT trigger memcheck error */
UT_ASSERTeq(rootp->published.value, 1);
struct foo *tx_reserved_foop =
(struct foo *)pmemobj_direct(rootp->tx_reserved.oid);
tx_reserved_foop->bar = 1; /* should trigger memcheck error */
struct foo *tx_reserved_fulfilled_foop =
(struct foo *)pmemobj_direct(rootp->tx_reserved_fulfilled.oid);
tx_reserved_fulfilled_foop->bar = 1; /* should trigger memcheck error */
struct foo *tx_published_foop =
(struct foo *)pmemobj_direct(rootp->tx_published.oid);
tx_published_foop->bar = 1; /* should NOT trigger memcheck error */
test_resv_cancel_huge(pop);
test_defer_free(pop);
test_api_macros(pop);
test_many(pop, POBJ_MAX_ACTIONS * 2);
test_many_sets(pop, POBJ_MAX_ACTIONS * 2);
test_duplicate(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 8,548 | 23.286932 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_source_size/pmem2_source_size.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_source_size.c -- pmem2_source_size unittests
*/
#include <stdint.h>
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_fh.h"
#include "config.h"
#include "out.h"
typedef void (*test_fun)(const char *path, os_off_t size);
/*
* test_normal_file - tests normal file (common)
*/
static void
test_normal_file(const char *path, os_off_t expected_size,
		enum file_handle_type type)
{
	struct FHandle *fh = UT_FH_OPEN(type, path, FH_RDWR);

	struct pmem2_source *src;
	PMEM2_SOURCE_FROM_FH(&src, fh);

	/* the size reported by pmem2 must match the file's actual size */
	size_t size;
	int ret = pmem2_source_size(src, &size);

	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTeq(size, expected_size);

	PMEM2_SOURCE_DELETE(&src);
	UT_FH_CLOSE(fh);
}
/*
* test_normal_file_fd - tests normal file using a file descriptor
*/
static int
test_normal_file_fd(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_normal_file_fd <file> <expected_size>");

	/* delegate to the common helper using the fd-based handle type */
	test_normal_file(argv[0], ATOLL(argv[1]), FH_FD);

	return 2;
}
/*
* test_normal_file_handle - tests normal file using a HANDLE
*/
static int
test_normal_file_handle(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_normal_file_handle"
				" <file> <expected_size>");

	/* delegate to the common helper using the HANDLE-based type */
	test_normal_file(argv[0], ATOLL(argv[1]), FH_HANDLE);

	return 2;
}
/*
* test_tmpfile - tests temporary file
*/
static void
test_tmpfile(const char *dir, os_off_t requested_size,
		enum file_handle_type type)
{
	/* unnamed temporary file, grown to the requested size */
	struct FHandle *fh = UT_FH_OPEN(type, dir, FH_RDWR | FH_TMPFILE);
	UT_FH_TRUNCATE(fh, requested_size);

	struct pmem2_source *src;
	PMEM2_SOURCE_FROM_FH(&src, fh);

	/* poison 'size' so a silent non-update would be detected */
	size_t size = SIZE_MAX;
	int ret = pmem2_source_size(src, &size);

	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTeq(size, requested_size);

	PMEM2_SOURCE_DELETE(&src);
	UT_FH_CLOSE(fh);
}
/*
* test_tmpfile_fd - tests temporary file using file descriptor interface
*/
static int
test_tmpfile_fd(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_tmpfile_fd <file> <requested_size>");

	/* delegate to the common helper using the fd-based handle type */
	test_tmpfile(argv[0], ATOLL(argv[1]), FH_FD);

	return 2;
}
/*
* test_tmpfile_handle - tests temporary file using file handle interface
*/
static int
test_tmpfile_handle(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: test_tmpfile_handle <file> <requested_size>");

	/* delegate to the common helper using the HANDLE-based type */
	test_tmpfile(argv[0], ATOLL(argv[1]), FH_HANDLE);

	return 2;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_normal_file_fd),
TEST_CASE(test_normal_file_handle),
TEST_CASE(test_tmpfile_fd),
TEST_CASE(test_tmpfile_handle),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
	START(argc, argv, "pmem2_source_size");
	util_init();
	/* set up logging before any test case may print diagnostics */
	out_init("pmem2_source_size", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 3,326 | 20.191083 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/ex_linkedlist/ex_linkedlist.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* ex_linkedlist.c - test of linkedlist example
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "pmemobj_list.h"
#include "unittest.h"
#define ELEMENT_NO 10
#define PRINT_RES(res, struct_name) do {\
if ((res) == 0) {\
UT_OUT("Outcome for " #struct_name " is correct!");\
} else {\
UT_ERR("Outcome for " #struct_name\
" does not match expected result!!!");\
}\
} while (0)
POBJ_LAYOUT_BEGIN(list);
POBJ_LAYOUT_ROOT(list, struct base);
POBJ_LAYOUT_TOID(list, struct tqueuehead);
POBJ_LAYOUT_TOID(list, struct slisthead);
POBJ_LAYOUT_TOID(list, struct tqnode);
POBJ_LAYOUT_TOID(list, struct snode);
POBJ_LAYOUT_END(list);
POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
struct tqnode {
int data;
POBJ_TAILQ_ENTRY(struct tqnode) tnd;
};
POBJ_SLIST_HEAD(slisthead, struct snode);
struct snode {
int data;
POBJ_SLIST_ENTRY(struct snode) snd;
};
struct base {
struct tqueuehead tqueue;
struct slisthead slist;
};
static const int expectedResTQ[] = { 111, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 222 };
static const int expectedResSL[] = { 111, 8, 222, 6, 5, 4, 3, 2, 1, 0, 333 };
/*
* dump_tq -- dumps list on standard output
*/
static void
dump_tq(struct tqueuehead *head, const char *str)
{
TOID(struct tqnode) var;
UT_OUT("%s start", str);
POBJ_TAILQ_FOREACH(var, head, tnd)
UT_OUT("%d", D_RW(var)->data);
UT_OUT("%s end", str);
}
/*
* init_tqueue -- initialize tail queue
*/
static void
init_tqueue(PMEMobjpool *pop, struct tqueuehead *head)
{
if (!POBJ_TAILQ_EMPTY(head))
return;
TOID(struct tqnode) node;
TOID(struct tqnode) middleNode;
TOID(struct tqnode) node888;
TOID(struct tqnode) tempNode;
int i = 0;
TX_BEGIN(pop) {
POBJ_TAILQ_INIT(head);
dump_tq(head, "after init");
for (i = 0; i < ELEMENT_NO; ++i) {
node = TX_NEW(struct tqnode);
D_RW(node)->data = i;
if (0 == i) {
middleNode = node;
}
POBJ_TAILQ_INSERT_HEAD(head, node, tnd);
node = TX_NEW(struct tqnode);
D_RW(node)->data = i;
POBJ_TAILQ_INSERT_TAIL(head, node, tnd);
}
dump_tq(head, "after insert[head|tail]");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 666;
POBJ_TAILQ_INSERT_AFTER(middleNode, node, tnd);
dump_tq(head, "after insert_after1");
middleNode = POBJ_TAILQ_NEXT(middleNode, tnd);
node = TX_NEW(struct tqnode);
D_RW(node)->data = 888;
node888 = node;
POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
dump_tq(head, "after insert_before1");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 555;
POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
dump_tq(head, "after insert_before2");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 111;
tempNode = POBJ_TAILQ_FIRST(head);
POBJ_TAILQ_INSERT_BEFORE(tempNode, node, tnd);
dump_tq(head, "after insert_before3");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 222;
tempNode = POBJ_TAILQ_LAST(head);
POBJ_TAILQ_INSERT_AFTER(tempNode, node, tnd);
dump_tq(head, "after insert_after2");
tempNode = middleNode;
middleNode = POBJ_TAILQ_PREV(tempNode, tnd);
POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, middleNode, tnd);
dump_tq(head, "after move_element_tail");
POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, tempNode, tnd);
dump_tq(head, "after move_element_head");
tempNode = POBJ_TAILQ_FIRST(head);
POBJ_TAILQ_REMOVE(head, tempNode, tnd);
dump_tq(head, "after remove1");
tempNode = POBJ_TAILQ_LAST(head);
POBJ_TAILQ_REMOVE(head, tempNode, tnd);
dump_tq(head, "after remove2");
POBJ_TAILQ_REMOVE(head, node888, tnd);
dump_tq(head, "after remove3");
} TX_ONABORT {
abort();
} TX_END
}
/*
* dump_sl -- dumps list on standard output
*/
static void
dump_sl(struct slisthead *head, const char *str)
{
TOID(struct snode) var;
UT_OUT("%s start", str);
POBJ_SLIST_FOREACH(var, head, snd)
UT_OUT("%d", D_RW(var)->data);
UT_OUT("%s end", str);
}
/*
* init_slist -- initialize SLIST
*/
static void
init_slist(PMEMobjpool *pop, struct slisthead *head)
{
if (!POBJ_SLIST_EMPTY(head))
return;
TOID(struct snode) node;
TOID(struct snode) tempNode;
int i = 0;
TX_BEGIN(pop) {
POBJ_SLIST_INIT(head);
dump_sl(head, "after init");
for (i = 0; i < ELEMENT_NO; ++i) {
node = TX_NEW(struct snode);
D_RW(node)->data = i;
POBJ_SLIST_INSERT_HEAD(head, node, snd);
}
dump_sl(head, "after insert_head");
tempNode = POBJ_SLIST_FIRST(head);
node = TX_NEW(struct snode);
D_RW(node)->data = 111;
POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
dump_sl(head, "after insert_after1");
tempNode = POBJ_SLIST_NEXT(node, snd);
node = TX_NEW(struct snode);
D_RW(node)->data = 222;
POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
dump_sl(head, "after insert_after2");
tempNode = POBJ_SLIST_NEXT(node, snd);
POBJ_SLIST_REMOVE_FREE(head, tempNode, snd);
dump_sl(head, "after remove_free1");
POBJ_SLIST_REMOVE_HEAD(head, snd);
dump_sl(head, "after remove_head");
TOID(struct snode) element = POBJ_SLIST_FIRST(head);
while (!TOID_IS_NULL(D_RO(element)->snd.pe_next)) {
element = D_RO(element)->snd.pe_next;
}
node = TX_NEW(struct snode);
D_RW(node)->data = 333;
POBJ_SLIST_INSERT_AFTER(element, node, snd);
dump_sl(head, "after insert_after3");
element = node;
node = TX_NEW(struct snode);
D_RW(node)->data = 123;
POBJ_SLIST_INSERT_AFTER(element, node, snd);
dump_sl(head, "after insert_after4");
tempNode = POBJ_SLIST_NEXT(node, snd);
POBJ_SLIST_REMOVE_FREE(head, node, snd);
dump_sl(head, "after remove_free2");
} TX_ONABORT {
abort();
} TX_END
}
int
main(int argc, char *argv[])
{
unsigned res = 0;
PMEMobjpool *pop;
const char *path;
START(argc, argv, "ex_linkedlist");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(list) != 4);
if (argc != 2) {
UT_FATAL("usage: %s file-name", argv[0]);
}
path = argv[1];
if (os_access(path, F_OK) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
UT_FATAL("!pmemobj_create: %s", path);
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(list))) == NULL) {
UT_FATAL("!pmemobj_open: %s", path);
}
}
TOID(struct base) base = POBJ_ROOT(pop, struct base);
struct tqueuehead *tqhead = &D_RW(base)->tqueue;
struct slisthead *slhead = &D_RW(base)->slist;
init_tqueue(pop, tqhead);
init_slist(pop, slhead);
int i = 0;
TOID(struct tqnode) tqelement;
POBJ_TAILQ_FOREACH(tqelement, tqhead, tnd) {
if (D_RO(tqelement)->data != expectedResTQ[i]) {
res = 1;
break;
}
i++;
}
PRINT_RES(res, tail queue);
i = 0;
res = 0;
TOID(struct snode) slelement;
POBJ_SLIST_FOREACH(slelement, slhead, snd) {
if (D_RO(slelement)->data != expectedResSL[i]) {
res = 1;
break;
}
i++;
}
PRINT_RES(res, singly linked list);
pmemobj_close(pop);
DONE(NULL);
}
| 6,919 | 22.862069 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_persist_count/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of pmem functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_persist_count test.
* It would replace default implementation with mocked functions defined
* in obj_persist_count.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define pmem_persist __wrap_pmem_persist
#define pmem_flush __wrap_pmem_flush
#define pmem_drain __wrap_pmem_drain
#define pmem_msync __wrap_pmem_msync
#define pmem_memcpy_persist __wrap_pmem_memcpy_persist
#define pmem_memcpy_nodrain __wrap_pmem_memcpy_nodrain
#define pmem_memcpy __wrap_pmem_memcpy
#define pmem_memmove_persist __wrap_pmem_memmove_persist
#define pmem_memmove_nodrain __wrap_pmem_memmove_nodrain
#define pmem_memmove __wrap_pmem_memmove
#define pmem_memset_persist __wrap_pmem_memset_persist
#define pmem_memset_nodrain __wrap_pmem_memset_nodrain
#define pmem_memset __wrap_pmem_memset
#endif
| 1,130 | 34.34375 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_persist_count/obj_persist_count.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_persist_count.c -- counting number of persists
*/
#define _GNU_SOURCE
#include "obj.h"
#include "pmalloc.h"
#include "unittest.h"
struct ops_counter {
unsigned n_cl_stores;
unsigned n_drain;
unsigned n_pmem_persist;
unsigned n_pmem_msync;
unsigned n_pmem_flush;
unsigned n_pmem_drain;
unsigned n_flush_from_pmem_memcpy;
unsigned n_flush_from_pmem_memset;
unsigned n_drain_from_pmem_memcpy;
unsigned n_drain_from_pmem_memset;
unsigned n_pot_cache_misses;
};
static struct ops_counter ops_counter;
static struct ops_counter tx_counter;
#define FLUSH_ALIGN ((uintptr_t)64)
#define MOVNT_THRESHOLD 256
/*
 * cl_flushed -- number of FLUSH_ALIGN-sized lines covered by [addr, addr+len)
 * after rounding the range out to 'alignment' boundaries
 */
static unsigned
cl_flushed(const void *addr, size_t len, uintptr_t alignment)
{
	uintptr_t mask = alignment - 1;
	uintptr_t first = (uintptr_t)addr & ~mask;
	uintptr_t last = ((uintptr_t)addr + len + mask) & ~mask;

	return (unsigned)(last - first) / FLUSH_ALIGN;
}
#define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL)
#define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL)
/*
 * bulk_cl_changed -- number of cache lines touched by a bulk memory
 * operation; also accounts for the potential cache misses that the chosen
 * instruction type (temporal vs. non-temporal) may incur
 */
static unsigned
bulk_cl_changed(const void *addr, size_t len, unsigned flags)
{
	uintptr_t start = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = ((uintptr_t)addr + len + FLUSH_ALIGN - 1) &
		~(FLUSH_ALIGN - 1);
	unsigned cl_changed = (unsigned)(end - start) / FLUSH_ALIGN;

	int wc; /* write combining */
	if (flags & PMEM_F_MEM_NOFLUSH)
		wc = 0; /* NOFLUSH always uses temporal instructions */
	else if (flags & PMEM_F_MEM_MOVNT)
		wc = 1;
	else if (flags & PMEM_F_MEM_MOV)
		wc = 0;
	else if (len < MOVNT_THRESHOLD)
		wc = 0; /* small copies stay temporal by default */
	else
		wc = 1;

	/* count number of potential cache misses */
	if (!wc) {
		/*
		 * When we don't use write combining, it means all
		 * cache lines may be missing.
		 */
		ops_counter.n_pot_cache_misses += cl_changed;
	} else {
		/*
		 * When we use write combining there won't be any cache misses,
		 * with an exception of unaligned beginning or end.
		 */
		if (start != (uintptr_t)addr)
			ops_counter.n_pot_cache_misses++;
		if (end != ((uintptr_t)addr + len) &&
				start + FLUSH_ALIGN != end)
			ops_counter.n_pot_cache_misses++;
	}

	return cl_changed;
}
/*
 * flush_cl -- account for a cache-line-granular flush of [addr, addr + len)
 */
static void
flush_cl(const void *addr, size_t len)
{
	unsigned n = cl_flushed(addr, len, FLUSH_ALIGN);

	/* every flushed line is both a store and a potential miss */
	ops_counter.n_cl_stores += n;
	ops_counter.n_pot_cache_misses += n;
}
/*
 * flush_msync -- account for an msync-style flush, which operates on whole
 * pages rather than single cache lines
 */
static void
flush_msync(const void *addr, size_t len)
{
	unsigned n = cl_flushed(addr, len, Pagesize);

	ops_counter.n_cl_stores += n;
	ops_counter.n_pot_cache_misses += n;
}
/* mock: one persist == flushed cache lines for the range plus one drain */
FUNC_MOCK(pmem_persist, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	ops_counter.n_pmem_persist++;
	flush_cl(addr, len);
	ops_counter.n_drain++;

	/* still perform the real persist so the data is actually durable */
	_FUNC_REAL(pmem_persist)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_msync, int, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_msync++;
flush_msync(addr, len);
ops_counter.n_drain++;
return _FUNC_REAL(pmem_msync)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_flush, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_flush++;
flush_cl(addr, len);
_FUNC_REAL(pmem_flush)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_drain, void, void)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_drain++;
ops_counter.n_drain++;
_FUNC_REAL(pmem_drain)();
}
FUNC_MOCK_END
/*
 * memcpy_nodrain_count -- account for a memcpy that skips the drain:
 * every touched cache line is a store, and (unless NOFLUSH was requested)
 * a flush originating from pmem_memcpy as well
 */
static void
memcpy_nodrain_count(void *dest, const void *src, size_t len, unsigned flags)
{
	unsigned changed = bulk_cl_changed(dest, len, flags);

	ops_counter.n_cl_stores += changed;
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		ops_counter.n_flush_from_pmem_memcpy += changed;
}

/*
 * memcpy_persist_count -- account for a fully persistent memcpy:
 * the nodrain accounting plus exactly one drain
 */
static void
memcpy_persist_count(void *dest, const void *src, size_t len, unsigned flags)
{
	memcpy_nodrain_count(dest, src, len, flags);

	ops_counter.n_drain++;
	ops_counter.n_drain_from_pmem_memcpy++;
}
/*
 * pmem_memcpy_persist -- mock: count a fully persistent memcpy (no flags)
 */
FUNC_MOCK(pmem_memcpy_persist, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_persist_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memcpy_persist)(dest, src, len);
}
FUNC_MOCK_END

/*
 * pmem_memcpy_nodrain -- mock: count a memcpy without the trailing drain
 */
FUNC_MOCK(pmem_memcpy_nodrain, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_nodrain_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memcpy_nodrain)(dest, src, len);
}
FUNC_MOCK_END
/*
 * sanitize_flags -- add flags implied by other flags:
 * NOFLUSH implies NODRAIN
 */
static unsigned
sanitize_flags(unsigned flags)
{
	return (flags & PMEM_F_MEM_NOFLUSH) ?
			(flags | PMEM_F_MEM_NODRAIN) : flags;
}
/*
 * pmem_memcpy -- mock: count a flag-driven memcpy; NODRAIN (or the implied
 * NODRAIN from NOFLUSH) skips the drain accounting, anything else counts
 * as a full persist
 */
FUNC_MOCK(pmem_memcpy, void *, void *dest, const void *src, size_t len,
	unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);
	if (flags & PMEM_F_MEM_NODRAIN)
		memcpy_nodrain_count(dest, src, len, flags);
	else
		memcpy_persist_count(dest, src, len, flags);
	return _FUNC_REAL(pmem_memcpy)(dest, src, len, flags);
}
FUNC_MOCK_END

/*
 * pmem_memmove_persist -- mock: same accounting as pmem_memcpy_persist
 */
FUNC_MOCK(pmem_memmove_persist, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_persist_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memmove_persist)(dest, src, len);
}
FUNC_MOCK_END

/*
 * pmem_memmove_nodrain -- mock: same accounting as pmem_memcpy_nodrain
 */
FUNC_MOCK(pmem_memmove_nodrain, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_nodrain_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memmove_nodrain)(dest, src, len);
}
FUNC_MOCK_END

/*
 * pmem_memmove -- mock: flag-driven variant, same accounting as pmem_memcpy
 */
FUNC_MOCK(pmem_memmove, void *, void *dest, const void *src, size_t len,
	unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);
	if (flags & PMEM_F_MEM_NODRAIN)
		memcpy_nodrain_count(dest, src, len, flags);
	else
		memcpy_persist_count(dest, src, len, flags);
	return _FUNC_REAL(pmem_memmove)(dest, src, len, flags);
}
FUNC_MOCK_END
/*
 * memset_nodrain_count -- account for a memset that skips the drain:
 * every touched cache line is a store, and (unless NOFLUSH was requested)
 * a flush originating from pmem_memset as well
 */
static void
memset_nodrain_count(void *dest, size_t len, unsigned flags)
{
	unsigned changed = bulk_cl_changed(dest, len, flags);

	ops_counter.n_cl_stores += changed;
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		ops_counter.n_flush_from_pmem_memset += changed;
}

/*
 * memset_persist_count -- account for a fully persistent memset:
 * the nodrain accounting plus exactly one drain
 */
static void
memset_persist_count(void *dest, size_t len, unsigned flags)
{
	memset_nodrain_count(dest, len, flags);

	ops_counter.n_drain++;
	ops_counter.n_drain_from_pmem_memset++;
}
/*
 * pmem_memset_persist -- mock: count a fully persistent memset (no flags)
 */
FUNC_MOCK(pmem_memset_persist, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memset_persist_count(dest, len, 0);
	return _FUNC_REAL(pmem_memset_persist)(dest, c, len);
}
FUNC_MOCK_END

/*
 * pmem_memset_nodrain -- mock: count a memset without the trailing drain
 */
FUNC_MOCK(pmem_memset_nodrain, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memset_nodrain_count(dest, len, 0);
	return _FUNC_REAL(pmem_memset_nodrain)(dest, c, len);
}
FUNC_MOCK_END

/*
 * pmem_memset -- mock: flag-driven memset; NODRAIN (or implied NODRAIN
 * from NOFLUSH) skips the drain accounting
 */
FUNC_MOCK(pmem_memset, void *, void *dest, int c, size_t len, unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);
	if (flags & PMEM_F_MEM_NODRAIN)
		memset_nodrain_count(dest, len, flags);
	else
		memset_persist_count(dest, len, flags);
	return _FUNC_REAL(pmem_memset)(dest, c, len, flags);
}
FUNC_MOCK_END
/*
* reset_counters -- zero all counters
*/
/* reset_counters -- zero out every operation counter before a measurement */
static void
reset_counters(void)
{
	memset(&ops_counter, 0, sizeof ops_counter);
}
/*
* print_reset_counters -- print and then zero all counters
*/
static void
print_reset_counters(const char *task, unsigned tx)
{
	/*
	 * CNT subtracts the baseline cost of an empty transaction (captured
	 * into tx_counter by main()) 'tx' times, so transactional tasks
	 * report only their own overhead.
	 */
#define CNT(name) (ops_counter.name - tx * tx_counter.name)
	UT_OUT(
	"%-14s %-7d %-10d %-12d %-10d %-10d %-10d %-15d %-17d %-15d %-17d %-23d",
	task,
	CNT(n_cl_stores),
	CNT(n_drain),
	CNT(n_pmem_persist),
	CNT(n_pmem_msync),
	CNT(n_pmem_flush),
	CNT(n_pmem_drain),
	CNT(n_flush_from_pmem_memcpy),
	CNT(n_drain_from_pmem_memcpy),
	CNT(n_flush_from_pmem_memset),
	CNT(n_drain_from_pmem_memset),
	CNT(n_pot_cache_misses));
#undef CNT
	reset_counters();
}
/* 10 KiB -- large enough to exercise the big-snapshot code path */
#define LARGE_SNAPSHOT ((1 << 10) * 10)

/* object used for the tx_add_large / tx_add_lnext measurements */
struct foo_large {
	uint8_t snapshot[LARGE_SNAPSHOT];
};

/* small object used by the allocation and snapshot measurements */
struct foo {
	int val;
	uint64_t dest;	/* target of pmalloc()/pfree() */
	PMEMoid bar;
	PMEMoid bar2;
};
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_persist_count");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, "persist_count",
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* header row of the per-task counter table */
	UT_OUT(
	"%-14s %-7s %-10s %-12s %-10s %-10s %-10s %-15s %-17s %-15s %-17s %-23s",
	"task",
	"cl(all)",
	"drain(all)",
	"pmem_persist",
	"pmem_msync",
	"pmem_flush",
	"pmem_drain",
	"pmem_memcpy_cls",
	"pmem_memcpy_drain",
	"pmem_memset_cls",
	"pmem_memset_drain",
	"potential_cache_misses");
	print_reset_counters("pool_create", 0);

	/* allocate one structure to create a run */
	pmemobj_alloc(pop, NULL, sizeof(struct foo), 0, NULL, NULL);
	reset_counters();

	PMEMoid root = pmemobj_root(pop, sizeof(struct foo));
	UT_ASSERT(!OID_IS_NULL(root));
	print_reset_counters("root_alloc", 0);

	/* non-transactional (atomic) allocation and free */
	PMEMoid oid;
	int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	print_reset_counters("atomic_alloc", 0);

	pmemobj_free(&oid);
	print_reset_counters("atomic_free", 0);

	struct foo *f = pmemobj_direct(root);

	/* capture the cost of an empty tx as the baseline for CNT() */
	TX_BEGIN(pop) {
	} TX_END
	memcpy(&tx_counter, &ops_counter, sizeof(ops_counter));
	print_reset_counters("tx_begin_end", 0);

	/* transactional allocations -- first and subsequent */
	TX_BEGIN(pop) {
		f->bar = pmemobj_tx_alloc(sizeof(struct foo), 0);
		UT_ASSERT(!OID_IS_NULL(f->bar));
	} TX_END
	print_reset_counters("tx_alloc", 1);

	TX_BEGIN(pop) {
		f->bar2 = pmemobj_tx_alloc(sizeof(struct foo), 0);
		UT_ASSERT(!OID_IS_NULL(f->bar2));
	} TX_END
	print_reset_counters("tx_alloc_next", 1);

	/* transactional frees -- first and subsequent */
	TX_BEGIN(pop) {
		pmemobj_tx_free(f->bar);
	} TX_END
	print_reset_counters("tx_free", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_free(f->bar2);
	} TX_END
	print_reset_counters("tx_free_next", 1);

	/* small snapshots (NO_FLUSH so only the snapshotting is measured) */
	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_next", 1);

	PMEMoid large_foo;
	pmemobj_zalloc(pop, &large_foo, sizeof(struct foo_large), 0);
	UT_ASSERT(!OID_IS_NULL(large_foo));
	reset_counters();

	struct foo_large *flarge = pmemobj_direct(large_foo);

	/* large snapshots exercise a different snapshotting path */
	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&flarge->snapshot,
				sizeof(flarge->snapshot),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_large", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&flarge->snapshot,
				sizeof(flarge->snapshot),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_lnext", 1);

	/* low-level persistent allocator, destination on pmem ... */
	pmalloc(pop, &f->dest, sizeof(f->val), 0, 0);
	print_reset_counters("pmalloc", 0);

	pfree(pop, &f->dest);
	print_reset_counters("pfree", 0);

	/* ... and destination on the (volatile) stack */
	uint64_t stack_var;
	pmalloc(pop, &stack_var, sizeof(f->val), 0, 0);
	print_reset_counters("pmalloc_stack", 0);

	pfree(pop, &stack_var);
	print_reset_counters("pfree_stack", 0);

	pmemobj_close(pop);

	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 10,962 | 22.832609 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_proto/rpmem_proto.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_proto.c -- unit test for rpmem_proto header
*
* The purpose of this test is to make sure the structures which describe
* rpmem protocol messages does not have any padding.
*/
#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_proto");

	/* request message header */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, type);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr);

	/* response message header */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, status);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, type);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr_resp);

	/* pool attributes -- regular and packed on-the-wire form */
	ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, signature);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, incompat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, ro_compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, poolset_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, next_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, prev_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, user_flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr);

	ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr_packed);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, signature);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, incompat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, ro_compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, poolset_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, next_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, prev_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, user_flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr_packed);

	/* InfiniBand connection attributes */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_ibc_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, port);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, persist_method);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, rkey);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, raddr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, nlanes);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_ibc_attr);

	/* fields shared by create and open requests */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_common);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, minor);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, pool_size);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, nlanes);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, provider);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, buff_size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_common);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_pool_desc);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_pool_desc, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_pool_desc);

	/* create request / response */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, c);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_desc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_create);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, ibc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_create_resp);

	/* open request / response */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, c);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, pool_desc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_open);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, ibc);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, pool_attr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_open_resp);

	/* close request / response */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_close);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close_resp, hdr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_close_resp);

	/* persist request / response */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, flags);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, lane);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, addr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, flags);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, lane);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist_resp);

	/* set-attributes request / response */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, pool_attr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr_resp, hdr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr_resp);

	DONE(NULL);
}
| 5,733 | 41.474074 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/compat_incompat_features/pool_open.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* pool_open.c -- a tool for verifying that an obj/blk/log pool opens correctly
*
* usage: pool_open <path> <obj|blk|log> <layout>
*/
#include "unittest.h"
/*
 * main -- open a pool of the requested kind and report whether it succeeded
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "compat_incompat_features");
	if (argc < 3)
		UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);

	char *kind = argv[1];
	char *file = argv[2];

	if (!strcmp(kind, "obj")) {
		PMEMobjpool *pop = pmemobj_open(file, "");
		if (pop == NULL) {
			UT_FATAL("!%s: pmemobj_open failed", file);
		} else {
			UT_OUT("%s: pmemobj_open succeeded", file);
			pmemobj_close(pop);
		}
	} else if (!strcmp(kind, "blk")) {
		PMEMblkpool *pbp = pmemblk_open(file, 0);
		if (pbp == NULL) {
			UT_FATAL("!%s: pmemblk_open failed", file);
		} else {
			UT_OUT("%s: pmemblk_open succeeded", file);
			pmemblk_close(pbp);
		}
	} else if (!strcmp(kind, "log")) {
		PMEMlogpool *plp = pmemlog_open(file);
		if (plp == NULL) {
			UT_FATAL("!%s: pmemlog_open failed", file);
		} else {
			UT_OUT("%s: pmemlog_open succeeded", file);
			pmemlog_close(plp);
		}
	} else {
		UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);
	}

	DONE(NULL);
}
| 1,237 | 23.27451 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_poolset/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions used in util_poolset
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of util_poolset test.
* It would replace default implementation with mocked functions defined
* in util_poolset.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL_OPEN
#define os_open __wrap_os_open
#endif
#ifndef WRAP_REAL_FALLOCATE
#define os_posix_fallocate __wrap_os_posix_fallocate
#endif
#ifndef WRAP_REAL_PMEM
#define pmem_is_pmem __wrap_pmem_is_pmem
#endif
| 730 | 25.107143 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_poolset/util_poolset.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* util_poolset.c -- unit test for util_pool_create() / util_pool_open()
*
* usage: util_poolset cmd minlen hdrsize [mockopts] setfile ...
*/
#include <stdbool.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "set.h"
#include <errno.h>
#include "mocks.h"
#include "fault_injection.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#define SIG "PMEMXXX"
#define MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#define TEST_FORMAT_INCOMPAT_DEFAULT POOL_FEAT_CKSUM_2K
#define TEST_FORMAT_INCOMPAT_CHECK POOL_FEAT_INCOMPAT_VALID
static size_t Extend_size = MIN_PART * 2;
const char *Open_path = "";
os_off_t Fallocate_len = -1;
size_t Is_pmem_len = 0;
/*
* poolset_info -- (internal) dumps poolset info and checks its integrity
*
* Performs the following checks:
* - part_size[i] == rounddown(file_size - pool_hdr_size, Mmap_align)
* - replica_size == sum(part_size)
* - pool_size == min(replica_size)
*/
static void
poolset_info(const char *fname, struct pool_set *set, int o)
{
	/* 'o' selects the opened vs created report format */
	if (o)
		UT_OUT("%s: opened: nreps %d poolsize %zu rdonly %d",
			fname, set->nreplicas, set->poolsize,
			set->rdonly);
	else
		UT_OUT("%s: created: nreps %d poolsize %zu zeroed %d",
			fname, set->nreplicas, set->poolsize,
			set->zeroed);
	size_t poolsize = SIZE_MAX;
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		size_t repsize = 0;
		UT_OUT(" replica[%d]: nparts %d nhdrs %d repsize %zu "
			"is_pmem %d",
			r, rep->nparts, rep->nhdrs, rep->repsize, rep->is_pmem);
		for (unsigned i = 0; i < rep->nparts; i++) {
			struct pool_set_part *part = &rep->part[i];
			UT_OUT(" part[%d] path %s filesize %zu size %zu",
				i, part->path, part->filesize, part->size);
			/* part size = file size rounded down to mmap align */
			size_t partsize =
				(part->filesize & ~(Ut_mmap_align - 1));
			repsize += partsize;
			/*
			 * with per-part headers, each non-first part loses
			 * one alignment unit to its header
			 */
			if (i > 0 && (set->options & OPTION_SINGLEHDR) == 0)
				UT_ASSERTeq(part->size,
					partsize - Ut_mmap_align); /* XXX */
		}
		/* non-first headers do not contribute to the usable size */
		repsize -= (rep->nhdrs - 1) * Ut_mmap_align;
		UT_ASSERTeq(rep->repsize, repsize);
		UT_ASSERT(rep->resvsize >= repsize);
		/* pool size is the minimum over all replica sizes */
		if (rep->repsize < poolsize)
			poolsize = rep->repsize;
	}
	UT_ASSERTeq(set->poolsize, poolsize);
}
/*
* mock_options -- (internal) parse mock options and enable mocked functions
*/
/*
 * mock_options -- parse a single "-mX[:value]" mock option and configure
 * the corresponding mocked function; returns 1 when the argument was a
 * mock option (so the caller skips it), 0 otherwise
 */
static int
mock_options(const char *arg)
{
	/* reset to defaults */
	Open_path = "";
	Fallocate_len = -1;
	Is_pmem_len = 0;

	if (arg[0] != '-' || arg[1] != 'm')
		return 0;

	char opt = arg[2];
	if (opt == 'n') {
		/* do nothing */
	} else if (opt == 'o') {
		/* open */
		Open_path = &arg[4];
	} else if (opt == 'f') {
		/* fallocate */
		Fallocate_len = ATOLL(&arg[4]);
	} else if (opt == 'p') {
		/* is_pmem */
		Is_pmem_len = ATOULL(&arg[4]);
	} else {
		UT_FATAL("unknown mock option: %c", opt);
	}

	return 1;
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_poolset");
	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
		MAJOR_VERSION, MINOR_VERSION);
	if (argc < 3)
		UT_FATAL("usage: %s cmd minsize [mockopts] "
			"setfile ...", argv[0]);
	char *fname;
	struct pool_set *set;
	int ret;
	/* note: fname is reused here only as strtoul's end pointer */
	size_t minsize = strtoul(argv[2], &fname, 0);
	for (int arg = 3; arg < argc; arg++) {
		/* mock_options() returns 1 when it consumed this argument */
		arg += mock_options(argv[arg]);
		fname = argv[arg];
		struct pool_attr attr;
		memset(&attr, 0, sizeof(attr));
		memcpy(attr.signature, SIG, sizeof(SIG));
		attr.major = 1;
		/* argv[1] selects the scenario: create/open/extend/fault */
		switch (argv[1][0]) {
		case 'c':
			attr.features.incompat = TEST_FORMAT_INCOMPAT_DEFAULT;
			ret = util_pool_create(&set, fname, 0, minsize,
				MIN_PART, &attr, NULL, REPLICAS_ENABLED);
			if (ret == -1)
				UT_OUT("!%s: util_pool_create", fname);
			else {
				/*
				 * XXX: On Windows pool files are created with
				 * R/W permissions, so no need for chmod().
				 */
#ifndef _WIN32
				util_poolset_chmod(set, S_IWUSR | S_IRUSR);
#endif
				poolset_info(fname, set, 0);
				util_poolset_close(set, DO_NOT_DELETE_PARTS);
			}
			break;
		case 'o':
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			if (ret == -1)
				UT_OUT("!%s: util_pool_open", fname);
			else {
				poolset_info(fname, set, 1);
				util_poolset_close(set, DO_NOT_DELETE_PARTS);
			}
			break;
		case 'e':
			/* open an existing pool and extend it */
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			UT_ASSERTeq(ret, 0);
			size_t esize = Extend_size;
			void *nptr = util_pool_extend(set, &esize, MIN_PART);
			if (nptr == NULL)
				UT_OUT("!%s: util_pool_extend", fname);
			else {
				poolset_info(fname, set, 1);
			}
			util_poolset_close(set, DO_NOT_DELETE_PARTS);
			break;
		case 'f':
			/* extend must fail cleanly under OOM fault injection */
			if (!core_fault_injection_enabled())
				break;
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			UT_ASSERTeq(ret, 0);
			size_t fsize = Extend_size;
			core_inject_fault_at(PMEM_MALLOC, 2,
				"util_poolset_append_new_part");
			void *fnptr = util_pool_extend(set, &fsize, MIN_PART);
			UT_ASSERTeq(fnptr, NULL);
			UT_ASSERTeq(errno, ENOMEM);
			util_poolset_close(set, DO_NOT_DELETE_PARTS);
			break;
		}
	}
	common_fini();
	DONE(NULL);
}
| 5,390 | 23.843318 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_user_data/obj_tx_user_data.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_tx_user_data.c -- unit test for pmemobj_tx_(get/set)_user_data
*/
#include "unittest.h"
#define LAYOUT_NAME "tx_user_data"
#define USER_DATA_V1 (void *) 123456789ULL
#define USER_DATA_V2 (void *) 987654321ULL
/*
* do_tx_set_get_user_data_nested -- do set and verify user data in a tx
*/
static void
do_tx_set_get_user_data_nested(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		pmemobj_tx_set_user_data(USER_DATA_V1);
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
		TX_BEGIN(pop) {
			/* a nested tx sees the outer tx's user data */
			UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
			pmemobj_tx_set_user_data(USER_DATA_V2);
			UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		/* the value set in the nested tx is visible on commit */
		UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TX_BEGIN(pop) {
		/* user data does not leak into a brand new transaction */
		UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
* do_tx_set_get_user_data_abort -- do set and verify user data in a tx after
* tx abort
*/
static void
do_tx_set_get_user_data_abort(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		pmemobj_tx_set_user_data(USER_DATA_V1);
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		/* user data survives until the aborted tx is finished */
		UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
	} TX_END
	TX_BEGIN(pop) {
		/* the next transaction starts with no user data */
		UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * main -- create a pool and run both user-data scenarios against it
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_user_data");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	do_tx_set_get_user_data_nested(pop);
	do_tx_set_get_user_data_abort(pop);

	pmemobj_close(pop);
	DONE(NULL);
}
| 1,948 | 20.655556 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_memset/pmem_memset.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memset.c -- unit test for doing a memset
*
* usage: pmem_memset file offset length
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memset_common.h"
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
/*
 * pmem_memset_persist_wrapper -- adapt pmem_memset_persist() to the
 * pmem_memset_fn signature; flags are ignored
 */
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags;
	return pmem_memset_persist(pmemdest, c, len);
}

/*
 * pmem_memset_nodrain_wrapper -- adapt pmem_memset_nodrain() to the
 * pmem_memset_fn signature; flags are ignored
 */
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
	(void) flags;
	return pmem_memset_nodrain(pmemdest, c, len);
}
/*
 * do_memset_variants -- run do_memset() through every memset variant:
 * the persist and nodrain wrappers, then the generic pmem_memset() once
 * per entry of the Flags table
 */
static void
do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off,
    size_t bytes, persist_fn p)
{
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_persist_wrapper, 0, p);
	do_memset(fd, dest, file_name, dest_off, bytes,
			pmem_memset_nodrain_wrapper, 0, p);
	/* size_t index avoids signed/unsigned mismatch with ARRAY_SIZE() */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memset(fd, dest, file_name, dest_off, bytes,
				pmem_memset, Flags[i], p);
		/* NOFLUSH variants leave the data unflushed -- flush now */
		if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH)
			pmem_persist(dest, bytes);
	}
}
/*
 * do_persist_ddax -- persist variant used for Device DAX mappings
 * (first argument presumably marks the range as pmem -- see util_pmem.h)
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
	util_persist_auto(1, ptr, size);
}

/*
 * do_persist -- persist variant used for regular (non-DAX) mappings
 */
static void
do_persist(const void *ptr, size_t size)
{
	util_persist_auto(0, ptr, size);
}
int
main(int argc, char *argv[])
{
	int fd;
	size_t mapped_len;
	char *dest;
	if (argc != 4)
		UT_FATAL("usage: %s file offset length", argv[0]);
	/* report which env-selected code paths this run exercises */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem_memset %s %s %s %savx %savx512f",
			argv[2], argv[3],
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	fd = OPEN(argv[1], O_RDWR);
	/* open a pmem file and memory map it */
	if ((dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL)) == NULL)
		UT_FATAL("!Could not mmap %s\n", argv[1]);
	size_t dest_off = strtoul(argv[2], NULL, 0);
	size_t bytes = strtoul(argv[3], NULL, 0);
	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file with fd %d", fd);
	/* Device DAX needs the pmem-aware persist variant */
	persist_fn p;
	p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
	do_memset_variants(fd, dest, argv[1], dest_off, bytes, p);
	UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);
	CLOSE(fd);
	DONE(NULL);
}
| 2,428 | 22.355769 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_fragmentation/obj_fragmentation.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_fragmentation.c -- measures average heap fragmentation
*
* A pretty simplistic test that measures internal fragmentation of the
* allocator for the given size.
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_fragmentation"
#define OBJECT_OVERHEAD 64 /* account for the header added to each object */
#define MAX_OVERALL_OVERHEAD 0.10f
/*
* For the best accuracy fragmentation should be measured for one full zone
* because the metadata is preallocated. For reasonable test duration a smaller
* size must be used.
*/
#define DEFAULT_FILE_SIZE ((size_t)(1ULL << 28)) /* 256 megabytes */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_fragmentation");
	if (argc < 3)
		UT_FATAL("usage: %s allocsize filename [filesize]", argv[0]);
	size_t file_size;
	if (argc == 4)
		file_size = ATOUL(argv[3]);
	else
		file_size = DEFAULT_FILE_SIZE;
	size_t alloc_size = ATOUL(argv[1]);
	const char *path = argv[2];
	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, file_size,
		S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/* fill the pool with fixed-size objects until allocation fails */
	size_t allocated = 0;
	int err = 0;
	do {
		PMEMoid oid;
		err = pmemobj_alloc(pop, &oid, alloc_size, 0, NULL, NULL);
		if (err == 0)
			allocated += pmemobj_alloc_usable_size(oid) +
				OBJECT_OVERHEAD;
	} while (err == 0);
	/* overhead = fraction of the file not usable by allocations */
	float allocated_pct = ((float)allocated / file_size);
	float overhead_pct = 1.f - allocated_pct;
	UT_ASSERT(overhead_pct <= MAX_OVERALL_OVERHEAD);
	pmemobj_close(pop);
	DONE(NULL);
}
| 1,607 | 23.738462 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_recovery/obj_recovery.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_recovery.c -- unit test for pool recovery
*/
#include "unittest.h"
#include "valgrind_internal.h"
#if VG_PMEMCHECK_ENABLED
#define VALGRIND_PMEMCHECK_END_TX VALGRIND_PMC_END_TX
#else
#define VALGRIND_PMEMCHECK_END_TX
#endif
POBJ_LAYOUT_BEGIN(recovery);
POBJ_LAYOUT_ROOT(recovery, struct root);
POBJ_LAYOUT_TOID(recovery, struct foo);
POBJ_LAYOUT_END(recovery);
#define MB (1 << 20)
/* small object allocated/freed by the recovery scenarios */
struct foo {
	int bar;
};

struct root {
	PMEMmutex lock;		/* optional tx lock (enabled via argv[2]) */
	TOID(struct foo) foo;
	char large_data[MB];	/* target of the TEST_LARGE snapshot */
};
#define BAR_VALUE 5
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_recovery");
	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recovery) != 1);
	if (argc != 5)
		UT_FATAL("usage: %s [file] [lock: y/n] "
			"[cmd: c/o] [type: n/f/s/l]",
			argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	/* 'c' = create (first run, crashes); 'o' = open (recovery check) */
	int exists = argv[3][0] == 'o';
	enum { TEST_NEW, TEST_FREE, TEST_SET, TEST_LARGE } type;
	if (argv[4][0] == 'n')
		type = TEST_NEW;
	else if (argv[4][0] == 'f')
		type = TEST_FREE;
	else if (argv[4][0] == 's')
		type = TEST_SET;
	else if (argv[4][0] == 'l')
		type = TEST_LARGE;
	else
		UT_FATAL("invalid type");
	if (!exists) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(recovery),
			0, S_IWUSR | S_IRUSR)) == NULL) {
			UT_FATAL("failed to create pool\n");
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(recovery)))
				== NULL) {
			UT_FATAL("failed to open pool\n");
		}
	}
	TOID(struct root) root = POBJ_ROOT(pop, struct root);
	/* optionally run every transaction under the root's mutex */
	int lock_type = TX_PARAM_NONE;
	void *lock = NULL;
	if (argv[2][0] == 'y') {
		lock_type = TX_PARAM_MUTEX;
		lock = &D_RW(root)->lock;
	}
	if (type == TEST_SET) {
		/* crash mid-tx after a field set; it must roll back */
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD_FIELD(D_RW(root)->foo, bar);
				D_RW(D_RW(root)->foo)->bar = BAR_VALUE * 2;
				/*
				 * Even though flushes are not required inside
				 * of a transaction, this is done here to
				 * suppress irrelevant pmemcheck issues, because
				 * we exit the program before the data is
				 * flushed, while preserving any real ones.
				 */
				pmemobj_persist(pop,
					&D_RW(D_RW(root)->foo)->bar,
					sizeof(int));
				/*
				 * We also need to cleanup the transaction state
				 * of pmemcheck.
				 */
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			/* aborted set must have been rolled back */
			UT_ASSERT(D_RW(D_RW(root)->foo)->bar == BAR_VALUE);
		}
	} else if (type == TEST_LARGE) {
		/* crash mid-tx during a large memset; it must roll back */
		if (!exists) {
			TX_BEGIN(pop) {
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				exit(0);
			} TX_END
		} else {
			UT_ASSERT(util_is_zeroed(D_RW(root)->large_data, MB));
			TX_BEGIN(pop) { /* we should be able to start TX */
				TX_MEMSET(D_RW(root)->large_data, 0xc, MB);
				pmemobj_persist(pop,
					D_RW(root)->large_data, MB);
				VALGRIND_PMEMCHECK_END_TX;
				pmemobj_tx_abort(0);
			} TX_END
		}
	} else if (type == TEST_NEW) {
		/* crash mid-tx after a tx-alloc; the object must not exist */
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TOID(struct foo) f = TX_NEW(struct foo);
				TX_SET(root, foo, f);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			UT_ASSERT(TOID_IS_NULL(D_RW(root)->foo));
		}
	} else { /* TEST_FREE */
		/* crash mid-tx after a tx-free; the object must survive */
		if (!exists) {
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TOID(struct foo) f = TX_NEW(struct foo);
				D_RW(root)->foo = f;
				D_RW(f)->bar = BAR_VALUE;
			} TX_END
			TX_BEGIN_PARAM(pop, lock_type, lock) {
				TX_ADD(root);
				TX_FREE(D_RW(root)->foo);
				D_RW(root)->foo = TOID_NULL(struct foo);
				pmemobj_persist(pop,
					&D_RW(root)->foo,
					sizeof(PMEMoid));
				VALGRIND_PMEMCHECK_END_TX;
				exit(0); /* simulate a crash */
			} TX_END
		} else {
			UT_ASSERT(!TOID_IS_NULL(D_RW(root)->foo));
		}
	}
	UT_ASSERT(pmemobj_check(path, POBJ_LAYOUT_NAME(recovery)));
	pmemobj_close(pop);
	DONE(NULL);
}
| 4,244 | 20.994819 | 61 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_perror/pmem2_perror.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_perror.c -- pmem2_perror unittests
*/
#include "libpmem2.h"
#include "unittest.h"
#include "out.h"
#include "config.h"
#include "source.h"
/*
* test_fail_pmem2_func_simple - simply check print message when func
* from pmem2 API fails
*/
static int
test_fail_pmem2_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	/* tc/argc/argv are unused; signature is dictated by the harness */
	struct pmem2_config cfg;
	/* offset > INT64_MAX is invalid, so the call below must fail */
	size_t offset = (size_t)INT64_MAX + 1;
	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTne(ret, 0);
	pmem2_perror("pmem2_config_set_offset");
	return 0;
}
/*
 * test_fail_pmem2_func_format -- check print message when func
 * from pmem2 API fails and ellipsis operator is used
 */
static int
test_fail_pmem2_func_format(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_config cfg;
	size_t offset = (size_t)INT64_MAX + 1;
	/* "randomly" chosen function to be failed */
	int ret = pmem2_config_set_offset(&cfg, offset);
	UT_ASSERTne(ret, 0);
	/* same as above, but with a printf-style format argument */
	pmem2_perror("pmem2_config_set_offset %d", 123);
	return 0;
}
/*
 * test_fail_system_func_simple -- check print message when directly called
 * system func fails
 */
static int
test_fail_system_func_simple(const struct test_case *tc, int argc, char *argv[])
{
	/* "randomly" chosen function to be failed */
	int ret = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(ret, -1);
	/* the '!' prefix records errno along with the message */
	ERR("!open");
	pmem2_perror("test");
	return 0;
}
/*
 * test_fail_system_func_format -- check print message when directly called
 * system func fails and ellipsis operator is used
 */
static int
test_fail_system_func_format(const struct test_case *tc, int argc, char *argv[])
{
	/* "randomly" chosen function to be failed */
	int ret = os_open("XXX", O_RDONLY);
	UT_ASSERTeq(ret, -1);
	ERR("!open");
	pmem2_perror("test %d", 123);
	return 0;
}
/*
 * test_fail_pmem2_syscall_simple -- check print message when a system call
 * fails inside pmem2_source_size
 */
static int
test_fail_pmem2_syscall_simple(const struct test_case *tc,
		int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;

	/* an invalid descriptor/handle makes pmem2_source_size fail */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif

	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/* use the unittest macro, consistent with the rest of this file */
	UT_ASSERTne(ret, 0);

	pmem2_perror("test");

	return 0;
}
/*
 * test_fail_pmem2_syscall_format -- check print message when a system call
 * fails inside pmem2_source_size and ellipsis operator is used
 */
static int
test_fail_pmem2_syscall_format(const struct test_case *tc,
		int argc, char *argv[])
{
	struct pmem2_source src;
	size_t size;

	/* an invalid descriptor/handle makes pmem2_source_size fail */
#ifdef _WIN32
	src.type = PMEM2_SOURCE_HANDLE;
	src.value.handle = INVALID_HANDLE_VALUE;
#else
	src.type = PMEM2_SOURCE_FD;
	src.value.fd = -1;
#endif

	/* "randomly" chosen function to be failed */
	int ret = pmem2_source_size(&src, &size);
	/* use the unittest macro, consistent with the rest of this file */
	UT_ASSERTne(ret, 0);

	pmem2_perror("test %d", 123);

	return 0;
}
/*
 * test_simple_err_to_errno_check -- verify the mapping from pmem2 error
 * codes to errno values
 */
static int
test_simple_err_to_errno_check(const struct test_case *tc,
		int argc, char *argv[])
{
	/* known pmem2 error codes map onto specific errno values */
	UT_ASSERTeq(pmem2_err_to_errno(PMEM2_E_NOSUPP), ENOTSUP);
	UT_ASSERTeq(pmem2_err_to_errno(PMEM2_E_UNKNOWN), EINVAL);

	/* a negated errno value is passed through unchanged */
	UT_ASSERTeq(pmem2_err_to_errno(-ENOTSUP), ENOTSUP);

	return 0;
}
/*
 * test_cases -- available test cases; the case to run is selected on the
 * command line by TEST_CASE_PROCESS in main()
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_fail_pmem2_func_simple),
	TEST_CASE(test_fail_pmem2_func_format),
	TEST_CASE(test_fail_system_func_simple),
	TEST_CASE(test_fail_system_func_format),
	TEST_CASE(test_fail_pmem2_syscall_simple),
	TEST_CASE(test_fail_pmem2_syscall_format),
	TEST_CASE(test_simple_err_to_errno_check),
};

/* number of entries in test_cases[] */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize the test framework and the out-logging module, then
 * dispatch the test case(s) named on the command line
 */
int
main(int argc, char **argv)
{
	START(argc, argv, "pmem2_perror");
	util_init();
	/* ERR()/pmem2_perror output goes through the out module */
	out_init("pmem2_perror", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}
| 4,205 | 21.253968 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_mem_ext/pmem2_mem_ext.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_mem_ext.c -- test for low level functions from libpmem2
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "valgrind_internal.h"
/* function-pointer types matching the libpmem2 mem* entry points */
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
		unsigned flags);
typedef void *(*memset_fn)(void *pmemdest, int c, size_t len,
		unsigned flags);

/*
 * Flags -- flag combinations exercised by the test; the "flag" command-line
 * argument is an index into this array
 */
static unsigned Flags[] = {
	0,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all flags combined */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
/*
 * do_memcpy_with_flag -- copy data_size bytes inside the mapping using the
 * pmem2 memcpy function and the flag combination selected by 'flag'
 */
static void
do_memcpy_with_flag(char *addr, size_t data_size, memcpy_fn cpy_fn, int flag)
{
	/* the destination region directly follows the source region */
	cpy_fn(addr + data_size, addr, data_size, Flags[flag]);
}
/*
 * do_memmove_with_flag -- move data_size bytes inside the mapping using the
 * pmem2 memmove function and the flag combination selected by 'flag'
 */
static void
do_memmove_with_flag(char *addr, size_t data_size, memmove_fn mov_fn, int flag)
{
	/* the destination region directly follows the source region */
	mov_fn(addr + data_size, addr, data_size, Flags[flag]);
}
/*
 * do_memset_with_flag -- fill data_size bytes with 0x01 using the pmem2
 * memset function and the flag combination selected by 'flag'
 */
static void
do_memset_with_flag(char *addr, size_t data_size, memset_fn set_fn, int flag)
{
	set_fn(addr, 1, data_size, Flags[flag]);

	/* NOFLUSH leaves the data dirty -- tell pmemcheck it was persisted */
	if ((Flags[flag] & PMEM2_F_MEM_NOFLUSH) != 0)
		VALGRIND_DO_PERSIST(addr, data_size);
}
/*
 * main -- map the given file with libpmem2 and run one mem* operation
 * (memcpy/memset/memmove) of the given size with the selected flags
 *
 * usage: pmem2_mem_ext file type size flag
 *   type: 'C' (memcpy), 'S' (memset) or 'M' (memmove)
 *   flag: index into the Flags[] array
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *addr;
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (argc != 5)
		UT_FATAL("usage: %s file type size flag", argv[0]);

	/* environment knobs that influence the selected mem* implementation */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem2_mem_ext %s %savx %savx512f",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	util_init();

	char type = argv[2][0];
	size_t data_size = strtoul(argv[3], NULL, 0);
	int flag = atoi(argv[4]);
	/* a negative flag wraps to a huge unsigned value and also fails here */
	UT_ASSERT(flag < ARRAY_SIZE(Flags));

	fd = OPEN(argv[1], O_RDWR);
	UT_ASSERT(fd != -1);

	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);

	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);

	PMEM2_CONFIG_DELETE(&cfg);
	PMEM2_SOURCE_DELETE(&src);

	mapped_len = pmem2_map_get_size(map);
	/* the helpers use [0, data_size) as src and [data_size, 2*data_size) */
	UT_ASSERT(data_size * 2 < mapped_len);

	addr = pmem2_map_get_address(map);
	if (addr == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);

	switch (type) {
	case 'C':
	{
		pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
		do_memcpy_with_flag(addr, data_size, memcpy_fn, flag);
		break;
	}
	case 'S':
	{
		pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
		do_memset_with_flag(addr, data_size, memset_fn, flag);
		break;
	}
	case 'M':
	{
		pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
		do_memmove_with_flag(addr, data_size, memmove_fn, flag);
		break;
	}
	default:
		UT_FATAL("!wrong type of test %c", type);
		break;
	}

	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);

	CLOSE(fd);

	DONE(NULL);
}
| 3,349 | 22.426573 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_util/rpmemd_util_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util_test.c -- unit tests for rpmemd_util module
*/
#include "unittest.h"
#include "rpmem_common.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "util.h"
/* structure to store results of rpmemd_apply_pm_policy() */
struct result {
	int ret;	/* 0 on success, 1 on failure */
	enum rpmem_persist_method persist_method;
	int (*persist)(const void *addr, size_t len);
	void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
};

/* all values to test; MAX_RPMEM_PM is deliberately out of range */
static const enum rpmem_persist_method pms[] =
	{RPMEM_PM_GPSPM, RPMEM_PM_APM, MAX_RPMEM_PM};
static const int is_pmems[] = {0, 1};

enum mode {
	MODE_VALID,	/* exercise only in-range persist methods */
	MODE_INVALID,	/* exercise the out-of-range persist method */
	MODE_MAX
};

/* [mode][dimension][lo/hi) index ranges into pms[] and is_pmems[] */
static const int ranges[2][2][2] = {
	[MODE_VALID] = {
		{0, ARRAY_SIZE(pms) - 1},
		{0, ARRAY_SIZE(is_pmems)}
	},
	[MODE_INVALID] = {
		{ARRAY_SIZE(pms) - 1, ARRAY_SIZE(pms)},
		{0, ARRAY_SIZE(is_pmems)}
	}
};

/* expected results, indexed by [persist method][is_pmem] */
static const struct result exp_results[3][2] = {
	{
		/* GPSPM and is_pmem == false */
		{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
		/* GPSPM and is_pmem == true */
		{0, RPMEM_PM_GPSPM, rpmemd_pmem_persist,
			pmem_memcpy_persist}
	}, {
		/* APM and is_pmem == false */
		{0, RPMEM_PM_GPSPM, pmem_msync, memcpy},
		/* APM and is_pmem == true */
		{0, RPMEM_PM_APM, rpmemd_flush_fatal,
			pmem_memcpy_persist}
	}, {
		/* persistency method outside of the range */
		{1, 0, 0, 0},
		{1, 0, 0, 0}
	}
};
/*
 * test_apply_pm_policy -- call rpmemd_apply_pm_policy() and record whether
 * it succeeded (result->ret == 0) or failed (result->ret == 1)
 */
static void
test_apply_pm_policy(struct result *result, int is_pmem)
{
	int failed = rpmemd_apply_pm_policy(&result->persist_method,
			&result->persist, &result->memcpy_persist, is_pmem);

	result->ret = failed ? 1 : 0;
}
/* print short usage information for this test binary */
#define USAGE() do {\
	UT_ERR("usage: %s valid|invalid", argv[0]);\
} while (0)
/*
 * test -- run test_apply_pm_policy() over the given ranges of persist
 * methods and is_pmem values and compare each outcome with exp_results
 */
static void
test(const int pm_range[2], const int is_pmem_range[2])
{
	rpmemd_log_level = RPD_LOG_NOTICE;
	int ret = rpmemd_log_init("rpmemd_log", NULL, 0);
	UT_ASSERTeq(ret, 0);

	struct result result;
	const struct result *exp_result;
	for (int pm_ind = pm_range[0]; pm_ind < pm_range[1]; ++pm_ind) {
		for (int is_pmem_ind = is_pmem_range[0];
			is_pmem_ind < is_pmem_range[1]; ++is_pmem_ind) {
			result.persist_method = pms[pm_ind];
			exp_result = &exp_results[pm_ind][is_pmem_ind];

			test_apply_pm_policy(&result, is_pmems[is_pmem_ind]);

			UT_ASSERTeq(result.ret, exp_result->ret);
			if (exp_result->ret == 0) {
				UT_ASSERTeq(result.persist_method,
						exp_result->persist_method);
				UT_ASSERTeq(result.persist,
						exp_result->persist);
				/*
				 * previously unchecked -- exp_results also
				 * specifies the expected memcpy function
				 */
				UT_ASSERTeq(result.memcpy_persist,
						exp_result->memcpy_persist);
			}
		}
	}

	rpmemd_log_close();
}
/*
 * main -- parse the mode argument ("valid" or "invalid") and run the test
 * over the corresponding parameter ranges
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmemd_util");

	if (argc < 2) {
		USAGE();
		return 1;
	}

	enum mode mode = MODE_MAX;
	if (strcmp(argv[1], "valid") == 0) {
		mode = MODE_VALID;
	} else if (strcmp(argv[1], "invalid") == 0) {
		mode = MODE_INVALID;
	} else {
		USAGE();
		return 1;
	}

	/* one of the branches above must have set the mode */
	UT_ASSERTne(mode, MODE_MAX);

	test(ranges[mode][0], ranges[mode][1]);

	DONE(NULL);
}
| 3,027 | 20.027778 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_defrag/obj_defrag.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* obj_defrag.c -- unit test for pmemobj_defrag
*/
#include "unittest.h"
#include <limits.h>
#define OBJECT_SIZE 100
/*
 * defrag_basic -- three objects are allocated, the lowest-addressed one is
 * freed, and pmemobj_defrag() is expected to move the highest-addressed
 * object into the freed slot, updating every registered PMEMoid reference
 */
static void
defrag_basic(PMEMobjpool *pop)
{
	int ret;
	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	/* fill oid3 with a pattern so relocation can be verified */
	char *buff = (char *)MALLOC(OBJECT_SIZE);
	memset(buff, 0xc, OBJECT_SIZE);
	char *foop = (char *)pmemobj_direct(oid3);
	pmemobj_memcpy_persist(pop, foop, buff, OBJECT_SIZE);
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);
	/* create a hole at the lowest position */
	pmemobj_free(&oid1);
	/* oid4 is a duplicate reference to oid3; defrag must update both */
	PMEMoid oid4 = oid3;
	PMEMoid *oids[] = {&oid2, &oid3, &oid4};
	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, 3, &result);
	UT_ASSERTeq(ret, 0);
	/* duplicates count once: two distinct objects, both relocated */
	UT_ASSERTeq(result.total, 2);
	UT_ASSERTeq(result.relocated, 2);
	/* the object at higher location should move into the freed oid1 pos */
	foop = (char *)pmemobj_direct(oid3);
	UT_ASSERT(oid3.off < oid2.off);
	UT_ASSERTeq(oid3.off, oid4.off);
	/* the data pattern must survive the relocation */
	UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);
	FREE(buff);
}
/* an object holding three references to other objects */
struct test_object
{
	PMEMoid a;
	PMEMoid b;
	PMEMoid c;
};
/*
 * defrag_nested_pointers -- build a small cycle of test_objects that point
 * at each other, defragment it and verify that both the external PMEMoid
 * variables and the embedded (on-media) PMEMoid fields were updated
 */
static void
defrag_nested_pointers(PMEMobjpool *pop)
{
	int ret;

	/*
	 * This is done so that the oids below aren't allocated literally in the
	 * ideal position in the heap (chunk 0, offset 0).
	 */
#define EXTRA_ALLOCS 100
	for (int i = 0; i < EXTRA_ALLOCS; ++i) {
		PMEMoid extra;
		ret = pmemobj_zalloc(pop, &extra, OBJECT_SIZE, 0);
		UT_ASSERTeq(ret, 0);
		pmemobj_free(&extra);
	}
#undef EXTRA_ALLOCS

	PMEMoid oid1;
	PMEMoid oid2;
	PMEMoid oid3;
	ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0);
	UT_ASSERTeq(ret, 0);
	struct test_object *oid1p = (struct test_object *)pmemobj_direct(oid1);
	struct test_object *oid2p = (struct test_object *)pmemobj_direct(oid2);
	struct test_object *oid3p = (struct test_object *)pmemobj_direct(oid3);
	/* wire up cross-references between the three objects */
	oid1p->a = OID_NULL;
	oid1p->b = oid2;
	oid1p->c = oid1;
	pmemobj_persist(pop, oid1p, sizeof(*oid1p));
	oid2p->a = oid1;
	oid2p->b = OID_NULL;
	oid2p->c = oid3;
	pmemobj_persist(pop, oid2p, sizeof(*oid2p));
	oid3p->a = oid2;
	oid3p->b = oid2;
	oid3p->c = oid1;
	pmemobj_persist(pop, oid3p, sizeof(*oid3p));
#define OID_PTRS 12
#define EXTRA_OID_PTRS 60
#define OIDS_ALL (EXTRA_OID_PTRS + OID_PTRS)
	PMEMoid **oids = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * OIDS_ALL);
	PMEMoid *oid3pprs = (PMEMoid *)MALLOC(sizeof(PMEMoid) * EXTRA_OID_PTRS);
	int i;
	/* many duplicate volatile references to oid3 -- all must be updated */
	for (i = 0; i < EXTRA_OID_PTRS; ++i) {
		oid3pprs[i] = oid3;
		oids[i] = &oid3pprs[i];
	}
	/* register the external variables and the embedded fields */
	oids[i + 0] = &oid1;
	oids[i + 1] = &oid2;
	oids[i + 2] = &oid3;
	oids[i + 3] = &oid1p->a;
	oids[i + 4] = &oid1p->b;
	oids[i + 5] = &oid1p->c;
	oids[i + 6] = &oid2p->a;
	oids[i + 7] = &oid2p->b;
	oids[i + 8] = &oid2p->c;
	oids[i + 9] = &oid3p->a;
	oids[i + 10] = &oid3p->b;
	oids[i + 11] = &oid3p->c;

	struct pobj_defrag_result result;
	ret = pmemobj_defrag(pop, oids, OIDS_ALL, &result);
	UT_ASSERTeq(result.total, 3);
	UT_ASSERTeq(result.relocated, 3);
	UT_ASSERTeq(ret, 0);

	/* re-resolve the (possibly moved) objects */
	oid1p = (struct test_object *)pmemobj_direct(oid1);
	oid2p = (struct test_object *)pmemobj_direct(oid2);
	oid3p = (struct test_object *)pmemobj_direct(oid3);

	for (int i = 0; i < EXTRA_OID_PTRS; ++i) {
		UT_ASSERTeq(oid3pprs[i].off, oid3.off);
	}

	/* the reference graph must be intact after the relocation */
	UT_ASSERTeq(oid1p->a.off, 0);
	UT_ASSERTeq(oid1p->b.off, oid2.off);
	UT_ASSERTeq(oid1p->c.off, oid1.off);
	UT_ASSERTeq(oid2p->a.off, oid1.off);
	UT_ASSERTeq(oid2p->b.off, 0);
	UT_ASSERTeq(oid2p->c.off, oid3.off);
	UT_ASSERTeq(oid3p->a.off, oid2.off);
	UT_ASSERTeq(oid3p->b.off, oid2.off);
	UT_ASSERTeq(oid3p->c.off, oid1.off);

	pmemobj_free(&oid1);
	pmemobj_free(&oid2);
	pmemobj_free(&oid3);

	FREE(oids);
	FREE(oid3pprs);
}
/*
 * main -- create the test pool and run both defragmentation scenarios
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_defrag");

	/* fail early instead of dereferencing a missing argv[1] */
	if (argc < 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
		PMEMOBJ_MIN_POOL * 2, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	defrag_basic(pop);
	defrag_nested_pointers(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 4,429 | 22.817204 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_deep_flush/pmem2_deep_flush.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_deep_flush.c -- unit test for pmem_deep_flush()
*
* usage: pmem2_deep_flush file deep_persist_size offset
*
* pmem2_deep_flush depending on the mapping granularity is performed using one
* of the following paths:
* - page: NOP
* - cache: pmem2_deep_flush_dax
* - byte: pmem2_persist_cpu_cache + pmem2_deep_flush_dax
*
* Where pmem2_deep_flush_dax:
* - pmem2_get_type_from_stat is used to determine a file type
* - for regular files performs pmem2_flush_file_buffers_os OR
* - for Device DAX:
* - is looking for Device DAX region (pmem2_get_region_id)
* - is constructing the region deep flush file paths
* - opens deep_flush file (os_open)
* - reads deep_flush file (read)
* - performs a write to it (write)
*
* Where pmem2_persist_cpu_cache performs:
* - flush (replaced by mock_flush) AND
* - drain (replaced by mock_drain)
*
* Additionally, for the sake of this test, the following functions are
* replaced:
* - pmem2_get_type_from_stat (to control perceived file type)
* - pmem2_flush_file_buffers_os (for counting calls)
* - pmem2_get_region_id (to prevent reading sysfs in search for non
* existing Device DAXes)
* or mocked:
* - os_open (to prevent opening non existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
* - write (for counting writes to non-existing
* /sys/bus/nd/devices/region[0-9]+/deep_flush files)
*
* NOTE: In normal usage the persist function precedes any call to
* pmem2_deep_flush. This test aims to validate the pmem2_deep_flush
* function and so the persist function is omitted.
*/
#include "source.h"
#ifndef _WIN32
#include <sys/sysmacros.h>
#endif
#include "mmap.h"
#include "persist.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "unittest.h"
/* counters incremented by the mocks to track which flush path was taken */
static int n_file_buffs_flushes = 0;
static int n_fences = 0;
static int n_flushes = 0;
static int n_writes = 0;
static int n_reads = 0;
/* points into the map's source so test cases can fake the file type */
static enum pmem2_file_type *ftype_value;
/* knobs controlling the behavior of the mocked read() */
static int read_invalid = 0;
static int deep_flush_not_needed = 0;

#ifndef _WIN32
/* mocked identifiers describing a fake Device DAX region */
#define MOCK_FD 999
#define MOCK_REG_ID 888
#define MOCK_BUS_DEVICE_PATH "/sys/bus/nd/devices/region888/deep_flush"
#define MOCK_DEV_ID 777UL
/*
 * pmem2_get_region_id -- redefine libpmem2 function; always reports the
 * mocked region id so the test never scans real sysfs state
 */
int
pmem2_get_region_id(const struct pmem2_source *src,
	unsigned *region_id)
{
	*region_id = MOCK_REG_ID;

	return 0;
}
/*
 * os_open -- os_open mock; returns a fake descriptor for the mocked
 * deep_flush sysfs path and forwards to the real open otherwise
 */
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	if (strcmp(path, MOCK_BUS_DEVICE_PATH) == 0)
		return MOCK_FD;

	/* not the mocked path -- pass everything through to the real call */
	va_list ap;
	va_start(ap, flags);
	int mode = va_arg(ap, int);
	va_end(ap);
	return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END
/*
 * write -- write mock; counts writes to the mocked deep_flush file and
 * verifies that exactly the single character '1' is written
 */
FUNC_MOCK(write, int, int fd, const void *buffer, size_t count)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(*(char *)buffer, '1');
	UT_ASSERTeq(count, 1);
	UT_ASSERTeq(fd, MOCK_FD);
	++n_writes;
	return 1;
}
FUNC_MOCK_END
/*
 * read -- read mock; emulates reading the deep_flush sysfs file --
 * returns "1\n" by default, "0\n" when deep_flush_not_needed is set,
 * or a zero-length read when read_invalid is set
 */
FUNC_MOCK(read, int, int fd, void *buffer, size_t nbytes)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTeq(nbytes, 2);
	UT_ASSERTeq(fd, MOCK_FD);
	UT_OUT("mocked read, fd %d", fd);
	char pattern[2] = {'1', '\n'};
	int ret = sizeof(pattern);
	if (deep_flush_not_needed)
		pattern[0] = '0';
	if (read_invalid) {
		ret = 0;
		goto end;
	}
	memcpy(buffer, pattern, sizeof(pattern));
end:
	++n_reads;
	return ret;
}
FUNC_MOCK_END
#endif /* not _WIN32 */
/*
 * mock_flush -- count flush calls in the test; addr/len are intentionally
 * ignored, only the call count matters
 */
static void
mock_flush(const void *addr, size_t len)
{
	++n_flushes;
}
/*
 * mock_drain -- count drain (fence) calls in the test
 */
static void
mock_drain(void)
{
	++n_fences;
}
/*
 * pmem2_arch_init -- attach flush and drain functions replacements;
 * called by libpmem2 during initialization, so all flush/fence activity
 * in this test goes through the counting mocks above
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	info->flush = mock_flush;
	info->fence = mock_drain;
}
/*
 * pmem2_map_find -- redefine libpmem2 function, redefinition is needed
 * for a proper compilation of the test. NOTE: this function is not used
 * in the test, so reaching it is itself a failure.
 */
struct pmem2_map *
pmem2_map_find(const void *addr, size_t len)
{
	UT_ASSERT(0);
	return NULL;
}
/*
 * pmem2_flush_file_buffers_os -- redefine libpmem2 function; counts the
 * calls and always reports success
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
		int autorestart)
{
	++n_file_buffs_flushes;
	return 0;
}
/*
 * map_init -- fill pmem2_map in minimal scope needed by the test cases
 */
static void
map_init(struct pmem2_map *map)
{
	const size_t length = 8 * MEGABYTE;
	map->content_length = length;
	/*
	 * The test needs to allocate more memory because some test cases
	 * validate behavior with address beyond mapping.
	 */
	map->addr = MALLOC(2 * length);
#ifndef _WIN32
	map->source.type = PMEM2_SOURCE_FD;
	/* mocked device ID for device DAX */
	map->source.value.st_rdev = MOCK_DEV_ID;
#else
	map->source.type = PMEM2_SOURCE_HANDLE;
#endif
	/* expose the file-type field so test cases can override it */
	ftype_value = &map->source.value.ftype;
}
/*
 * counters_check_n_reset -- check numbers of uses of deep-flushing elements
 * against the expected values, then reset all counters and the read-mock
 * behavior knobs for the next test step
 */
static void
counters_check_n_reset(int msynces, int flushes, int fences,
		int writes, int reads)
{
	UT_ASSERTeq(n_file_buffs_flushes, msynces);
	UT_ASSERTeq(n_flushes, flushes);
	UT_ASSERTeq(n_fences, fences);
	UT_ASSERTeq(n_writes, writes);
	UT_ASSERTeq(n_reads, reads);

	n_file_buffs_flushes = 0;
	n_flushes = 0;
	n_fences = 0;
	n_writes = 0;
	n_reads = 0;

	/* reset the read() mock configuration as well */
	read_invalid = 0;
	deep_flush_not_needed = 0;
}
/*
 * test_deep_flush_func -- test pmem2_deep_flush for all granularity options
 * on a regular file
 */
static int
test_deep_flush_func(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	*ftype_value = PMEM2_FTYPE_REG;
	void *addr = map.addr;
	size_t len = map.content_length;

	/* page granularity: deep flush is a NOP */
	map.effective_granularity = PMEM2_GRANULARITY_PAGE;
	pmem2_set_flush_fns(&map);
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 0, 0, 0, 0);

	/* cache-line granularity: one file-buffers flush */
	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);

	/* byte granularity: also one file-buffers flush */
	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(1, 0, 0, 0, 0);

	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_func_devdax -- test pmem2_deep_flush with mocked DAX
 * devices; exercises the normal path, the "deep flush not needed" path
 * and the invalid (short) read of the deep_flush sysfs file
 */
static int
test_deep_flush_func_devdax(const struct test_case *tc, int argc, char *argv[])
{
	struct pmem2_map map;
	map_init(&map);
	void *addr = map.addr;
	size_t len = map.content_length;
	*ftype_value = PMEM2_FTYPE_DEVDAX;

	map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	pmem2_set_flush_fns(&map);

	/* normal path: flush + fence + sysfs read and write */
	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);

	/* sysfs reports "0": the deep-flush write is skipped */
	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);

	/* short read: treated as "no write needed", still succeeds */
	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);

	/* repeat the same three scenarios with byte granularity */
	map.effective_granularity = PMEM2_GRANULARITY_BYTE;
	pmem2_set_flush_fns(&map);
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 1, 1);

	deep_flush_not_needed = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);

	read_invalid = 1;
	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, 0);
	counters_check_n_reset(0, 1, 1, 0, 1);

	FREE(map.addr);
	return 0;
}
/*
 * test_deep_flush_range_beyond_mapping -- test pmem2_deep_flush with
 * the address that goes beyond mapping
 */
static int
test_deep_flush_range_beyond_mapping(const struct test_case *tc, int argc,
		char *argv[])
{
	struct pmem2_map map;
	map_init(&map);

	/* set address completely beyond mapping */
	void *addr = (void *)((uintptr_t)map.addr + map.content_length);
	size_t len = map.content_length;

	int ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	/*
	 * set address in the middle of mapping, which makes range partially
	 * beyond mapping
	 */
	addr = (void *)((uintptr_t)map.addr + map.content_length / 2);

	ret = pmem2_deep_flush(&map, addr, len);
	UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE);

	FREE(map.addr);
	return 0;
}
/*
 * test_cases -- available test cases; selected by name on the command line
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_deep_flush_func),
	TEST_CASE(test_deep_flush_func_devdax),
	TEST_CASE(test_deep_flush_range_beyond_mapping),
};

/* number of entries in test_cases[] */
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initialize the persist module (installs the mocked flush/drain
 * via pmem2_arch_init) and dispatch the requested test case(s)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_deep_flush");
	pmem2_persist_init();
	util_init();

	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);

	DONE(NULL);
}
| 8,865 | 22.270341 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_direct/obj_direct.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_direct.c -- unit test for pmemobj_direct()
*/
#include "obj.h"
#include "obj_direct.h"
#include "sys_util.h"
#include "unittest.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"

/* synchronization state shared between main() and the worker thread */
static os_mutex_t lock1;
static os_mutex_t lock2;
static os_cond_t sync_cond1;
static os_cond_t sync_cond2;
static int cond1;	/* set once the worker finished its first check */
static int cond2;	/* set once main freed the object and closed pools */
static PMEMoid thread_oid;	/* object inspected by both threads */
/*
 * obj_direct -- resolve an OID through both pmemobj_direct variants and
 * make sure they agree; returns the resolved pointer
 */
static void *
obj_direct(PMEMoid oid)
{
	void *inlined = obj_direct_inline(oid);
	UT_ASSERTeq(inlined, obj_direct_non_inline(oid));
	return inlined;
}
/*
 * test_worker -- verifies that thread_oid resolves while its pool is open
 * and stops resolving after main() frees the object and closes the pool
 */
static void *
test_worker(void *arg)
{
	/* check before pool is closed, then let main continue */
	UT_ASSERTne(obj_direct(thread_oid), NULL);
	util_mutex_lock(&lock1);
	cond1 = 1;
	os_cond_signal(&sync_cond1);
	util_mutex_unlock(&lock1);

	/* wait for main thread to free & close, then check */
	util_mutex_lock(&lock2);
	while (!cond2)
		os_cond_wait(&sync_cond2, &lock2);
	util_mutex_unlock(&lock2);
	UT_ASSERTeq(obj_direct(thread_oid), NULL);
	return NULL;
}
/*
 * main -- create several pools, verify pmemobj_direct() resolution for
 * valid, NULL and stale OIDs (also from a second thread), then tear down
 *
 * usage: obj_direct [directory] [# of pools]
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_direct");

	if (argc != 3)
		UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);

	unsigned npools = ATOU(argv[2]);
	const char *dir = argv[1];
	int r;

	util_mutex_init(&lock1);
	util_mutex_init(&lock2);
	util_cond_init(&sync_cond1);
	util_cond_init(&sync_cond2);
	cond1 = cond2 = 0;

	PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
	UT_ASSERTne(pops, NULL);

	size_t length = strlen(dir) + MAX_PATH_LEN;
	char *path = MALLOC(length);
	for (unsigned i = 0; i < npools; ++i) {
		int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
			dir, i);
		/* cast is safe: ret < 0 is handled first */
		if (ret < 0 || (size_t)ret >= length)
			UT_FATAL("snprintf: %d", ret);
		pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);

		if (pops[i] == NULL)
			UT_FATAL("!pmemobj_create");
	}
	/* the path buffer is only needed for pool creation (fixes a leak) */
	FREE(path);

	PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(oids, NULL);
	PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
	UT_ASSERTne(tmpoids, NULL);

	/* a NULL OID never resolves */
	oids[0] = OID_NULL;
	UT_ASSERTeq(obj_direct(oids[0]), NULL);

	for (unsigned i = 0; i < npools; ++i) {
		/* zero offset is not a valid object */
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, 0};
		UT_ASSERTeq(obj_direct(oids[i]), NULL);

		/* the heap start resolves relative to the pool base */
		uint64_t off = pops[i]->heap_offset;
		oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
		UT_ASSERTeq((char *)obj_direct(oids[i]) - off,
			(char *)pops[i]);

		r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
		UT_ASSERTeq(r, 0);
	}

	r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
	UT_ASSERTeq(r, 0);
	UT_ASSERTne(obj_direct(thread_oid), NULL);

	os_thread_t t;
	THREAD_CREATE(&t, NULL, test_worker, NULL);

	/* wait for the worker thread to perform the first check */
	util_mutex_lock(&lock1);
	while (!cond1)
		os_cond_wait(&sync_cond1, &lock1);
	util_mutex_unlock(&lock1);

	for (unsigned i = 0; i < npools; ++i) {
		UT_ASSERTne(obj_direct(tmpoids[i]), NULL);

		pmemobj_free(&tmpoids[i]);

		/* freed and closed objects must no longer resolve */
		UT_ASSERTeq(obj_direct(tmpoids[i]), NULL);
		pmemobj_close(pops[i]);
		UT_ASSERTeq(obj_direct(oids[i]), NULL);
	}

	/* signal the worker that we're free and closed */
	util_mutex_lock(&lock2);
	cond2 = 1;
	os_cond_signal(&sync_cond2);
	util_mutex_unlock(&lock2);

	THREAD_JOIN(&t, NULL);
	util_cond_destroy(&sync_cond1);
	util_cond_destroy(&sync_cond2);
	util_mutex_destroy(&lock1);
	util_mutex_destroy(&lock2);
	FREE(pops);
	FREE(tmpoids);
	FREE(oids);

	DONE(NULL);
}
| 3,476 | 22.653061 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_memcheck/obj_memcheck.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
/*
 * Layout definition
 */
POBJ_LAYOUT_BEGIN(mc);
POBJ_LAYOUT_ROOT(mc, struct root);
POBJ_LAYOUT_TOID(mc, struct struct1);
POBJ_LAYOUT_END(mc);

/* test object: one fixed field plus a flexible array member */
struct struct1 {
	int fld;
	int dyn[];
};

/* root object holding two struct1 handles */
struct root {
	TOID(struct struct1) s1;
	TOID(struct struct1) s2;
};
/*
 * test_memcheck_bug -- reproducer for a memcheck mempool-tracking issue:
 * after a free/re-alloc at the same address, the adjacent NOACCESS byte
 * write should be reported correctly. The invalid access is intentional.
 */
static void
test_memcheck_bug(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[100];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 8);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16);
	VALGRIND_MAKE_MEM_NOACCESS(tmp, 8);
	/* intentional write into the NOACCESS region */
	tmp[7] = 0x66;
#endif
}
/*
 * test_memcheck_bug2 -- reproducer for a memcheck diagnostics issue: the
 * intentional write below should be attributed to the freed 256-offset
 * block, not reported as "after" the earlier freed block
 */
static void
test_memcheck_bug2(void)
{
#if VG_MEMCHECK_ENABLED
	volatile char tmp[1000];
	VALGRIND_CREATE_MEMPOOL(tmp, 0, 0);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 128, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 128);
	VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 256, 128);
	VALGRIND_MEMPOOL_FREE(tmp, tmp + 256);
	/*
	 * This should produce warning:
	 * Address ... is 0 bytes inside a block of size 128 bytes freed.
	 * instead, it produces a warning:
	 * Address ... is 0 bytes after a block of size 128 freed
	 */
	int *data = (int *)(tmp + 256);
	*data = 0x66;
#endif
}
/*
 * test_everything -- exercise pmemobj allocations with a series of
 * deliberately invalid accesses (uninitialized read, writes to freed
 * objects, out-of-bounds writes) that memcheck is expected to report
 */
static void
test_everything(const char *path)
{
	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(mc),
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	struct root *rt = D_RW(POBJ_ROOT(pop, struct root));

	POBJ_ALLOC(pop, &rt->s1, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	struct struct1 *s1 = D_RW(rt->s1);
	struct struct1 *s2;

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	s2 = D_RW(rt->s2);
	POBJ_FREE(&rt->s2);

	/* read of uninitialized variable */
	if (s1->fld)
		UT_OUT("%d", 1);

	/* write to freed object */
	s2->fld = 7;
	pmemobj_persist(pop, s2, sizeof(*s2));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	s2 = D_RW(rt->s2);
	memset(s2, 0, pmemobj_alloc_usable_size(rt->s2.oid));
	s2->fld = 12; /* ok */

	/* invalid write */
	s2->dyn[100000] = 9;

	/* invalid write */
	s2->dyn[1000] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1));

	POBJ_REALLOC(pop, &rt->s2, struct struct1,
			sizeof(struct struct1) + 100 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 9; /* ok */
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* invalid write to REALLOCated and FREEd object */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int));

	POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1),
			NULL, NULL);
	POBJ_REALLOC(pop, &rt->s2, struct struct1,
			sizeof(struct struct1) + 30 * sizeof(int));
	s2 = D_RW(rt->s2);
	s2->dyn[0] = 0;
	s2->dyn[29] = 29;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));
	POBJ_FREE(&rt->s2);

	/* write after free, again intentional */
	s2->dyn[0] = 9;
	pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int));

	pmemobj_close(pop);
}
/* print usage information and abort the test */
static void usage(const char *a)
{
	UT_FATAL("usage: %s [m|t] file-name", a);
}
/*
 * main -- run the memcheck reproducer ("m") or the full pmemobj scenario
 * ("t" + pool file), always followed by the second reproducer
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memcheck");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(mc) != 1);

	if (argc < 2)
		usage(argv[0]);

	if (strcmp(argv[1], "m") == 0) {
		test_memcheck_bug();
	} else if (strcmp(argv[1], "t") == 0) {
		/* the "t" mode additionally requires a pool file name */
		if (argc < 3)
			usage(argv[0]);
		test_everything(argv[2]);
	} else {
		usage(argv[0]);
	}

	test_memcheck_bug2();

	DONE(NULL);
}
| 3,591 | 20.769697 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_defrag_advanced/obj_defrag_advanced.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* obj_defrag_advanced.c -- test for libpmemobj defragmentation feature
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include "rand.h"
#include "vgraph.h"
#include "pgraph.h"
#include "os_thread.h"
#include "unittest.h"
/* parameters controlling generation of the volatile and persistent graphs */
struct create_params_t {
	uint64_t seed;	/* NOTE(review): not referenced in this chunk -- confirm use */
	rng_t rng;	/* presumably per-task RNG state; seeded elsewhere -- confirm */
	struct vgraph_params vparams;	/* volatile-graph shape parameters */
	struct pgraph_params pparams;	/* persistent-graph parameters */
};
/*
 * graph_create -- create a graph
 * - generate an intermediate volatile graph representation
 * - use the volatile graph to allocate a persistent one (stored via *oidp)
 * - the volatile representation is discarded afterwards
 */
static void
graph_create(struct create_params_t *task, PMEMobjpool *pop, PMEMoid *oidp,
		rng_t *rngp)
{
	struct vgraph_t *vgraph = vgraph_new(&task->vparams, rngp);
	pgraph_new(pop, oidp, vgraph, &task->pparams, rngp);
	vgraph_delete(vgraph);
}
/*
 * graph_defrag -- defragment the pool
 * - collect pointers to all PMEMoids (node handles and per-node edge arrays)
 * - do a sanity checks (all resolvable, no duplicates)
 * - call pmemobj_defrag
 * - return # of relocated objects
 */
static size_t
graph_defrag(PMEMobjpool *pop, PMEMoid oid)
{
	struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(oid);

	/* count number of oids */
	unsigned oidcnt = pgraph->nodes_num;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
				(pgraph->nodes[i]);
		oidcnt += pnode->edges_num;
	}

	/* create array of oid pointers */
	PMEMoid **oidv = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * oidcnt);
	unsigned oidi = 0;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		oidv[oidi++] = &pgraph->nodes[i];

		struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct
				(pgraph->nodes[i]);
		for (unsigned j = 0; j < pnode->edges_num; ++j) {
			oidv[oidi++] = &pnode->edges[j];
		}
	}

	UT_ASSERTeq(oidi, oidcnt);

	/* check if all oids are valid */
	for (unsigned i = 0; i < oidcnt; ++i) {
		void *ptr = pmemobj_direct(*oidv[i]);
		UT_ASSERTne(ptr, NULL);
	}

	/* check if all oids appear only once */
	for (unsigned i = 0; i < oidcnt - 1; ++i) {
		for (unsigned j = i + 1; j < oidcnt; ++j) {
			UT_ASSERTne(oidv[i], oidv[j]);
		}
	}

	struct pobj_defrag_result result;
	int ret = pmemobj_defrag(pop, oidv, oidcnt, &result);
	UT_ASSERTeq(ret, 0);
	/* every node was considered for relocation */
	UT_ASSERTeq(result.total, pgraph->nodes_num);

	FREE(oidv);

	return result.relocated;
}
/*
 * graph_defrag_ntimes -- defragment the graph repeatedly: at least once,
 * at most max_rounds times, stopping early when a round relocates nothing
 */
static void
graph_defrag_ntimes(PMEMobjpool *pop, PMEMoid oid, unsigned max_rounds)
{
	unsigned round = 0;
	size_t moved;

	do {
		moved = graph_defrag(pop, oid);
	} while (++round < max_rounds && moved > 0);
}
#define HAS_TO_EXIST (1)

/*
 * graph_dump -- write the graph's textual representation to a file;
 * when has_exist is set the graph object is required to exist
 */
static void
graph_dump(PMEMoid oid, const char *path, int has_exist)
{
	struct pgraph_t *graph = (struct pgraph_t *)pmemobj_direct(oid);

	if (has_exist)
		UT_ASSERTne(graph, NULL);

	if (graph != NULL)
		pgraph_print(graph, path);
}
#define FGETS_BUFF_LEN 1024

/*
 * dump_compare -- compare graph dumps line by line.
 * Test fails if the contents of dumps do not match; both files must also
 * end at the same line.
 */
static void
dump_compare(const char *path1, const char *path2)
{
	FILE *dump1 = FOPEN(path1, "r");
	FILE *dump2 = FOPEN(path2, "r");

	char buff1[FGETS_BUFF_LEN];
	char buff2[FGETS_BUFF_LEN];
	char *sret1, *sret2;

	do {
		sret1 = fgets(buff1, FGETS_BUFF_LEN, dump1);
		sret2 = fgets(buff2, FGETS_BUFF_LEN, dump2);

		/* both files have to end at the same time */
		if (!sret1) {
			UT_ASSERTeq(sret2, NULL);

			FCLOSE(dump1);
			FCLOSE(dump2);
			return;
		}

		/* fgets returns its buffer argument on success */
		UT_ASSERTeq(sret1, buff1);
		UT_ASSERTeq(sret2, buff2);
		UT_ASSERTeq(strcmp(buff1, buff2), 0);
	} while (1);
}
/*
 * create_params_init -- fill the create params with default test values
 */
static void
create_params_init(struct create_params_t *params)
{
	params->seed = 1;
	/* sane defaults - nothing magic about these numbers */
	params->pparams.graph_copies = 10;
	params->vparams.max_nodes = 50;
	params->vparams.range_nodes = 10;
	params->vparams.max_edges = 10;
	params->vparams.range_edges = 10;
	params->vparams.min_pattern_size = 8;
	params->vparams.max_pattern_size = 1024;
}
/* global state shared by all ops run in this process */
static struct global_t {
	PMEMobjpool *pop;	/* the pool all ops operate on */
} global;
/*
 * PMEMobj root object structure
 */
struct root_t {
	unsigned graphs_num;	/* # of valid entries in graphs[] */
	PMEMoid graphs[];	/* one handle per graph stored in the pool */
};
/*
 * root_size -- size of a root object that holds graph_num graph
 * handles, but never smaller than min_root_size
 */
static inline size_t
root_size(unsigned graph_num, size_t min_root_size)
{
	size_t need = sizeof(struct root_t) + sizeof(PMEMoid) * graph_num;
	return MAX(need, min_root_size);
}
#define QUERY_GRAPHS_NUM UINT_MAX
/*
 * get_root -- (re)allocate the root object big enough for graphs_num
 * graph handles
 *
 * graphs_num == QUERY_GRAPHS_NUM means "use the number of graphs
 * already recorded in the pool's existing root object".
 */
static struct root_t *
get_root(unsigned graphs_num, size_t min_root_size)
{
	PMEMoid roid;
	struct root_t *root;
	if (graphs_num == QUERY_GRAPHS_NUM) {
		/* allocate a root object without graphs */
		roid = pmemobj_root(global.pop, root_size(0, 0));
		if (OID_IS_NULL(roid))
			UT_FATAL("!pmemobj_root:");
		root = (struct root_t *)pmemobj_direct(roid);
		UT_ASSERTne(root, NULL);
		/* read back how many graphs the pool already stores */
		graphs_num = root->graphs_num;
	}
	UT_ASSERT(graphs_num > 0);
	/* reallocate a root object with all known graphs */
	roid = pmemobj_root(global.pop, root_size(graphs_num, min_root_size));
	if (OID_IS_NULL(roid))
		UT_FATAL("!pmemobj_root:");
	root = (struct root_t *)pmemobj_direct(roid);
	UT_ASSERTne(root, NULL);
	return root;
}
/*
 * parse_nonzero -- parse a non-zero unsigned integer from a string
 *
 * Fails the test if the string is empty, contains trailing garbage
 * (e.g. "12abc"), is zero, or does not fit in an unsigned.
 */
static void
parse_nonzero(unsigned *var, const char *arg)
{
	char *end;
	unsigned long v = STRTOUL(arg, &end, 10);
	/* reject empty input and trailing garbage */
	UT_ASSERT(end != arg);
	UT_ASSERT(*end == '\0');
	UT_ASSERTne(v, 0);
	UT_ASSERT(v < UINT_MAX);
	*var = (unsigned)v;
}
#define GRAPH_LAYOUT POBJ_LAYOUT_NAME(graph)
/*
 * op_pool_create -- create a pool
 *
 * usage: op_pool_create <path>
 * Stores the new pool handle in the global state; consumes 1 argument.
 */
static int
op_pool_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <path>", tc->name);
	/* parse arguments */
	const char *path = argv[0];
	/* create the pool with the default (minimal) size */
	global.pop = pmemobj_create(path, GRAPH_LAYOUT, 0, S_IWUSR | S_IRUSR);
	if (global.pop == NULL) {
		UT_FATAL("!pmemobj_create: %s", path);
	}
	return 1;
}
/*
 * op_pool_close -- close the pool kept in the global state
 */
static int
op_pool_close(const struct test_case *tc, int argc, char *argv[])
{
	pmemobj_close(global.pop);
	global.pop = NULL;
	/* no command-line arguments consumed */
	return 0;
}
/*
 * op_graph_create -- create a single graph in the pool
 *
 * usage: op_graph_create <max-nodes> <max-edges> <graph-copies>
 * <min-root-size>; consumes 4 arguments
 */
static int
op_graph_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 4)
		UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
				" <min-root-size>", tc->name);
	/* parse arguments */
	struct create_params_t cparams;
	create_params_init(&cparams);
	parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
	parse_nonzero(&cparams.vparams.max_edges, argv[1]);
	parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
	size_t min_root_size = STRTOULL(argv[3], NULL, 10);
	struct root_t *root = get_root(1, min_root_size);
	/* seed the global RNG so the run is reproducible */
	randomize(cparams.seed);
	/* generate a single graph */
	graph_create(&cparams, global.pop, &root->graphs[0], NULL);
	root->graphs_num = 1;
	pmemobj_persist(global.pop, root, root_size(1, min_root_size));
	return 4;
}
/*
 * op_graph_dump -- dump the single graph stored in the pool to a file
 */
static int
op_graph_dump(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <dump>", tc->name);
	/* the only argument is the dump file path */
	const char *path = argv[0];
	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
	UT_ASSERTeq(root->graphs_num, 1);
	graph_dump(root->graphs[0], path, HAS_TO_EXIST);
	return 1;
}
/*
 * op_graph_defrag -- defragment the single graph stored in the pool
 */
static int
op_graph_defrag(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <max-rounds>", tc->name);
	/* the only argument limits the number of defrag rounds */
	unsigned rounds_limit;
	parse_nonzero(&rounds_limit, argv[0]);
	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
	UT_ASSERTeq(root->graphs_num, 1);
	graph_defrag_ntimes(global.pop, root->graphs[0], rounds_limit);
	return 1;
}
/*
 * op_dump_compare -- verify that two dump files are identical
 */
static int
op_dump_compare(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 2)
		UT_FATAL("usage: %s <dump1> <dump2>", tc->name);
	/* argv[0] and argv[1] are the two dump file paths */
	dump_compare(argv[0], argv[1]);
	return 2;
}
/* arguments handed to a single create-and-defrag worker thread */
struct create_n_defrag_params_t {
	char dump1[PATH_MAX];	/* dump file written before defrag */
	char dump2[PATH_MAX];	/* dump file written after defrag */
	struct create_params_t cparams;	/* per-thread graph parameters */
	PMEMobjpool *pop;	/* pool the graphs live in */
	PMEMoid *oidp;		/* this thread's slot in the root object */
	unsigned max_rounds;	/* defrag rounds limit per cycle */
	unsigned ncycles;	/* # of create+defrag cycles to run */
};
/*
 * create_n_defrag_thread -- create and defrag graphs multiple times
 *
 * Each cycle: generate a fresh graph, dump it, defragment it, dump it
 * again, require both dumps to be identical, then delete the graph.
 */
static void *
create_n_defrag_thread(void *arg)
{
	struct create_n_defrag_params_t *params =
			(struct create_n_defrag_params_t *)arg;
	struct create_params_t *cparams = &params->cparams;
	for (unsigned i = 0; i < params->ncycles; ++i) {
		graph_create(cparams, global.pop, params->oidp, &cparams->rng);
		graph_dump(*params->oidp, params->dump1, HAS_TO_EXIST);
		graph_defrag_ntimes(params->pop, *params->oidp,
				params->max_rounds);
		graph_dump(*params->oidp, params->dump2, HAS_TO_EXIST);
		/* defrag must not change the graph's logical contents */
		dump_compare(params->dump1, params->dump2);
		pgraph_delete(params->oidp);
	}
	return NULL;
}
/*
 * op_graph_create_n_defrag_mt -- multi-threaded graphs creation & defrag
 *
 * Spawns <n-threads> workers; each runs <n-create-defrag-cycles> of
 * create/dump/defrag/dump/compare on its own root-object slot.
 * Consumes 8 arguments.
 */
static int
op_graph_create_n_defrag_mt(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 8)
		UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>"
			" <min-root-size> <max-defrag-rounds> <n-threads>"
			"<n-create-defrag-cycles> <dump-suffix>",
			tc->name);
	/* parse arguments */
	struct create_params_t cparams;
	create_params_init(&cparams);
	parse_nonzero(&cparams.vparams.max_nodes, argv[0]);
	parse_nonzero(&cparams.vparams.max_edges, argv[1]);
	parse_nonzero(&cparams.pparams.graph_copies, argv[2]);
	size_t min_root_size = STRTOULL(argv[3], NULL, 10);
	unsigned max_rounds;
	parse_nonzero(&max_rounds, argv[4]);
	unsigned nthreads;
	parse_nonzero(&nthreads, argv[5]);
	unsigned ncycles;
	parse_nonzero(&ncycles, argv[6]);
	char *dump_suffix = argv[7];
	/* one root-object slot per worker thread */
	struct root_t *root = get_root(nthreads, min_root_size);
	root->graphs_num = nthreads;
	pmemobj_persist(global.pop, root, sizeof(*root));
	/* prepare threads params */
	struct create_n_defrag_params_t *paramss =
			(struct create_n_defrag_params_t *)MALLOC(
					sizeof(*paramss) * nthreads);
	for (unsigned i = 0; i < nthreads; ++i) {
		struct create_n_defrag_params_t *params = &paramss[i];
		SNPRINTF(params->dump1, PATH_MAX, "dump_1_th%u_%s.log",
				i, dump_suffix);
		SNPRINTF(params->dump2, PATH_MAX, "dump_2_th%u_%s.log",
				i, dump_suffix);
		memcpy(&params->cparams, &cparams, sizeof(cparams));
		/* distinct seed per thread so graphs differ */
		params->cparams.seed += i;
		randomize_r(&params->cparams.rng, params->cparams.seed);
		params->pop = global.pop;
		params->oidp = &root->graphs[i];
		params->max_rounds = max_rounds;
		params->ncycles = ncycles;
	}
	/* spawn threads */
	os_thread_t *threads = (os_thread_t *)MALLOC(
			sizeof(*threads) * nthreads);
	for (unsigned i = 0; i < nthreads; ++i)
		THREAD_CREATE(&threads[i], NULL, create_n_defrag_thread,
				&paramss[i]);
	/* join all threads */
	void *ret = NULL;
	for (unsigned i = 0; i < nthreads; ++i) {
		THREAD_JOIN(&threads[i], &ret);
		UT_ASSERTeq(ret, NULL);
	}
	FREE(threads);
	FREE(paramss);
	return 8;
}
/*
 * op_pool_open -- open an existing pool
 *
 * usage: op_pool_open <path>; consumes 1 argument.
 */
static int
op_pool_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <path>", tc->name);
	/* parse arguments */
	const char *path = argv[0];
	/* open a pool */
	global.pop = pmemobj_open(path, GRAPH_LAYOUT);
	if (global.pop == NULL)
		/* was "!pmemobj_create" - a copy-paste of op_pool_create */
		UT_FATAL("!pmemobj_open: %s", path);
	return 1;
}
/*
 * op_graph_dump_all -- dump every graph stored in the pool, one file
 * per graph, named "<dump-prefix>_<index>.log"
 */
static int
op_graph_dump_all(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <dump-prefix>", tc->name);
	/* the only argument is the dump file name prefix */
	const char *prefix = argv[0];
	struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0);
	char path[PATH_MAX];
	for (unsigned i = 0; i < root->graphs_num; ++i) {
		SNPRINTF(path, PATH_MAX, "%s_%u.log", prefix, i);
		graph_dump(root->graphs[i], path, HAS_TO_EXIST);
	}
	return 1;
}
/*
 * ops -- available ops, dispatched by name from the command line
 */
static struct test_case ops[] = {
	TEST_CASE(op_pool_create),
	TEST_CASE(op_pool_close),
	TEST_CASE(op_graph_create),
	TEST_CASE(op_graph_dump),
	TEST_CASE(op_graph_defrag),
	TEST_CASE(op_dump_compare),
	TEST_CASE(op_graph_create_n_defrag_mt),
	/* for pool validation only */
	TEST_CASE(op_pool_open),
	TEST_CASE(op_graph_dump_all),
};
#define NOPS ARRAY_SIZE(ops)
#define TEST_NAME "obj_defrag_advanced"
/*
 * main -- run the ops requested on the command line, in order
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, TEST_NAME);
	TEST_CASE_PROCESS(argc, argv, ops, NOPS);
	DONE(NULL);
}
| 12,707 | 21.452297 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_defrag_advanced/pgraph.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pgraph.c -- persistent graph representation
*/
#include <inttypes.h>
#include "unittest.h"
#include "vgraph.h"
#include "pgraph.h"
#define PATTERN 'g'
/*
 * pnode_size -- total size of a node: the header, the edge array and
 * the trailing data pattern
 */
static size_t
pnode_size(unsigned edges_num, size_t pattern_size)
{
	return sizeof(struct pnode_t)
			+ sizeof(PMEMoid) * edges_num
			+ pattern_size;
}
/*
 * pnode_init -- initialize the persistent node from its volatile
 * counterpart, translating edge indices into PMEMoids via pnodes[]
 */
static void
pnode_init(PMEMobjpool *pop, PMEMoid pnode_oid, struct vnode_t *vnode,
	PMEMoid pnodes[])
{
	struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(pnode_oid);
	pnode->node_id = vnode->node_id;
	pnode->size = vnode->psize;
	/* set edges */
	pnode->edges_num = vnode->edges_num;
	for (unsigned i = 0; i < vnode->edges_num; ++i)
		pnode->edges[i] = pnodes[vnode->edges[i]];
	/* initialize pattern (it lives just past the edge array) */
	pnode->pattern_size = vnode->pattern_size;
	void *pattern = (void *)&pnode->edges[pnode->edges_num];
	/* no flush here - the persist below covers the whole node */
	pmemobj_memset(pop, pattern, PATTERN, pnode->pattern_size,
			PMEMOBJ_F_MEM_NOFLUSH);
	/* persist the whole node state */
	pmemobj_persist(pop, (const void *)pnode, pnode->size);
}
/*
 * order_shuffle -- randomly permute the node order
 * (random-swap shuffle: one swap with a random slot per element)
 */
static void
order_shuffle(unsigned *order, unsigned num, rng_t *rngp)
{
	for (unsigned i = 0; i < num; ++i) {
		unsigned pick = rand_range(0, num, rngp);
		unsigned swap = order[pick];
		order[pick] = order[i];
		order[i] = swap;
	}
}
/*
 * order_new -- build a random allocation order for all graph nodes
 */
static unsigned *
order_new(struct vgraph_t *vgraph, rng_t *rngp)
{
	unsigned num = vgraph->nodes_num;
	unsigned *order = (unsigned *)MALLOC(sizeof(unsigned) * num);
	/* start from the identity permutation ... */
	for (unsigned i = 0; i < num; ++i)
		order[i] = i;
	/* ... and shuffle it */
	order_shuffle(order, num, rngp);
	return order;
}
/*
 * pgraph_copy_new -- allocate a persistent copy of the volatile graph
 *
 * Nodes are allocated in random order so that interleaved copies can
 * later be freed to maximize heap fragmentation.
 */
static PMEMoid *
pgraph_copy_new(PMEMobjpool *pop, struct vgraph_t *vgraph, rng_t *rngp)
{
	/* to be returned array of PMEMoids to raw nodes allocations */
	PMEMoid *nodes = (PMEMoid *)MALLOC(sizeof(PMEMoid) * vgraph->nodes_num);
	/* generates random order of nodes allocation */
	unsigned *order = order_new(vgraph, rngp);
	/* allocate the nodes in the random order */
	int ret;
	for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
		struct vnode_t vnode = vgraph->node[order[i]];
		PMEMoid *node = &nodes[order[i]];
		ret = pmemobj_alloc(pop, node, vnode.psize, 0, NULL, NULL);
		UT_ASSERTeq(ret, 0);
	}
	FREE(order);
	return nodes;
}
/*
 * pgraph_copy_delete -- free every still-allocated node of a copy and
 * the copy's oid array itself
 */
static void
pgraph_copy_delete(PMEMoid *nodes, unsigned num)
{
	for (unsigned i = 0; i < num; ++i) {
		/* nodes handed over to the final graph are OID_NULL here */
		if (!OID_IS_NULL(nodes[i]))
			pmemobj_free(&nodes[i]);
	}
	FREE(nodes);
}
/*
 * pgraph_size -- size of the graph header plus its node handle array
 */
static size_t
pgraph_size(unsigned nodes_num)
{
	size_t handles = sizeof(PMEMoid) * nodes_num;
	return sizeof(struct pgraph_t) + handles;
}
/*
 * pgraph_new -- allocate a new persistent graph in such a way
 * that the fragmentation is as large as possible
 *
 * Several interleaved copies of every node are allocated; exactly one
 * copy of each node is kept (picked at random) and the rest are freed,
 * leaving holes scattered across the heap.
 */
void
pgraph_new(PMEMobjpool *pop, PMEMoid *oidp, struct vgraph_t *vgraph,
	struct pgraph_params *params, rng_t *rngp)
{
	int ret = pmemobj_alloc(pop, oidp, pgraph_size(vgraph->nodes_num),
			0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp);
	pgraph->nodes_num = vgraph->nodes_num;
	pmemobj_persist(pop, pgraph, sizeof(*pgraph));
	/* calculate size of pnodes */
	for (unsigned i = 0; i < vgraph->nodes_num; ++i) {
		struct vnode_t *vnode = &vgraph->node[i];
		vnode->psize = pnode_size(vnode->edges_num,
				vnode->pattern_size);
	}
	/* prepare multiple copies of the nodes */
	unsigned copies_num = rand_range(1, params->graph_copies, rngp);
	PMEMoid **copies = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * copies_num);
	for (unsigned i = 0; i < copies_num; ++i)
		copies[i] = pgraph_copy_new(pop, vgraph, rngp);
	/* peek exactly the one copy of each node */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		unsigned copy_id = rand_range(0, copies_num, rngp);
		pgraph->nodes[i] = copies[copy_id][i];
		/* mark as taken so the cleanup below skips it */
		copies[copy_id][i] = OID_NULL;
	}
	pmemobj_persist(pop, pgraph->nodes,
			sizeof(PMEMoid) * pgraph->nodes_num);
	/* free unused copies of the nodes */
	for (unsigned i = 0; i < copies_num; ++i)
		pgraph_copy_delete(copies[i], vgraph->nodes_num);
	FREE(copies);
	/* initialize pnodes */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i)
		pnode_init(pop, pgraph->nodes[i], &vgraph->node[i],
				pgraph->nodes);
}
/*
 * pgraph_delete -- free all nodes of the persistent graph, then the
 * graph object itself
 */
void
pgraph_delete(PMEMoid *oidp)
{
	struct pgraph_t *graph = (struct pgraph_t *)pmemobj_direct(*oidp);
	unsigned num = graph->nodes_num;
	/* free pnodes first, then the header that references them */
	for (unsigned i = 0; i < num; ++i)
		pmemobj_free(&graph->nodes[i]);
	pmemobj_free(oidp);
}
/*
 * pgraph_print -- print graph in human readable format
 *
 * First a statistics header (# of nodes, total # of edges), then one
 * line per node: "<id>: <edge-id>, <edge-id>, ...".
 */
void
pgraph_print(struct pgraph_t *pgraph, const char *dump)
{
	UT_ASSERTne(dump, NULL);
	FILE *out = FOPEN(dump, "w");
	/* print the graph statistics */
	fprintf(out, "# of nodes: %u\n", pgraph->nodes_num);
	uint64_t total_edges_num = 0;
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		PMEMoid node_oid = pgraph->nodes[i];
		struct pnode_t *pnode =
				(struct pnode_t *)pmemobj_direct(node_oid);
		total_edges_num += pnode->edges_num;
	}
	fprintf(out, "Total # of edges: %" PRIu64 "\n\n", total_edges_num);
	/* print the graph itself */
	for (unsigned i = 0; i < pgraph->nodes_num; ++i) {
		PMEMoid node_oid = pgraph->nodes[i];
		struct pnode_t *pnode =
				(struct pnode_t *)pmemobj_direct(node_oid);
		fprintf(out, "%u:", pnode->node_id);
		for (unsigned j = 0; j < pnode->edges_num; ++j) {
			PMEMoid edge_oid = pnode->edges[j];
			struct pnode_t *edge =
				(struct pnode_t *)pmemobj_direct(edge_oid);
			/* sanity check: edges must point inside the graph */
			UT_ASSERT(edge->node_id < pgraph->nodes_num);
			fprintf(out, "%u, ", edge->node_id);
		}
		fprintf(out, "\n");
	}
	FCLOSE(out);
}
| 6,058 | 23.934156 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_defrag_advanced/vgraph.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.c -- volatile graph representation
*/
#include <stdlib.h>
#include <stdio.h>
#include "rand.h"
#include "unittest.h"
#include "vgraph.h"
/*
 * rand_range -- generate a pseudo-random number from the half-open
 * interval [min, max); when min == max, min itself is returned
 * (the old comment claimed a closed interval [min, max] - the modulo
 * below never yields max when max > min)
 */
unsigned
rand_range(unsigned min, unsigned max, rng_t *rngp)
{
	if (min == max)
		return min;
	if (min > max)
		UT_FATAL("!rand_range");
	unsigned ret;
	/* use the caller's generator state if provided ... */
	if (rngp)
		ret = (unsigned)rnd64_r(rngp);
	else
		/* ... otherwise the global one */
		ret = (unsigned)rnd64();
	return ((unsigned)ret % (max - min)) + min;
}
/*
 * vnode_new -- initialize a volatile node with a random number of
 * edges and a random data pattern size
 */
static void
vnode_new(struct vnode_t *node, unsigned v, struct vgraph_params *params,
	rng_t *rngp)
{
	unsigned min_edges = 1;
	if (params->max_edges > params->range_edges)
		min_edges = params->max_edges - params->range_edges;
	unsigned edges_num = rand_range(min_edges, params->max_edges, rngp);
	node->node_id = v;
	node->edges_num = edges_num;
	/* sizeof tied to the element type (was sizeof(int) for unsigned *) */
	node->edges = (unsigned *)MALLOC(sizeof(*node->edges) * edges_num);
	node->pattern_size = rand_range(params->min_pattern_size,
			params->max_pattern_size, rngp);
}
/*
 * vnode_delete -- free a volatile node's edge array
 * (the node itself is embedded in the graph and freed with it)
 */
static void
vnode_delete(struct vnode_t *node)
{
	FREE(node->edges);
}
/*
 * vgraph_get_node -- return the node with the given id
 */
static struct vnode_t *
vgraph_get_node(struct vgraph_t *graph, unsigned id_node)
{
	return &graph->node[id_node];
}
/*
 * vgraph_add_edges -- pick a random destination node for every edge
 * of every node in the graph
 */
static void
vgraph_add_edges(struct vgraph_t *graph, rng_t *rngp)
{
	for (unsigned n = 0; n < graph->nodes_num; n++) {
		struct vnode_t *node = vgraph_get_node(graph, n);
		for (unsigned e = 0; e < node->edges_num; e++)
			node->edges[e] = rand_range(0, graph->nodes_num,
					rngp);
	}
}
/*
 * vgraph_new -- allocate a new volatile graph
 *
 * The node count is random within [max_nodes - range_nodes, max_nodes)
 * (at least 1); nodes are created first, then edges are wired up.
 */
struct vgraph_t *
vgraph_new(struct vgraph_params *params, rng_t *rngp)
{
	unsigned min_nodes = 1;
	if (params->max_nodes > params->range_nodes)
		min_nodes = params->max_nodes - params->range_nodes;
	unsigned nodes_num = rand_range(min_nodes, params->max_nodes, rngp);
	/* nodes are stored in a flexible array member of the graph */
	struct vgraph_t *graph =
			(struct vgraph_t *)MALLOC(sizeof(struct vgraph_t) +
					sizeof(struct vnode_t) * nodes_num);
	graph->nodes_num = nodes_num;
	for (unsigned i = 0; i < nodes_num; i++) {
		vnode_new(&graph->node[i], i, params, rngp);
	}
	vgraph_add_edges(graph, rngp);
	return graph;
}
/*
* vgraph_delete -- free the volatile graph
*/
void
vgraph_delete(struct vgraph_t *graph)
{
for (unsigned i = 0; i < graph->nodes_num; i++)
vnode_delete(&graph->node[i]);
FREE(graph);
}
| 2,894 | 21.099237 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_defrag_advanced/vgraph.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vgraph.h -- volatile graph representation
*/
#ifndef OBJ_DEFRAG_ADV_VGRAPH
#define OBJ_DEFRAG_ADV_VGRAPH
#include "rand.h"
/* knobs controlling random graph generation */
struct vgraph_params
{
	unsigned max_nodes; /* max # of nodes per graph */
	unsigned max_edges; /* max # of edges per node */
	/* # of nodes is between [max_nodes - range_nodes, max_nodes] */
	unsigned range_nodes;
	/* # of edges is between [max_edges - range_edges, max_edges] */
	unsigned range_edges;
	unsigned min_pattern_size; /* lower bound of per-node data pattern */
	unsigned max_pattern_size; /* upper bound of per-node data pattern */
};
/* a single node of the volatile graph */
struct vnode_t
{
	unsigned node_id; /* index of this node within the graph */
	unsigned edges_num; /* # of edges starting from this node */
	unsigned *edges; /* ids of nodes the edges are pointing to */
	/* the persistent node attributes */
	size_t pattern_size; /* size of the pattern allocated after the node */
	size_t psize; /* the total size of the node */
};
/* a volatile graph: a counted, variable-length array of nodes */
struct vgraph_t
{
	unsigned nodes_num; /* # of entries in node[] */
	struct vnode_t node[]; /* flexible array member (C99) */
};
unsigned rand_range(unsigned min, unsigned max, rng_t *rngp);
struct vgraph_t *vgraph_new(struct vgraph_params *params, rng_t *rngp);
void vgraph_delete(struct vgraph_t *graph);
#endif
| 1,158 | 23.145833 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_mem/obj_mem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_mem.c -- simple test for pmemobj_memcpy, pmemobj_memmove and
* pmemobj_memset that verifies nothing blows up on pmemobj side.
* Real consistency tests are for libpmem.
*/
#include "unittest.h"
/* all flag combinations passed to pmemobj_mem{cpy,move,set} below */
static unsigned Flags[] = {
	0,
	PMEMOBJ_F_MEM_NODRAIN,
	PMEMOBJ_F_MEM_NONTEMPORAL,
	PMEMOBJ_F_MEM_TEMPORAL,
	PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL,
	PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_NODRAIN,
	PMEMOBJ_F_MEM_WC,
	PMEMOBJ_F_MEM_WB,
	PMEMOBJ_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NOFLUSH |
		PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL |
		PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_WB,
};
/*
 * main -- exercise pmemobj_memcpy/memmove/memset on a root object with
 * every flag combination from Flags[]; smoke test only, real data
 * consistency checks live in the libpmem tests
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_mem");
	if (argc != 2)
		UT_FATAL("usage: %s [directory]", argv[0]);
	PMEMobjpool *pop = pmemobj_create(argv[1], "obj_mem", 0,
			S_IWUSR | S_IRUSR);
	if (!pop)
		UT_FATAL("!pmemobj_create");
	struct root {
		char c[4096];
	};
	struct root *r = pmemobj_direct(pmemobj_root(pop, sizeof(struct root)));
	for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
		unsigned f = Flags[i];
		/* overlapping and non-overlapping copies and moves */
		pmemobj_memset(pop, &r->c[0], 0x77, 2048, f);
		pmemobj_memset(pop, &r->c[2048], 0xff, 2048, f);
		pmemobj_memcpy(pop, &r->c[2048 + 7], &r->c[0], 100, f);
		pmemobj_memcpy(pop, &r->c[2048 + 1024], &r->c[0] + 17, 128, f);
		pmemobj_memmove(pop, &r->c[125], &r->c[150], 100, f);
		pmemobj_memmove(pop, &r->c[350], &r->c[325], 100, f);
		/* NOFLUSH variants must be persisted explicitly */
		if (f & PMEMOBJ_F_MEM_NOFLUSH)
			pmemobj_persist(pop, r, sizeof(*r));
	}
	pmemobj_close(pop);
	DONE(NULL);
}
| 1,644 | 22.84058 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_valgr_simple/pmem_valgr_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2016, Intel Corporation */
/*
* pmem_valgr_simple.c -- simple unit test using pmemcheck
*
* usage: pmem_valgr_simple file
*/
#include "unittest.h"
/*
 * main -- map a file and perform a handful of stores/flushes whose
 * persistence behavior is inspected by pmemcheck (Valgrind)
 *
 * usage: pmem_valgr_simple file offset length
 */
int
main(int argc, char *argv[])
{
	size_t mapped_len;
	char *dest;
	int is_pmem;
	START(argc, argv, "pmem_valgr_simple");
	if (argc != 4)
		UT_FATAL("usage: %s file offset length", argv[0]);
	/* NOTE(review): atoi/strtoul results are not validated here */
	int dest_off = atoi(argv[2]);
	size_t bytes = strtoul(argv[3], NULL, 0);
	dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, &is_pmem);
	if (dest == NULL)
		UT_FATAL("!Could not mmap %s\n", argv[1]);
	/* these will not be made persistent */
	*(int *)dest = 4;
	/* this will be made persistent */
	uint64_t *tmp64dst = (void *)((uintptr_t)dest + 4096);
	*tmp64dst = 50;
	if (is_pmem) {
		pmem_persist(tmp64dst, sizeof(*tmp64dst));
	} else {
		UT_ASSERTeq(pmem_msync(tmp64dst, sizeof(*tmp64dst)), 0);
	}
	uint16_t *tmp16dst = (void *)((uintptr_t)dest + 1024);
	*tmp16dst = 21;
	/* will appear as flushed/fenced in valgrind log */
	pmem_flush(tmp16dst, sizeof(*tmp16dst));
	/* shows strange behavior of memset in some cases */
	memset(dest + dest_off, 0, bytes);
	UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0);
	DONE(NULL);
}
| 1,240 | 21.160714 | 63 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/libpmempool_check_version/libpmempool_check_version.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* libpmempool_check_version -- a unittest for libpmempool_check_version.
*
*/
#include "unittest.h"
#include "libpmempool.h"
/*
 * main -- check pmempool_check_version's compatibility contract:
 * NULL means the library satisfies the requested version; current
 * major with equal-or-lower minor is compatible, anything else is not
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_check_version");
	/* the (0, 0) query must return an error string, never NULL */
	UT_ASSERTne(pmempool_check_version(0, 0), NULL);
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION - 1,
			PMEMPOOL_MINOR_VERSION));
	if (PMEMPOOL_MINOR_VERSION > 0) {
		/* an older minor of the current major is compatible */
		UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
				PMEMPOOL_MINOR_VERSION - 1));
	}
	UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION));
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION + 1,
			PMEMPOOL_MINOR_VERSION));
	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION + 1));
	DONE(NULL);
}
| 897 | 22.631579 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/win_mmap_dtor/win_mmap_dtor.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_mmap_dtor.c -- unit test for windows mmap destructor
*/
#include "unittest.h"
#include "os.h"
#include "win_mmap.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
unsigned long long Mmap_align;
/*
 * main -- map a 2MB file with a 3MB mapping, verify the layout
 * (2MB committed + 1MB reserved), then check that win_mmap_fini()
 * releases the reserved tail
 *
 * usage: win_mmap_dtor path
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_mmap_dtor");
	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	/* set pagesize for mmap */
	Mmap_align = si.dwAllocationGranularity;
	const char *path = argv[1];
	int fd = os_open(path, O_RDWR);
	UT_ASSERTne(fd, -1);
	/*
	 * Input file has size equal to 2MB, but the mapping is 3MB.
	 * In this case mmap should map whole file and reserve 1MB
	 * of virtual address space for remaining part of the mapping.
	 */
	void *addr = mmap(NULL, 3 * MEGABYTE, PROT_READ, MAP_SHARED, fd, 0);
	UT_ASSERTne(addr, MAP_FAILED);
	MEMORY_BASIC_INFORMATION basic_info;
	SIZE_T bytes_returned;
	bytes_returned = VirtualQuery(addr, &basic_info,
			sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/* the file-backed part: 2MB committed */
	UT_ASSERTeq(basic_info.RegionSize, 2 * MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
			&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/* the tail: 1MB reserved but not committed */
	UT_ASSERTeq(basic_info.RegionSize, MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_RESERVE);
	win_mmap_fini();
	bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE,
			&basic_info, sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	/*
	 * region size can be bigger than 1MB because there was probably
	 * free space after this mapping
	 */
	UT_ASSERTeq(basic_info.State, MEM_FREE);
	DONE(NULL);
}
| 1,778 | 22.72 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_map_file_win/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_map_file_win/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 * Fails with ENOSPC for requests larger than MAX_LEN to simulate a
 * full filesystem; delegates to the real function otherwise.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 * Fails with -1/ENOSPC for lengths larger than MAX_LEN to simulate a
 * full filesystem; delegates to the real function otherwise.
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_locks/obj_tx_locks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_locks.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "direct"
#define NUM_LOCKS 2
#define NUM_THREADS 10
#define TEST_VALUE_A 5
#define TEST_VALUE_B 10
#define TEST_VALUE_C 15
#define BEGIN_TX(pop, mutexes, rwlocks)\
TX_BEGIN_PARAM((pop), TX_PARAM_MUTEX,\
&(mutexes)[0], TX_PARAM_MUTEX, &(mutexes)[1], TX_PARAM_RWLOCK,\
&(rwlocks)[0], TX_PARAM_RWLOCK, &(rwlocks)[1], TX_PARAM_NONE)
#define BEGIN_TX_OLD(pop, mutexes, rwlocks)\
TX_BEGIN_LOCK((pop), TX_LOCK_MUTEX,\
&(mutexes)[0], TX_LOCK_MUTEX, &(mutexes)[1], TX_LOCK_RWLOCK,\
&(rwlocks)[0], TX_LOCK_RWLOCK, &(rwlocks)[1], TX_LOCK_NONE)
/* pool-resident locks and counters shared by all test transactions */
struct transaction_data {
	PMEMmutex mutexes[NUM_LOCKS];	/* taken by every transaction */
	PMEMrwlock rwlocks[NUM_LOCKS];	/* taken by every transaction */
	int a;	/* written in the tx body */
	int b;	/* written in the oncommit/onabort stage */
	int c;	/* written in the finally stage */
};
static PMEMobjpool *Pop;
/*
 * do_tx -- (internal) thread-friendly transaction
 *
 * Committing path: body sets a, oncommit sets b, finally sets c;
 * the onabort stage must never run.
 */
static void *
do_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_tx_old -- (internal) thread-friendly transaction, tests deprecated
 * TX_BEGIN_LOCK/TX_LOCK_* macros; same scenario as do_tx
 */
static void *
do_tx_old(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX_OLD(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
	} TX_ONCOMMIT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_ONABORT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_tx -- (internal) thread-friendly aborted transaction
 *
 * pmemobj_tx_abort() jumps out of the body, so the assignment after it
 * must not execute; onabort and finally must run, oncommit must not.
 */
static void *
do_aborted_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_A;
		pmemobj_tx_abort(EINVAL);
		data->a = TEST_VALUE_B;	/* unreachable */
	} TX_ONCOMMIT { /* not called */
		data->a = TEST_VALUE_B;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->b = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->b == TEST_VALUE_B);
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_nested_tx -- (internal) thread-friendly nested transaction
 *
 * The inner transaction re-acquires the same locks; both levels must
 * commit and each oncommit stage must run.
 */
static void *
do_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
		} TX_ONCOMMIT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_END
	} TX_ONCOMMIT {
		data->c = TEST_VALUE_C;
	} TX_END
	return NULL;
}
/*
 * do_aborted_nested_tx -- (internal) thread-friendly aborted nested
 * transaction
 *
 * The inner abort propagates to the outer transaction: the statement
 * after the inner TX_END must not run, and the outer onabort/finally
 * stages must execute instead of oncommit.
 */
static void *
do_aborted_nested_tx(void *arg)
{
	struct transaction_data *data = arg;
	BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
		data->a = TEST_VALUE_C;
		BEGIN_TX(Pop, data->mutexes, data->rwlocks) {
			data->a = TEST_VALUE_A;
			pmemobj_tx_abort(EINVAL);
			data->a = TEST_VALUE_B;	/* unreachable */
		} TX_ONCOMMIT { /* not called */
			data->a = TEST_VALUE_C;
		} TX_ONABORT {
			UT_ASSERT(data->a == TEST_VALUE_A);
			data->b = TEST_VALUE_B;
		} TX_FINALLY {
			UT_ASSERT(data->b == TEST_VALUE_B);
			data->c = TEST_VALUE_C;
		} TX_END
		data->a = TEST_VALUE_B;	/* skipped - outer tx is aborted */
	} TX_ONCOMMIT { /* not called */
		UT_ASSERT(data->a == TEST_VALUE_A);
		data->c = TEST_VALUE_C;
	} TX_ONABORT {
		UT_ASSERT(data->a == TEST_VALUE_A);
		UT_ASSERT(data->b == TEST_VALUE_B);
		UT_ASSERT(data->c == TEST_VALUE_C);
		data->a = TEST_VALUE_B;
	} TX_FINALLY {
		UT_ASSERT(data->a == TEST_VALUE_B);
		data->b = TEST_VALUE_A;
	} TX_END
	return NULL;
}
/*
 * run_mt_test -- execute the worker in NUM_THREADS parallel threads
 * and wait for all of them to finish
 */
static void
run_mt_test(void *(*worker)(void *), void *arg)
{
	os_thread_t threads[NUM_THREADS];
	int i;
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_CREATE(&threads[i], NULL, worker, arg);
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(&threads[i], NULL);
}
/*
 * main -- run every transaction-lock scenario, single- or
 * multi-threaded ("m" as the optional second argument)
 *
 * usage: obj_tx_locks <file> [m]
 *
 * Fixes: the usage check now rejects argc < 2 (argv[1] was dereferenced
 * unconditionally), and the "wrong test type" message prints the
 * actual test-type character argv[2][0] (it printed argv[1][0]).
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_locks");
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s <file> [m]", argv[0]);
	if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	int multithread = 0;
	if (argc == 3) {
		multithread = (argv[2][0] == 'm');
		if (!multithread)
			UT_FATAL("wrong test type supplied %c", argv[2][0]);
	}
	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *test_obj =
			(struct transaction_data *)pmemobj_direct(root);
	/* single-threaded runs execute each scenario twice */
	if (multithread) {
		run_mt_test(do_tx, test_obj);
	} else {
		do_tx(test_obj);
		do_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	if (multithread) {
		run_mt_test(do_aborted_tx, test_obj);
	} else {
		do_aborted_tx(test_obj);
		do_aborted_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	if (multithread) {
		run_mt_test(do_nested_tx, test_obj);
	} else {
		do_nested_tx(test_obj);
		do_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	if (multithread) {
		run_mt_test(do_aborted_nested_tx, test_obj);
	} else {
		do_aborted_nested_tx(test_obj);
		do_aborted_nested_tx(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_B);
	UT_ASSERT(test_obj->b == TEST_VALUE_A);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	/* test that deprecated macros still work */
	UT_COMPILE_ERROR_ON((int)TX_LOCK_NONE != (int)TX_PARAM_NONE);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_MUTEX != (int)TX_PARAM_MUTEX);
	UT_COMPILE_ERROR_ON((int)TX_LOCK_RWLOCK != (int)TX_PARAM_RWLOCK);
	if (multithread) {
		run_mt_test(do_tx_old, test_obj);
	} else {
		do_tx_old(test_obj);
		do_tx_old(test_obj);
	}
	UT_ASSERT(test_obj->a == TEST_VALUE_A);
	UT_ASSERT(test_obj->b == TEST_VALUE_B);
	UT_ASSERT(test_obj->c == TEST_VALUE_C);
	pmemobj_close(Pop);
	DONE(NULL);
}
| 6,164 | 21.918216 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/blk_recovery/blk_recovery.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_recovery.c -- unit test for pmemblk recovery
*
* usage: blk_recovery bsize file first_lba lba
*
*/
#include "unittest.h"
#include <sys/param.h>
#include "blk.h"
#include "btt_layout.h"
#include <endian.h>
static size_t Bsize;
/*
 * construct -- build a buffer for writing
 *
 * Fills all Bsize bytes of 'buf' with a single byte value that advances
 * (1..255, wrapping) on every call, so successive blocks are
 * distinguishable and torn writes are detectable (see ident()).
 */
static void
construct(unsigned char *buf)
{
	static int ord = 1;
	/* size_t index avoids the signed/unsigned comparison with Bsize */
	for (size_t i = 0; i < Bsize; i++)
		buf[i] = (unsigned char)ord;
	if (++ord > 255)
		ord = 1;
}
/*
 * ident -- identify what a buffer holds
 *
 * Returns "{v}" if all Bsize bytes equal the first byte v, or
 * "{v} TORN at byte i" for the first mismatching index.  The returned
 * pointer refers to a static buffer, so the function is not thread-safe
 * and each call overwrites the previous result.
 */
static char *
ident(unsigned char *buf)
{
	static char descr[100];
	unsigned val = *buf;
	/* buf[0] trivially matches, start at 1; size_t avoids sign-compare */
	for (size_t i = 1; i < Bsize; i++)
		if (buf[i] != val) {
			/* snprintf bounds the write into the static buffer */
			snprintf(descr, sizeof(descr),
				"{%u} TORN at byte %zu", val, i);
			return descr;
		}
	snprintf(descr, sizeof(descr), "{%u}", val);
	return descr;
}
/*
 * main -- with 5 args: create a pool, write one block, write-protect the
 * BTT map and attempt a second write (expected to die on the protected
 * map -- both branches after the write are fatal); with 3 args: just run
 * pmemblk_check() on the (previously interrupted) pool.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_recovery");
	if (argc != 5 && argc != 3)
		UT_FATAL("usage: %s bsize file [first_lba lba]", argv[0]);
	Bsize = strtoul(argv[1], NULL, 0);
	const char *path = argv[2];
	if (argc > 3) {
		PMEMblkpool *handle;
		if ((handle = pmemblk_create(path, Bsize, 0,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!%s: pmemblk_create", path);
		UT_OUT("%s block size %zu usable blocks %zu",
				argv[1], Bsize, pmemblk_nblock(handle));
		/* write the first lba */
		os_off_t lba = STRTOL(argv[3], NULL, 0);
		unsigned char *buf = MALLOC(Bsize);
		construct(buf);
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		UT_OUT("write lba %zu: %s", lba, ident(buf));
		/* reach into the layout and write-protect the map */
		struct btt_info *infop = (void *)((char *)handle +
			roundup(sizeof(struct pmemblk), BLK_FORMAT_DATA_ALIGN));
		char *mapaddr = (char *)infop + le32toh(infop->mapoff);
		char *flogaddr = (char *)infop + le32toh(infop->flogoff);
		UT_OUT("write-protecting map, length %zu",
				(size_t)(flogaddr - mapaddr));
		MPROTECT(mapaddr, (size_t)(flogaddr - mapaddr), PROT_READ);
		/*
		 * This write must not complete -- it hits the now read-only
		 * map, simulating an interrupted write.  Reaching either
		 * branch below means the protection did not work.
		 */
		lba = STRTOL(argv[4], NULL, 0);
		construct(buf);
		if (pmemblk_write(handle, buf, lba) < 0)
			UT_FATAL("!write lba %zu", lba);
		else
			UT_FATAL("write lba %zu: %s", lba, ident(buf));
	} else {
		/* recovery pass: verify the pool after the simulated crash */
		int result = pmemblk_check(path, Bsize);
		if (result < 0)
			UT_OUT("!%s: pmemblk_check", path);
		else if (result == 0)
			UT_OUT("%s: pmemblk_check: not consistent", path);
		else
			UT_OUT("%s: consistent", path);
	}
	DONE(NULL);
}
| 4,164 | 26.766667 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/out_err_mt/out_err_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* out_err_mt.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
static void
print_errors(const char *msg)
{
UT_OUT("%s", msg);
UT_OUT("PMEM: %s", pmem_errormsg());
UT_OUT("PMEMOBJ: %s", pmemobj_errormsg());
UT_OUT("PMEMLOG: %s", pmemlog_errormsg());
UT_OUT("PMEMBLK: %s", pmemblk_errormsg());
UT_OUT("PMEMPOOL: %s", pmempool_errormsg());
}
/*
 * verify_version_error -- (internal) parse one library's version-mismatch
 * error message with 'fmt' (which must contain exactly two %d conversions)
 * and assert it reports the requested version and the library's real major.
 */
static void
verify_version_error(const char *errmsg, const char *fmt, unsigned ver,
		int major)
{
	int err_need;
	int err_found;
	int ret = sscanf(errmsg, fmt, &err_need, &err_found);
	UT_ASSERTeq(ret, 2);
	UT_ASSERTeq(err_need, ver);
	UT_ASSERTeq(err_found, major);
}

/*
 * check_errors -- (internal) verify that every library reported a major
 * version mismatch against the bogus version 'ver' requested by the caller
 */
static void
check_errors(unsigned ver)
{
	verify_version_error(pmem_errormsg(),
		"libpmem major version mismatch (need %d, found %d)",
		ver, PMEM_MAJOR_VERSION);
	verify_version_error(pmemobj_errormsg(),
		"libpmemobj major version mismatch (need %d, found %d)",
		ver, PMEMOBJ_MAJOR_VERSION);
	verify_version_error(pmemlog_errormsg(),
		"libpmemlog major version mismatch (need %d, found %d)",
		ver, PMEMLOG_MAJOR_VERSION);
	verify_version_error(pmemblk_errormsg(),
		"libpmemblk major version mismatch (need %d, found %d)",
		ver, PMEMBLK_MAJOR_VERSION);
	verify_version_error(pmempool_errormsg(),
		"libpmempool major version mismatch (need %d, found %d)",
		ver, PMEMPOOL_MAJOR_VERSION);
}
/*
 * do_test -- (internal) worker: request an impossible major version from
 * every library, then verify the per-thread error messages it produced
 */
static void *
do_test(void *arg)
{
	unsigned bad_ver = *(unsigned *)arg;
	pmem_check_version(bad_ver, 0);
	pmemobj_check_version(bad_ver, 0);
	pmemlog_check_version(bad_ver, 0);
	pmemblk_check_version(bad_ver, 0);
	pmempool_check_version(bad_ver, 0);
	check_errors(bad_ver);
	return NULL;
}
/*
 * run_mt_test -- (internal) start NUM_THREADS workers, each with its own
 * distinct bogus version number (10000 + thread index), and join them all
 */
static void
run_mt_test(void *(*worker)(void *))
{
	os_thread_t threads[NUM_THREADS];
	unsigned vers[NUM_THREADS];
	for (unsigned t = 0; t < NUM_THREADS; ++t) {
		vers[t] = 10000 + t;
		THREAD_CREATE(&threads[t], NULL, worker, &vers[t]);
	}
	for (unsigned t = 0; t < NUM_THREADS; ++t)
		THREAD_JOIN(&threads[t], NULL);
}
/*
 * main -- provoke an error in each library in turn and print the
 * per-library error messages after every step, then repeat the version
 * check concurrently from NUM_THREADS threads.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "out_err_mt");
	if (argc != 6)
		UT_FATAL("usage: %s file1 file2 file3 file4 dir",
			argv[0]);
	print_errors("start");
	PMEMobjpool *pop = pmemobj_create(argv[1], "test",
		PMEMOBJ_MIN_POOL, 0666);
	PMEMlogpool *plp = pmemlog_create(argv[2],
		PMEMLOG_MIN_POOL, 0666);
	PMEMblkpool *pbp = pmemblk_create(argv[3],
		128, PMEMBLK_MIN_POOL, 0666);
	util_init();
	/* impossible major versions -- each call must set an error */
	pmem_check_version(10000, 0);
	pmemobj_check_version(10001, 0);
	pmemlog_check_version(10002, 0);
	pmemblk_check_version(10003, 0);
	pmempool_check_version(10006, 0);
	print_errors("version check");
	void *ptr = NULL;
	/*
	 * We are testing library error reporting and we don't want this test
	 * to fail under memcheck.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	pmem_msync(ptr, 1);
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	print_errors("pmem_msync");
	int ret;
	PMEMoid oid;
	/* zero-sized allocation must fail */
	ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
	UT_ASSERTeq(ret, -1);
	print_errors("pmemobj_alloc");
	/* append larger than the pool must fail */
	pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
	print_errors("pmemlog_append");
	/* block number one past the end must fail */
	size_t nblock = pmemblk_nblock(pbp);
	pmemblk_set_error(pbp, (long long)nblock + 1);
	print_errors("pmemblk_set_error");
	run_mt_test(do_test);
	pmemobj_close(pop);
	pmemlog_close(plp);
	pmemblk_close(pbp);
	PMEMpoolcheck *ppc;
	struct pmempool_check_args args = {NULL, };
	/* deliberately wrong args size -- init must fail and return NULL */
	ppc = pmempool_check_init(&args, sizeof(args) / 2);
	UT_ASSERTeq(ppc, NULL);
	print_errors("pmempool_check_init");
	DONE(NULL);
}
| 3,840 | 22.278788 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/libpmempool_api/libpmempool_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* libpmempool_test -- test of libpmempool.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "unittest.h"
/*
 * Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
 * test libpmempool against various pmempool_check_args structure versions.
 */
struct pmempool_check_args_1_0 {
	const char *path;		/* pool to check */
	const char *backup_path;	/* optional backup location */
	enum pmempool_pool_type pool_type;	/* blk/log/obj/btt */
	int flags;			/* PMEMPOOL_CHECK_* bit mask */
};
/*
 * check_pool -- run pmempool_check() on the given pool, echoing every
 * status message, answering "yes" to every question, and printing the
 * final result
 */
static void
check_pool(struct pmempool_check_args *args, size_t args_size)
{
	static const char * const result_str[] = {
		[PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent",
		[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent",
		[PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired",
		[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair",
		[PMEMPOOL_CHECK_RESULT_ERROR] = "fatal",
	};
	PMEMpoolcheck *ppc = pmempool_check_init(args, args_size);
	if (ppc == NULL) {
		char errstr[UT_MAX_ERR_MSG];
		ut_strerror(errno, errstr, UT_MAX_ERR_MSG);
		UT_OUT("Error: %s", errstr);
		return;
	}
	struct pmempool_check_status *st;
	while ((st = pmempool_check(ppc)) != NULL) {
		switch (st->type) {
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
			/* errors and infos are handled identically */
			UT_OUT("%s", st->str.msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			UT_OUT("%s", st->str.msg);
			st->str.answer = "yes";
			break;
		default:
			pmempool_check_end(ppc);
			exit(EXIT_FAILURE);
		}
	}
	enum pmempool_check_result res = pmempool_check_end(ppc);
	UT_OUT("status = %s", result_str[res]);
}
/*
 * print_usage -- print the program's usage line
 */
static void
print_usage(char *name)
{
	static const char * const opts =
		"[-t <pool_type>] [-r <repair>] [-d <dry_run>] "
		"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
		"[-b <backup_path>] <pool_path>";
	UT_OUT("Usage: %s %s", name, opts);
}
/*
 * set_flag -- set or clear 'flag' in '*flags': any value parsing to a
 * positive integer enables the flag, everything else disables it
 */
static void
set_flag(const char *value, int *flags, int flag)
{
	int enable = atoi(value) > 0;
	if (enable)
		*flags |= flag;
	else
		*flags &= ~flag;
}
/*
 * main -- parse the command line into a (deliberately version-1.0)
 * pmempool_check_args structure and run check_pool() on it; the -s option
 * lets a test pass an arbitrary structure size to pmempool_check_init().
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_test");
	int opt;
	struct pmempool_check_args_1_0 args = {
		.path = NULL,
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_LOG,
		.flags = PMEMPOOL_CHECK_FORMAT_STR |
			PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
	};
	size_t args_size = sizeof(struct pmempool_check_args_1_0);
	/* NOTE(review): usage text mentions -f, but it is not in this
	   optstring -- confirm whether -f was ever supported */
	while ((opt = getopt(argc, argv, "t:r:d:a:y:s:b:")) != -1) {
		switch (opt) {
		case 't':	/* pool type: blk|log|obj|btt or raw number */
			if (strcmp(optarg, "blk") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
			} else if (strcmp(optarg, "log") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
			} else if (strcmp(optarg, "obj") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
			} else if (strcmp(optarg, "btt") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
			} else {
				args.pool_type =
					(uint32_t)strtoul(optarg, NULL, 0);
			}
			break;
		case 'r':	/* toggle PMEMPOOL_CHECK_REPAIR */
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
			break;
		case 'd':	/* toggle PMEMPOOL_CHECK_DRY_RUN */
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
			break;
		case 'a':	/* toggle PMEMPOOL_CHECK_ADVANCED */
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
			break;
		case 'y':	/* toggle PMEMPOOL_CHECK_ALWAYS_YES */
			set_flag(optarg, &args.flags,
				PMEMPOOL_CHECK_ALWAYS_YES);
			break;
		case 's':	/* override structure size passed to init */
			args_size = strtoul(optarg, NULL, 0);
			break;
		case 'b':	/* backup location */
			args.backup_path = optarg;
			break;
		default:
			print_usage(argv[0]);
			UT_FATAL("unknown option: %c", opt);
		}
	}
	/* first non-option argument is the pool path */
	if (optind < argc) {
		args.path = argv[optind];
	}
	check_pool((struct pmempool_check_args *)&args, args_size);
	DONE(NULL);
}
| 3,753 | 22.31677 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/blk_nblock/blk_nblock.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* blk_nblock.c -- unit test for pmemblk_nblock()
*
* usage: blk_nblock bsize:file...
*
*/
#include "unittest.h"
/*
 * main -- for each "<bsize>:<file>" argument: create a pmemblk pool,
 * report its usable block count, then re-verify it with pmemblk_check()
 * using the right, a wrong, and a zero block size.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "blk_nblock");
	if (argc < 2)
		UT_FATAL("usage: %s bsize:file...", argv[0]);
	/* map each file argument with the given map type */
	for (int arg = 1; arg < argc; arg++) {
		char *fname;
		/* argument format is "<bsize>:<path>" */
		size_t bsize = strtoul(argv[arg], &fname, 0);
		if (*fname != ':')
			UT_FATAL("usage: %s bsize:file...", argv[0]);
		fname++;
		PMEMblkpool *handle;
		handle = pmemblk_create(fname, bsize, 0, S_IWUSR | S_IRUSR);
		if (handle == NULL) {
			UT_OUT("!%s: pmemblk_create", fname);
		} else {
			UT_OUT("%s: block size %zu usable blocks: %zu",
					fname, bsize, pmemblk_nblock(handle));
			UT_ASSERTeq(pmemblk_bsize(handle), bsize);
			pmemblk_close(handle);
			int result = pmemblk_check(fname, bsize);
			if (result < 0)
				UT_OUT("!%s: pmemblk_check", fname);
			else if (result == 0)
				UT_OUT("%s: pmemblk_check: not consistent",
						fname);
			else {
				/* checking with a wrong bsize must fail */
				UT_ASSERTeq(pmemblk_check(fname, bsize + 1),
						-1);
				/* bsize 0 appears to mean "any" -- the
				   reopened pool reports the real bsize */
				UT_ASSERTeq(pmemblk_check(fname, 0), 1);
				handle = pmemblk_open(fname, 0);
				UT_ASSERTeq(pmemblk_bsize(handle), bsize);
				pmemblk_close(handle);
			}
		}
	}
	DONE(NULL);
}
| 1,358 | 22.431034 | 62 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_invalid/obj_tx_invalid.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_tx_invalid.c -- tests which transactional functions are available in
* which transaction stages
*/
#include <stddef.h>
#include "file.h"
#include "unittest.h"
/*
 * Layout definition -- a single user type (the root does not count, see
 * the UT_COMPILE_ERROR_ON check in main()).
 */
POBJ_LAYOUT_BEGIN(tx_invalid);
POBJ_LAYOUT_ROOT(tx_invalid, struct dummy_root);
POBJ_LAYOUT_TOID(tx_invalid, struct dummy_node);
POBJ_LAYOUT_END(tx_invalid);
/* allocated with type number 1 by main() */
struct dummy_node {
	int value;
};
struct dummy_root {
	TOID(struct dummy_node) node;
};
/*
 * OP_IN_ALL_STAGES -- (internal) dispatch arms that invoke 'call' outside
 * of any transaction ("<op>") and in every transaction stage:
 * "<op>-in-work", "<op>-in-abort", "<op>-in-commit", "<op>-in-finally",
 * and right after the transaction ("<op>-after-tx").
 *
 * This factors out the dispatch pattern that was previously repeated
 * verbatim for every transactional operation.  'op' must be a string
 * literal (it is concatenated with the stage suffixes).  Each expansion
 * starts with 'else if', so the chain must be opened by the caller.
 */
#define OP_IN_ALL_STAGES(op, call)\
	else if (strcmp(argv[2], op) == 0)\
		call;\
	else if (strcmp(argv[2], op "-in-work") == 0) {\
		TX_BEGIN(pop) {\
			call;\
		} TX_END\
	} else if (strcmp(argv[2], op "-in-abort") == 0) {\
		TX_BEGIN(pop) {\
			pmemobj_tx_abort(ENOMEM);\
		} TX_ONABORT {\
			call;\
		} TX_END\
	} else if (strcmp(argv[2], op "-in-commit") == 0) {\
		TX_BEGIN(pop) {\
		} TX_ONCOMMIT {\
			call;\
		} TX_END\
	} else if (strcmp(argv[2], op "-in-finally") == 0) {\
		TX_BEGIN(pop) {\
		} TX_FINALLY {\
			call;\
		} TX_END\
	} else if (strcmp(argv[2], op "-after-tx") == 0) {\
		TX_BEGIN(pop) {\
		} TX_END\
		call;\
	}

/*
 * main -- run a single transactional libpmemobj call, selected by the
 * "op" argument, in a chosen transaction stage; the surrounding test
 * framework matches the resulting output (or crash) against expectations
 */
int
main(int argc, char *argv[])
{
	if (argc != 3)
		UT_FATAL("usage: %s file-name op", argv[0]);
	START(argc, argv, "obj_tx_invalid %s", argv[2]);
	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(tx_invalid) != 1);
	PMEMobjpool *pop;
	const char *path = argv[1];
	int exists = util_file_exists(path);
	if (exists < 0)
		UT_FATAL("!util_file_exists");
	if (!exists) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(tx_invalid),
		    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) {
			UT_FATAL("!pmemobj_create %s", path);
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(tx_invalid)))
				== NULL) {
			UT_FATAL("!pmemobj_open %s", path);
		}
	}
	/* reuse the object allocated by a previous run, if there is one */
	PMEMoid oid = pmemobj_first(pop);
	if (OID_IS_NULL(oid)) {
		if (pmemobj_alloc(pop, &oid, 10, 1, NULL, NULL))
			UT_FATAL("!pmemobj_alloc");
	} else {
		UT_ASSERTeq(pmemobj_type_num(oid), 1);
	}
	if (0) {
		/* empty chain starter -- every macro arm below is 'else if' */
	}
	OP_IN_ALL_STAGES("alloc", pmemobj_tx_alloc(10, 1))
	OP_IN_ALL_STAGES("zalloc", pmemobj_tx_zalloc(10, 1))
	OP_IN_ALL_STAGES("strdup", pmemobj_tx_strdup("aaa", 1))
	OP_IN_ALL_STAGES("realloc", pmemobj_tx_realloc(oid, 10, 1))
	OP_IN_ALL_STAGES("zrealloc", pmemobj_tx_zrealloc(oid, 10, 1))
	OP_IN_ALL_STAGES("free", pmemobj_tx_free(oid))
	OP_IN_ALL_STAGES("add_range", pmemobj_tx_add_range(oid, 0, 10))
	OP_IN_ALL_STAGES("add_range_direct",
	    pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10))
	OP_IN_ALL_STAGES("abort", pmemobj_tx_abort(ENOMEM))
	OP_IN_ALL_STAGES("commit", pmemobj_tx_commit())
	/*
	 * "end", "process" and "begin" do not fit the common pattern: some
	 * of their stage variants must end the transaction by hand, close
	 * the pool and exit(0), or start a nested transaction -- so these
	 * arms stay spelled out.
	 */
	else if (strcmp(argv[2], "end") == 0)
		pmemobj_tx_end();
	else if (strcmp(argv[2], "end-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_end();
		} TX_END
	} else if (strcmp(argv[2], "end-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			/* end the tx by hand, then exit cleanly */
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "end-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_end();
	}
	else if (strcmp(argv[2], "process") == 0)
		pmemobj_tx_process();
	else if (strcmp(argv[2], "process-in-work") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			pmemobj_tx_process();
		} TX_END
	} else if (strcmp(argv[2], "process-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			pmemobj_tx_process();
			pmemobj_tx_end();
			pmemobj_close(pop);
			exit(0);
		} TX_END
	} else if (strcmp(argv[2], "process-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		pmemobj_tx_process();
	}
	else if (strcmp(argv[2], "begin") == 0) {
		TX_BEGIN(pop) {
		} TX_END
	} else if (strcmp(argv[2], "begin-in-work") == 0) {
		TX_BEGIN(pop) {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-abort") == 0) {
		TX_BEGIN(pop) {
			pmemobj_tx_abort(ENOMEM);
		} TX_ONABORT {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-commit") == 0) {
		TX_BEGIN(pop) {
		} TX_ONCOMMIT {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-in-finally") == 0) {
		TX_BEGIN(pop) {
		} TX_FINALLY {
			TX_BEGIN(pop) {
			} TX_END
		} TX_END
	} else if (strcmp(argv[2], "begin-after-tx") == 0) {
		TX_BEGIN(pop) {
		} TX_END
		TX_BEGIN(pop) {
		} TX_END
	}
	pmemobj_close(pop);
	DONE(NULL);
}
| 11,213 | 23.809735 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_has_auto_flush/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used in pmem_has_auto_flush.c
*/
#include <fts.h>
#include "fs.h"
#include "unittest.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
/*
* open -- open mock
*/
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
if (!strstr(path, BUS_DEVICE_PATH))
return _FUNC_REAL(open)(path, flags, mode);
const char *prefix = os_getenv("BUS_DEVICE_PATH");
char path2[PATH_MAX] = { 0 };
strcat(path2, prefix);
strcat(path2, path + strlen(BUS_DEVICE_PATH));
return _FUNC_REAL(open)(path2, flags, mode);
}
FUNC_MOCK_END
struct fs {
FTS *ft;
struct fs_entry entry;
};
/*
* fs_new -- creates fs traversal instance
*/
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
if (!strstr(path, BUS_DEVICE_PATH))
return _FUNC_REAL(fs_new)(path);
const char *prefix = os_getenv("BUS_DEVICE_PATH");
char path2[PATH_MAX] = { 0 };
strcat(path2, prefix);
strcat(path2, path + strlen(BUS_DEVICE_PATH));
return _FUNC_REAL(fs_new)(path2);
}
FUNC_MOCK_END
/*
* os_stat -- os_stat mock to handle sysfs path
*/
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
if (!strstr(path, BUS_DEVICE_PATH))
return _FUNC_REAL(os_stat)(path, buf);
const char *prefix = os_getenv("BUS_DEVICE_PATH");
char path2[PATH_MAX] = { 0 };
strcat(path2, prefix);
strcat(path2, path + strlen(BUS_DEVICE_PATH));
return _FUNC_REAL(os_stat)(path2, buf);
}
FUNC_MOCK_END
| 1,627 | 22.257143 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_badblock_mocks/mocks_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_ndctl.c -- mocked ndctl functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <sys/stat.h>
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "pmem2_badblock_mocks.h"
/*
 * The "handles" passed to these mocks (struct ndctl_namespace *, struct
 * ndctl_region *, ...) are not real libndctl objects -- the test encodes
 * the device type, ndctl mode and test number in their bits (see
 * pmem2_badblock_mocks.h) and the mocks decode them with IS_MODE_*/UINT.
 */
#define RESOURCE_ADDRESS 0x1000 /* any non-zero value */
#define UINT(ptr) (unsigned)((uintptr_t)ptr)
/* index of bad blocks -- shared cursor used by the *_first/_next mocks */
static unsigned i_bb;
/*
 * ndctl_namespace_get_mode - mock ndctl_namespace_get_mode
 */
FUNC_MOCK(ndctl_namespace_get_mode, enum ndctl_namespace_mode,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode */
		return NDCTL_NS_MODE_FSDAX;
	/* raw mode */
	return NDCTL_NS_MODE_RAW;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_pfn - mock ndctl_namespace_get_pfn
 *
 * In namespace mode the encoded descriptor itself is reused as the pfn
 * handle (there is no real object behind it); otherwise NULL.
 */
FUNC_MOCK(ndctl_namespace_get_pfn, struct ndctl_pfn *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_NAMESPACE((uintptr_t)ndns))
		/* namespace mode */
		return (struct ndctl_pfn *)ndns;
	return NULL;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_dax - mock ndctl_namespace_get_dax
 *
 * In region mode the encoded descriptor is reused as the dax handle;
 * otherwise NULL.
 */
FUNC_MOCK(ndctl_namespace_get_dax, struct ndctl_dax *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	if (IS_MODE_REGION((uintptr_t)ndns))
		/* region mode */
		return (struct ndctl_dax *)ndns;
	return NULL;
}
FUNC_MOCK_END
/*
 * All mocked devices report the same fixed base address
 * (RESOURCE_ADDRESS) and a fixed 1 GiB size, whatever handle is passed.
 */
/*
 * ndctl_pfn_get_resource - mock ndctl_pfn_get_resource
 */
FUNC_MOCK(ndctl_pfn_get_resource, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_pfn_get_size - mock ndctl_pfn_get_size
 */
FUNC_MOCK(ndctl_pfn_get_size, unsigned long long,
	struct ndctl_pfn *pfn)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_dax_get_resource - mock ndctl_dax_get_resource
 */
FUNC_MOCK(ndctl_dax_get_resource, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_dax_get_size - mock ndctl_dax_get_size
 */
FUNC_MOCK(ndctl_dax_get_size, unsigned long long,
	struct ndctl_dax *dax)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_resource - mock ndctl_namespace_get_resource
 */
FUNC_MOCK(ndctl_namespace_get_resource, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_size - mock ndctl_namespace_get_size
 */
FUNC_MOCK(ndctl_namespace_get_size, unsigned long long,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	return DEV_SIZE_1GB; /* 1 GiB */
}
FUNC_MOCK_END
/*
 * ndctl_region_get_resource - mock ndctl_region_get_resource
 */
FUNC_MOCK(ndctl_region_get_resource, unsigned long long,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return RESOURCE_ADDRESS;
}
FUNC_MOCK_END
/*
 * ndctl_region_get_bus - mock ndctl_region_get_bus
 *
 * The encoded region descriptor doubles as the bus handle.
 */
FUNC_MOCK(ndctl_region_get_bus, struct ndctl_bus *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return (struct ndctl_bus *)region;
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_first_badblock - mock ndctl_namespace_get_first_badblock
 *
 * Resets the shared cursor i_bb -- only one iteration (namespace or
 * region) may be in flight at a time.
 */
FUNC_MOCK(ndctl_namespace_get_first_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_namespace_get_next_badblock - mock ndctl_namespace_get_next_badblock
 */
FUNC_MOCK(ndctl_namespace_get_next_badblock, struct badblock *,
	struct ndctl_namespace *ndns)
FUNC_MOCK_RUN_DEFAULT {
	/* continues from the cursor advanced by the previous call */
	return get_nth_hw_badblock(UINT(ndns), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_region_get_first_badblock - mock ndctl_region_get_first_badblock
 */
FUNC_MOCK(ndctl_region_get_first_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	i_bb = 0;
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END
/*
 * ndctl_region_get_next_badblock - mock ndctl_region_get_next_badblock
 */
FUNC_MOCK(ndctl_region_get_next_badblock, struct badblock *,
	struct ndctl_region *region)
FUNC_MOCK_RUN_DEFAULT {
	return get_nth_hw_badblock(UINT(region), &i_bb);
}
FUNC_MOCK_END
/* single in-flight ARS command -- only one command is supported at a time */
static struct ndctl_data {
	uintptr_t bus;
	unsigned long long address;
	unsigned long long length;
} data;
/*
 * ndctl_bus_cmd_new_ars_cap - mock ndctl_bus_cmd_new_ars_cap
 *
 * Records the request in the static 'data' and hands it back as the
 * command handle.
 */
FUNC_MOCK(ndctl_bus_cmd_new_ars_cap, struct ndctl_cmd *,
	struct ndctl_bus *bus, unsigned long long address,
	unsigned long long len)
FUNC_MOCK_RUN_DEFAULT {
	data.bus = (uintptr_t)bus;
	data.address = address;
	data.length = len;
	return (struct ndctl_cmd *)&data;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_submit - mock ndctl_cmd_submit -- always succeeds
 */
FUNC_MOCK(ndctl_cmd_submit, int, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_ars_cap_get_range - mock ndctl_cmd_ars_cap_get_range
 *
 * No-op success; 'range' is left untouched.
 */
FUNC_MOCK(ndctl_cmd_ars_cap_get_range, int,
	struct ndctl_cmd *ars_cap, struct ndctl_range *range)
FUNC_MOCK_RUN_DEFAULT {
	return 0;
}
FUNC_MOCK_END
/*
 * ndctl_bus_cmd_new_clear_error - mock ndctl_bus_cmd_new_clear_error
 *
 * Reuses the ars_cap handle (the static 'data') as the clear-error cmd.
 */
FUNC_MOCK(ndctl_bus_cmd_new_clear_error, struct ndctl_cmd *,
	unsigned long long address,
	unsigned long long len,
	struct ndctl_cmd *ars_cap)
FUNC_MOCK_RUN_DEFAULT {
	return ars_cap;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_clear_error_get_cleared - mock ndctl_cmd_clear_error_get_cleared
 *
 * Logs the recorded request and pretends the whole range was cleared.
 */
FUNC_MOCK(ndctl_cmd_clear_error_get_cleared, unsigned long long,
	struct ndctl_cmd *clear_err)
FUNC_MOCK_RUN_DEFAULT {
	struct ndctl_data *pdata = (struct ndctl_data *)clear_err;
	UT_OUT("ndctl_clear_error(%lu, %llu, %llu)",
		pdata->bus, pdata->address, pdata->length);
	return pdata->length;
}
FUNC_MOCK_END
/*
 * ndctl_cmd_unref - mock ndctl_cmd_unref -- nothing to release
 */
FUNC_MOCK(ndctl_cmd_unref, void, struct ndctl_cmd *cmd)
FUNC_MOCK_RUN_DEFAULT {
}
FUNC_MOCK_END
| 5,900 | 22.050781 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.h -- definitions for pmem2_badblock_mocks test
*/
#include "extent.h"
/* fd bits 6-8: type of device */
#define FD_REG_FILE (1 << 6) /* regular file */
#define FD_CHR_DEV (2 << 6) /* character device */
#define FD_DIRECTORY (3 << 6) /* directory */
#define FD_BLK_DEV (4 << 6) /* block device */
/* fd bits 4-5: ndctl mode */
#define MODE_NO_DEVICE (1 << 4) /* did not found any matching device */
#define MODE_NAMESPACE (2 << 4) /* namespace mode */
#define MODE_REGION (3 << 4) /* region mode */
/* fd bits 0-3: number of test */
/* masks */
#define MASK_DEVICE 0b0111000000 /* bits 6-8: device mask */
#define MASK_MODE 0b0000110000 /* bits 4-5: mode mask */
#define MASK_TEST 0b0000001111 /* bits 0-3: test mask */
/* checks */
#define IS_MODE_NO_DEVICE(x) ((x & MASK_MODE) == MODE_NO_DEVICE)
#define IS_MODE_NAMESPACE(x) ((x & MASK_MODE) == MODE_NAMESPACE)
#define IS_MODE_REGION(x) ((x & MASK_MODE) == MODE_REGION)
/* default block size: 1kB */
#define BLK_SIZE_1KB 1024
/* default size of device: 1 GiB */
#define DEV_SIZE_1GB (1024 * 1024 * 1024)
struct badblock *get_nth_hw_badblock(unsigned test, unsigned *i_bb);
int get_extents(int fd, struct extents **exts);
| 1,290 | 31.275 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_badblock_mocks/mocks_pmem2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* mocks_pmem2.c -- mocked pmem2 functions used
* indirectly in pmem2_badblock_mocks.c
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "extent.h"
#include "source.h"
#include "pmem2_utils.h"
#include "pmem2_badblock_mocks.h"
/*
* pmem2_region_namespace - mock pmem2_region_namespace
*/
FUNC_MOCK(pmem2_region_namespace, int,
struct ndctl_ctx *ctx,
const struct pmem2_source *src,
struct ndctl_region **pregion,
struct ndctl_namespace **pndns)
FUNC_MOCK_RUN_DEFAULT {
UT_ASSERTne(pregion, NULL);
dev_t st_rdev = src->value.st_rdev;
*pregion = (void *)st_rdev;
if (pndns == NULL)
return 0;
UT_ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
src->value.ftype == PMEM2_FTYPE_DEVDAX);
if (IS_MODE_NO_DEVICE(st_rdev)) {
/* did not found any matching device */
*pndns = NULL;
return 0;
}
*pndns = (void *)st_rdev;
return 0;
}
FUNC_MOCK_END
/*
* pmem2_extents_create_get -- allocate extents structure and get extents
* of the given file
*/
FUNC_MOCK(pmem2_extents_create_get, int,
int fd, struct extents **exts)
FUNC_MOCK_RUN_DEFAULT {
return get_extents(fd, exts);
}
FUNC_MOCK_END
| 1,279 | 20.694915 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_badblock_mocks.c -- unit test for pmem2_badblock_*()
*/
#include <ndctl/libndctl.h>
#include "unittest.h"
#include "out.h"
#include "source.h"
#include "badblocks.h"
#include "pmem2_badblock_mocks.h"
#define BAD_BLOCKS_NUMBER 10
#define EXTENTS_NUMBER 8
#define MAX_BB_SET_STR "4"
#define MAX_BB_SET 4
#define DEFAULT_BB_SET 1
#define USAGE_MSG \
"Usage: pmem2_badblock_mocks <test_case> <file_type> <mode> [bad_blocks_set]\n"\
"Possible values of arguments:\n"\
" test_case : test_basic, test_read_clear_bb \n"\
" file_type : reg_file, chr_dev\n"\
" mode : no_device, namespace, region\n"\
" bad_blocks_set : 1-"MAX_BB_SET_STR"\n\n"
/* indexes of arguments */
enum args_t {
ARG_TEST_CASE = 1,
ARG_FILE_TYPE,
ARG_MODE,
ARG_BB_SET,
/* it always has to be the last one */
ARG_NUMBER, /* number of arguments */
};
typedef int test_fn(struct pmem2_source *src);
typedef struct badblock bad_blocks_array[BAD_BLOCKS_NUMBER];
/* HW bad blocks expressed in 512b sectors */
static bad_blocks_array hw_bad_blocks[] =
{
/* test #1 - no bad blocks */
{ {0, 0} },
/* test #2 - 1 HW bad block */
{ {1, 1}, {0, 0} },
/* test #3 - 6 HW bad blocks */
{ {4, 10}, {16, 10}, {28, 2}, {32, 4}, {40, 4}, {50, 2}, {0, 0} },
/* test #4 - 7 HW bad blocks */
{ {2, 4}, {8, 2}, {12, 6}, {20, 2}, {24, 10}, {38, 4}, {46, 2}, \
{0, 0} },
};
/* file's bad blocks expressed in 512b sectors */
static bad_blocks_array file_bad_blocks[] =
{
/* test #1 - no bad blocks */
{ {0, 0} },
/* test #2 - 1 file bad block */
{ {0, 2}, {0, 0} },
/* test #3 - 9 file bad blocks */
{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
{32, 2}, {40, 2}, {0, 0} },
/* test #4 - 9 file bad blocks */
{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
{32, 2}, {40, 2}, {0, 0} },
};
/* file's extents expressed in 512b sectors */
static struct extent files_extents[][EXTENTS_NUMBER] =
{
/* test #1 - no extents */
{ {0, 0, 0} },
/* test #2 - 1 extent */
{ {0, 0, 2}, {0, 0, 0} },
/* test #3 - 7 extents */
{ {2, 2, 4}, {8, 8, 2}, {12, 12, 6}, {20, 20, 2}, {24, 24, 10}, \
{38, 38, 4}, {46, 46, 2}, {0, 0, 0} },
/* test #4 - 6 extents */
{ {4, 4, 10}, {16, 16, 10}, {28, 28, 2}, {32, 32, 4}, {40, 40, 4}, \
{50, 50, 2}, {0, 0, 0} },
};
/*
* map_test_to_set -- map number of a test to an index of bad blocks' set
*/
static inline unsigned
map_test_to_set(unsigned test)
{
return test & MASK_TEST;
}
/*
* get_nth_typed_badblock -- get next typed badblock
*/
static struct badblock *
get_nth_typed_badblock(unsigned test, unsigned *i_bb,
bad_blocks_array bad_blocks[])
{
unsigned set = map_test_to_set(test);
struct badblock *bb = &bad_blocks[set][*i_bb];
if (bb->offset == 0 && bb->len == 0)
bb = NULL; /* no more bad blocks */
else
(*i_bb)++;
return bb;
}
/*
* get_nth_hw_badblock -- get next HW badblock
*/
struct badblock *
get_nth_hw_badblock(unsigned test, unsigned *i_bb)
{
return get_nth_typed_badblock(test, i_bb, hw_bad_blocks);
}
/*
* get_nth_file_badblock -- get next file's badblock
*/
static struct badblock *
get_nth_file_badblock(unsigned test, unsigned *i_bb)
{
return get_nth_typed_badblock(test, i_bb, file_bad_blocks);
}
/*
* get_nth_badblock -- get next badblock
*/
static struct badblock *
get_nth_badblock(int fd, unsigned *i_bb)
{
UT_ASSERT(fd >= 0);
if ((fd & MASK_MODE) == MODE_NO_DEVICE)
/* no matching device found */
return NULL;
switch (fd & MASK_DEVICE) {
case FD_REG_FILE: /* regular file */
return get_nth_file_badblock((unsigned)fd, i_bb);
case FD_CHR_DEV: /* character device */
return get_nth_hw_badblock((unsigned)fd, i_bb);
case FD_DIRECTORY:
case FD_BLK_DEV:
break;
}
/* no bad blocks found */
return NULL;
}
/*
* get_extents -- get file's extents
*/
int
get_extents(int fd, struct extents **exts)
{
unsigned set = map_test_to_set((unsigned)fd);
*exts = ZALLOC(sizeof(struct extents));
struct extents *pexts = *exts;
/* set block size */
pexts->blksize = BLK_SIZE_1KB;
if ((fd & MASK_DEVICE) != FD_REG_FILE) {
/* not a regular file */
return 0;
}
/* count extents (length > 0) */
while (files_extents[set][pexts->extents_count].length)
pexts->extents_count++;
/*
* It will be freed internally by libpmem2
* (pmem2_badblock_context_delete)
*/
pexts->extents = MALLOC(pexts->extents_count * sizeof(struct extent));
for (int i = 0; i < pexts->extents_count; i++) {
struct extent ext = files_extents[set][i];
uint64_t off_phy = ext.offset_physical;
uint64_t off_log = ext.offset_logical;
uint64_t len = ext.length;
/* check alignment */
UT_ASSERTeq(SEC2B(off_phy) % pexts->blksize, 0);
UT_ASSERTeq(SEC2B(off_log) % pexts->blksize, 0);
UT_ASSERTeq(SEC2B(len) % pexts->blksize, 0);
pexts->extents[i].offset_physical = SEC2B(off_phy);
pexts->extents[i].offset_logical = SEC2B(off_log);
pexts->extents[i].length = SEC2B(len);
}
return 0;
}
/*
* test_basic -- basic test
*/
static int
test_basic(struct pmem2_source *src)
{
UT_OUT("TEST: test_basic: 0x%x", src->value.fd);
struct pmem2_badblock_context *bbctx;
struct pmem2_badblock bb;
int ret;
ret = pmem2_badblock_context_new(src, &bbctx);
if (ret)
return ret;
ret = pmem2_badblock_next(bbctx, &bb);
pmem2_badblock_context_delete(&bbctx);
return ret;
}
/*
* test_read_clear_bb -- test reading and clearing bad blocks
*/
static int
test_read_clear_bb(struct pmem2_source *src)
{
UT_OUT("TEST: test_read_clear_bb: 0x%x", src->value.fd);
struct pmem2_badblock_context *bbctx;
struct pmem2_badblock bb;
struct badblock *bb2;
unsigned i_bb;
int ret;
ret = pmem2_badblock_context_new(src, &bbctx);
if (ret)
return ret;
i_bb = 0;
while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
bb2 = get_nth_badblock(src->value.fd, &i_bb);
UT_ASSERTne(bb2, NULL);
UT_ASSERTeq(bb.offset, SEC2B(bb2->offset));
UT_ASSERTeq(bb.length, SEC2B(bb2->len));
ret = pmem2_badblock_clear(bbctx, &bb);
if (ret)
goto exit_free;
}
bb2 = get_nth_badblock(src->value.fd, &i_bb);
UT_ASSERTeq(bb2, NULL);
exit_free:
pmem2_badblock_context_delete(&bbctx);
return ret;
}
static void
parse_arguments(int argc, char *argv[], int *test, enum pmem2_file_type *ftype,
test_fn **test_func)
{
if (argc < (ARG_NUMBER - 1) || argc > ARG_NUMBER) {
UT_OUT(USAGE_MSG);
if (argc > ARG_NUMBER)
UT_FATAL("too many arguments");
else
UT_FATAL("missing required argument(s)");
}
char *test_case = argv[ARG_TEST_CASE];
char *file_type = argv[ARG_FILE_TYPE];
char *mode = argv[ARG_MODE];
*test = 0;
*test_func = NULL;
if (strcmp(test_case, "test_basic") == 0) {
*test_func = test_basic;
} else if (strcmp(test_case, "test_read_clear_bb") == 0) {
*test_func = test_read_clear_bb;
} else {
UT_OUT(USAGE_MSG);
UT_FATAL("wrong test case: %s", test_case);
}
if (strcmp(file_type, "reg_file") == 0) {
*test |= FD_REG_FILE;
*ftype = PMEM2_FTYPE_REG;
} else if (strcmp(file_type, "chr_dev") == 0) {
*test |= FD_CHR_DEV;
*ftype = PMEM2_FTYPE_DEVDAX;
} else {
UT_OUT(USAGE_MSG);
UT_FATAL("wrong file type: %s", file_type);
}
if (strcmp(mode, "no_device") == 0) {
*test |= MODE_NO_DEVICE;
} else if (strcmp(mode, "namespace") == 0) {
*test |= MODE_NAMESPACE;
} else if (strcmp(mode, "region") == 0) {
*test |= MODE_REGION;
} else {
UT_OUT(USAGE_MSG);
UT_FATAL("wrong mode: %s", mode);
}
int bad_blocks_set =
(argc == 5) ? atoi(argv[ARG_BB_SET]) : DEFAULT_BB_SET;
if (bad_blocks_set >= 1 && bad_blocks_set <= MAX_BB_SET) {
*test |= (bad_blocks_set - 1);
} else {
UT_OUT(USAGE_MSG);
UT_FATAL("wrong bad_blocks_set: %i", bad_blocks_set);
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_badblock_mocks");
/* sanity check of defines */
UT_ASSERTeq(atoi(MAX_BB_SET_STR), MAX_BB_SET);
struct pmem2_source src;
test_fn *test_func;
src.type = PMEM2_SOURCE_FD;
parse_arguments(argc, argv, &src.value.fd, &src.value.ftype,
&test_func);
src.value.st_rdev = (dev_t)src.value.fd;
int result = test_func(&src);
UT_ASSERTeq(result, PMEM2_E_NO_BAD_BLOCK_FOUND);
DONE(NULL);
}
| 8,239 | 22.815029 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_api/pmem2_api.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_api.c -- PMEM2_API_[START|END] unittests
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"
/*
* map_valid -- return valid mapped pmem2_map and validate mapped memory length
*/
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
struct pmem2_map *map = NULL;
PMEM2_MAP(cfg, src, &map);
UT_ASSERTeq(pmem2_map_get_size(map), size);
return map;
}
/*
* test_pmem2_api_logs -- map O_RDWR file and do pmem2_[cpy|set|move]_fns
*/
static int
test_pmem2_api_logs(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL(
"usage: test_mem_move_cpy_set_with_map_private <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
const char *word1 = "Persistent memory...";
const char *word2 = "Nonpersistent memory";
const char *word3 = "XXXXXXXXXXXXXXXXXXXX";
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t size = 0;
PMEM2_SOURCE_SIZE(src, &size);
struct pmem2_map *map = map_valid(cfg, src, size);
char *addr = pmem2_map_get_address(map);
pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
memcpy_fn(addr, word1, strlen(word1), 0);
UT_ASSERTeq(strcmp(addr, word1), 0);
memmove_fn(addr, word2, strlen(word2), 0);
UT_ASSERTeq(strcmp(addr, word2), 0);
memset_fn(addr, 'X', strlen(word3), 0);
UT_ASSERTeq(strcmp(addr, word3), 0);
/* cleanup after the test */
pmem2_unmap(&map);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_pmem2_api_logs),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_api");
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
DONE(NULL);
}
| 2,130 | 22.94382 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_obc/rpmemd_obc_test_open.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_open.c -- test cases for open request message
*/
#include "rpmemd_obc_test_common.h"
/*
* Number of cases for checking open request message. Must be kept in sync
* with client_bad_msg_open function.
*/
#define BAD_MSG_OPEN_COUNT 11
/*
* client_bad_msg_open -- check if server detects invalid open request
* messages
*/
static void
client_bad_msg_open(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_open *msg = MALLOC(msg_size);
for (int i = 0; i < BAD_MSG_OPEN_COUNT; i++) {
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = OPEN_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
switch (i) {
case 0:
msg->c.provider = 0;
break;
case 1:
msg->c.provider = MAX_RPMEM_PROV;
break;
case 2:
msg->pool_desc.size -= 1;
break;
case 3:
msg->pool_desc.size += 1;
break;
case 4:
msg->pool_desc.size = 0;
msg->hdr.size = sizeof(OPEN_MSG) +
msg->pool_desc.size;
break;
case 5:
msg->pool_desc.size = 1;
msg->hdr.size = sizeof(OPEN_MSG) +
msg->pool_desc.size;
break;
case 6:
msg->pool_desc.desc[0] = '\0';
break;
case 7:
msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
break;
case 8:
msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
break;
case 9:
msg->c.major = RPMEM_PROTO_MAJOR + 1;
break;
case 10:
msg->c.minor = RPMEM_PROTO_MINOR + 1;
break;
default:
UT_ASSERT(0);
}
rpmem_hton_msg_open(msg);
clnt_send(ssh, msg, msg_size);
clnt_wait_disconnect(ssh);
clnt_close(ssh);
}
FREE(msg);
FREE(target);
}
/*
* client_msg_open_noresp -- send open request message and don't expect a
* response
*/
static void
client_msg_open_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_open *msg = MALLOC(msg_size);
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = OPEN_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
rpmem_hton_msg_open(msg);
clnt_send(ssh, msg, msg_size);
clnt_wait_disconnect(ssh);
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_msg_open_resp -- send open request message and expect a response
* with specified status. If status is 0, validate open request response
* message
*/
static void
client_msg_open_resp(const char *ctarget, int status)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_open *msg = MALLOC(msg_size);
struct rpmem_msg_open_resp resp;
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = OPEN_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
rpmem_hton_msg_open(msg);
clnt_send(ssh, msg, msg_size);
clnt_recv(ssh, &resp, sizeof(resp));
rpmem_ntoh_msg_open_resp(&resp);
if (status) {
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
} else {
UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_OPEN_RESP);
UT_ASSERTeq(resp.hdr.size,
sizeof(struct rpmem_msg_open_resp));
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
UT_ASSERTeq(resp.ibc.port, PORT);
UT_ASSERTeq(resp.ibc.rkey, RKEY);
UT_ASSERTeq(resp.ibc.raddr, RADDR);
UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
}
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_open -- test case for open request message - client side
*/
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_bad_msg");
client_bad_msg_open(target);
set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_OPEN);
client_msg_open_noresp(target);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 0);
client_msg_open_resp(target, 0);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 1);
client_msg_open_resp(target, 1);
return 1;
}
| 4,105 | 21.56044 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_obc/rpmemd_obc_test_create.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_create.c -- test cases for create request message
*/
#include "rpmemd_obc_test_common.h"
/*
* Number of cases for checking create request message. Must be kept in sync
* with client_bad_msg_create function.
*/
#define BAD_MSG_CREATE_COUNT 11
/*
* client_bad_msg_create -- check if server detects invalid create request
* messages
*/
static void
client_bad_msg_create(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_create *msg = MALLOC(msg_size);
for (int i = 0; i < BAD_MSG_CREATE_COUNT; i++) {
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = CREATE_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
switch (i) {
case 0:
msg->c.provider = 0;
break;
case 1:
msg->c.provider = MAX_RPMEM_PROV;
break;
case 2:
msg->pool_desc.size -= 1;
break;
case 3:
msg->pool_desc.size += 1;
break;
case 4:
msg->pool_desc.size = 0;
msg->hdr.size = sizeof(CREATE_MSG) +
msg->pool_desc.size;
break;
case 5:
msg->pool_desc.size = 1;
msg->hdr.size = sizeof(CREATE_MSG) +
msg->pool_desc.size;
break;
case 6:
msg->pool_desc.desc[0] = '\0';
break;
case 7:
msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
break;
case 8:
msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
break;
case 9:
msg->c.major = RPMEM_PROTO_MAJOR + 1;
break;
case 10:
msg->c.minor = RPMEM_PROTO_MINOR + 1;
break;
default:
UT_ASSERT(0);
}
rpmem_hton_msg_create(msg);
clnt_send(ssh, msg, msg_size);
clnt_wait_disconnect(ssh);
clnt_close(ssh);
}
FREE(msg);
FREE(target);
}
/*
* client_msg_create_noresp -- send create request message and don't expect
* a response
*/
static void
client_msg_create_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_create *msg = MALLOC(msg_size);
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = CREATE_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
rpmem_hton_msg_create(msg);
clnt_send(ssh, msg, msg_size);
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_msg_create_resp -- send create request message and expect a response
* with specified status. If status is 0, validate create request response
* message
*/
static void
client_msg_create_resp(const char *ctarget, int status)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
struct rpmem_msg_create *msg = MALLOC(msg_size);
struct rpmem_msg_create_resp resp;
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = CREATE_MSG;
msg->hdr.size = msg_size;
memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);
rpmem_hton_msg_create(msg);
clnt_send(ssh, msg, msg_size);
clnt_recv(ssh, &resp, sizeof(resp));
rpmem_ntoh_msg_create_resp(&resp);
if (status) {
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
} else {
UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_CREATE_RESP);
UT_ASSERTeq(resp.hdr.size,
sizeof(struct rpmem_msg_create_resp));
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
UT_ASSERTeq(resp.ibc.port, PORT);
UT_ASSERTeq(resp.ibc.rkey, RKEY);
UT_ASSERTeq(resp.ibc.raddr, RADDR);
UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
}
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_create -- test case for create request message - client side
*/
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_bad_msg");
client_bad_msg_create(target);
set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CREATE);
client_msg_create_noresp(target);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 0);
client_msg_create_resp(target, 0);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 1);
client_msg_create_resp(target, 1);
return 1;
}
| 4,165 | 22.016575 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_obc/rpmemd_obc_test_set_attr.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* rpmemd_obc_test_set_attr.c -- test cases for set attributes request message
*/
#include "rpmemd_obc_test_common.h"
/*
* client_msg_set_attr_noresp -- send set attributes request message and don't
* expect a response
*/
static void
client_msg_set_attr_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(SET_ATTR_MSG);
struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = SET_ATTR_MSG;
rpmem_hton_msg_set_attr(msg);
clnt_send(ssh, msg, msg_size);
clnt_wait_disconnect(ssh);
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_msg_set_attr_resp -- send set attributes request message and expect
* a response with specified status. If status is 0, validate set attributes
* request response message
*/
static void
client_msg_set_attr_resp(const char *ctarget, int status)
{
char *target = STRDUP(ctarget);
size_t msg_size = sizeof(SET_ATTR_MSG);
struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
struct rpmem_msg_set_attr_resp resp;
struct rpmem_ssh *ssh = clnt_connect(target);
*msg = SET_ATTR_MSG;
rpmem_hton_msg_set_attr(msg);
clnt_send(ssh, msg, msg_size);
clnt_recv(ssh, &resp, sizeof(resp));
rpmem_ntoh_msg_set_attr_resp(&resp);
if (status) {
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
} else {
UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_SET_ATTR_RESP);
UT_ASSERTeq(resp.hdr.size,
sizeof(struct rpmem_msg_set_attr_resp));
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
}
clnt_close(ssh);
FREE(msg);
FREE(target);
}
/*
* client_set_attr -- test case for set attributes request message - client
* side
*/
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_SET_ATTR);
client_msg_set_attr_noresp(target);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 0);
client_msg_set_attr_resp(target, 0);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 1);
client_msg_set_attr_resp(target, 1);
return 1;
}
| 2,255 | 22.5 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_obc/rpmemd_obc_test_close.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmemd_obc_test_close.c -- test cases for close request message
*/
#include "rpmemd_obc_test_common.h"
/*
* client_msg_close_noresp -- send close request message and don't expect a
* response
*/
static void
client_msg_close_noresp(const char *ctarget)
{
char *target = STRDUP(ctarget);
struct rpmem_msg_close msg = CLOSE_MSG;
rpmem_hton_msg_close(&msg);
struct rpmem_ssh *ssh = clnt_connect(target);
clnt_send(ssh, &msg, sizeof(msg));
clnt_wait_disconnect(ssh);
clnt_close(ssh);
FREE(target);
}
/*
* client_msg_close_resp -- send close request message and expect a response
* with specified status. If status is 0, validate close request response
* message
*/
static void
client_msg_close_resp(const char *ctarget, int status)
{
char *target = STRDUP(ctarget);
struct rpmem_msg_close msg = CLOSE_MSG;
rpmem_hton_msg_close(&msg);
struct rpmem_msg_close_resp resp;
struct rpmem_ssh *ssh = clnt_connect(target);
clnt_send(ssh, &msg, sizeof(msg));
clnt_recv(ssh, &resp, sizeof(resp));
rpmem_ntoh_msg_close_resp(&resp);
if (status)
UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
clnt_close(ssh);
FREE(target);
}
/*
* client_close -- test case for close request message - client side
*/
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CLOSE);
client_msg_close_noresp(target);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 0);
client_msg_close_resp(target, 0);
set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 1);
client_msg_close_resp(target, 1);
return 1;
}
| 1,791 | 21.683544 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_obc/rpmemd_obc_test_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc_test_common.h -- common declarations for rpmemd_obc test
*/
#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#include "rpmemd_log.h"
#include "rpmemd_obc.h"
#define PORT 1234
#define RKEY 0x0123456789abcdef
#define RADDR 0xfedcba9876543210
#define PERSIST_METHOD RPMEM_PM_APM
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
#define POOL_SIZE 0x0001234567abcdef
#define NLANES 0x123
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool.set"
#define BUFF_SIZE 8192
static const char pool_desc[] = POOL_DESC;
#define POOL_DESC_SIZE (sizeof(pool_desc) / sizeof(pool_desc[0]))
struct rpmem_ssh *clnt_connect(char *target);
void clnt_wait_disconnect(struct rpmem_ssh *ssh);
void clnt_send(struct rpmem_ssh *ssh, const void *buff, size_t len);
void clnt_recv(struct rpmem_ssh *ssh, void *buff, size_t len);
void clnt_close(struct rpmem_ssh *ssh);
enum conn_wait_close {
CONN_CLOSE,
CONN_WAIT_CLOSE,
};
void set_rpmem_cmd(const char *fmt, ...);
extern struct rpmemd_obc_requests REQ_CB;
struct req_cb_arg {
int resp;
unsigned long long types;
int force_ret;
int ret;
int status;
};
static const struct rpmem_msg_hdr MSG_HDR = {
.type = RPMEM_MSG_TYPE_CLOSE,
.size = sizeof(struct rpmem_msg_hdr),
};
static const struct rpmem_msg_create CREATE_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_CREATE,
.size = sizeof(struct rpmem_msg_create),
},
.c = {
.major = RPMEM_PROTO_MAJOR,
.minor = RPMEM_PROTO_MINOR,
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.buff_size = BUFF_SIZE,
},
.pool_attr = POOL_ATTR_INIT,
.pool_desc = {
.size = POOL_DESC_SIZE,
},
};
static const struct rpmem_msg_open OPEN_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_OPEN,
.size = sizeof(struct rpmem_msg_open),
},
.c = {
.major = RPMEM_PROTO_MAJOR,
.minor = RPMEM_PROTO_MINOR,
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.buff_size = BUFF_SIZE,
},
.pool_desc = {
.size = POOL_DESC_SIZE,
},
};
static const struct rpmem_msg_close CLOSE_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE,
.size = sizeof(struct rpmem_msg_close),
},
};
static const struct rpmem_msg_set_attr SET_ATTR_MSG = {
.hdr = {
.type = RPMEM_MSG_TYPE_SET_ATTR,
.size = sizeof(struct rpmem_msg_set_attr),
},
.pool_attr = POOL_ATTR_ALT,
};
TEST_CASE_DECLARE(server_accept_sim);
TEST_CASE_DECLARE(server_accept_sim_fork);
TEST_CASE_DECLARE(client_accept_sim);
TEST_CASE_DECLARE(server_accept_seq);
TEST_CASE_DECLARE(server_accept_seq_fork);
TEST_CASE_DECLARE(client_accept_seq);
TEST_CASE_DECLARE(client_bad_msg_hdr);
TEST_CASE_DECLARE(server_bad_msg);
TEST_CASE_DECLARE(server_msg_noresp);
TEST_CASE_DECLARE(server_msg_resp);
TEST_CASE_DECLARE(client_econnreset);
TEST_CASE_DECLARE(server_econnreset);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
| 3,791 | 23 | 70 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/ctl_prefault/ctl_prefault.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* ctl_prefault.c -- tests for the ctl entry points: prefault
*/
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "unittest.h"
#define OBJ_STR "obj"
#define BLK_STR "blk"
#define LOG_STR "log"
#define BSIZE 20
#define LAYOUT "obj_ctl_prefault"
#ifdef __FreeBSD__
typedef char vec_t;
#else
typedef unsigned char vec_t;
#endif
typedef int (*fun)(void *, const char *, void *);
/*
* prefault_fun -- function ctl_get/set testing
*/
static void
prefault_fun(int prefault, fun get_func, fun set_func)
{
int ret;
int arg;
int arg_read;
if (prefault == 1) { /* prefault at open */
arg_read = -1;
ret = get_func(NULL, "prefault.at_open", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
arg = 1;
ret = set_func(NULL, "prefault.at_open", &arg);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg, 1);
arg_read = -1;
ret = get_func(NULL, "prefault.at_open", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
} else if (prefault == 2) { /* prefault at create */
arg_read = -1;
ret = get_func(NULL, "prefault.at_create", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
arg = 1;
ret = set_func(NULL, "prefault.at_create", &arg);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg, 1);
arg_read = -1;
ret = get_func(NULL, "prefault.at_create", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
}
}
/*
* count_resident_pages -- count resident_pages
*/
static size_t
count_resident_pages(void *pool, size_t length)
{
size_t arr_len = (length + Ut_pagesize - 1) / Ut_pagesize;
vec_t *vec = MALLOC(sizeof(*vec) * arr_len);
int ret = mincore(pool, length, vec);
UT_ASSERTeq(ret, 0);
size_t resident_pages = 0;
for (size_t i = 0; i < arr_len; ++i)
resident_pages += vec[i] & 0x1;
FREE(vec);
return resident_pages;
}
/*
* test_obj -- open/create PMEMobjpool
*/
static void
test_obj(const char *path, int open)
{
PMEMobjpool *pop;
if (open) {
if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
} else {
if ((pop = pmemobj_create(path, LAYOUT,
PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
}
size_t resident_pages = count_resident_pages(pop, PMEMOBJ_MIN_POOL);
pmemobj_close(pop);
UT_OUT("%ld", resident_pages);
}
/*
* test_blk -- open/create PMEMblkpool
*/
static void
test_blk(const char *path, int open)
{
PMEMblkpool *pbp;
if (open) {
if ((pbp = pmemblk_open(path, BSIZE)) == NULL)
UT_FATAL("!pmemblk_open: %s", path);
} else {
if ((pbp = pmemblk_create(path, BSIZE, PMEMBLK_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemblk_create: %s", path);
}
size_t resident_pages = count_resident_pages(pbp, PMEMBLK_MIN_POOL);
pmemblk_close(pbp);
UT_OUT("%ld", resident_pages);
}
/*
 * test_log -- create or open a PMEMlogpool and print how many of its
 * pages are resident (checks the effect of the prefault ctl)
 */
static void
test_log(const char *path, int open)
{
	PMEMlogpool *plp;
	/*
	 * To test prefaulting, pool must have size at least equal to 2 pages.
	 * If 2MB huge pages are used this is at least 4MB.
	 */
	size_t pool_size = 2 * PMEMLOG_MIN_POOL;
	if (open) {
		if ((plp = pmemlog_open(path)) == NULL)
			UT_FATAL("!pmemlog_open: %s", path);
	} else {
		if ((plp = pmemlog_create(path, pool_size,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemlog_create: %s", path);
	}
	size_t resident_pages = count_resident_pages(plp, pool_size);
	pmemlog_close(plp);
	/* %zu matches size_t; "%ld" is a specifier mismatch (UB, breaks LLP64) */
	UT_OUT("%zu", resident_pages);
}
/* print the expected command line and abort -- the test takes 4 arguments */
#define USAGE() do {\
	UT_FATAL("usage: %s file-name type(obj/blk/log) prefault(0/1/2) "\
		"open(0/1)", argv[0]);\
} while (0)
int
main(int argc, char *argv[])
{
	START(argc, argv, "ctl_prefault");

	if (argc != 5)
		USAGE();

	char *pool_type = argv[1];
	const char *fname = argv[2];
	int prefault_mode = atoi(argv[3]);
	int do_open = atoi(argv[4]);

	/* pick the pool flavor under test and run the prefault scenario */
	if (strcmp(pool_type, OBJ_STR) == 0) {
		prefault_fun(prefault_mode, (fun)pmemobj_ctl_get,
				(fun)pmemobj_ctl_set);
		test_obj(fname, do_open);
	} else if (strcmp(pool_type, BLK_STR) == 0) {
		prefault_fun(prefault_mode, (fun)pmemblk_ctl_get,
				(fun)pmemblk_ctl_set);
		test_blk(fname, do_open);
	} else if (strcmp(pool_type, LOG_STR) == 0) {
		prefault_fun(prefault_mode, (fun)pmemlog_ctl_get,
				(fun)pmemlog_ctl_set);
		test_log(fname, do_open);
	} else {
		USAGE();
	}

	DONE(NULL);
}
| 4,326 | 20.527363 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memcpy/memcpy_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memcpy_common.c -- common part for tests doing a persistent memcpy
*/
#include "unittest.h"
#include "memcpy_common.h"
#include "valgrind_internal.h"
/*
 * do_memcpy: Worker function for memcpy
 *
 * Always work within the boundary of bytes. Fill in 1/2 of the src
 * memory with the pattern we want to write. This allows us to check
 * that we did not overwrite anything we were not supposed to in the
 * dest. Use the non pmem version of the memset/memcpy commands
 * so as not to introduce any possible side affects.
 */
void
do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
	size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
	unsigned flags, persist_fn persist)
{
	void *ret;
	char *buf = MALLOC(bytes);
	memset(buf, 0, bytes);
	memset(dest, 0, bytes);
	persist(dest, bytes);
	memset(src, 0, bytes);
	persist(src, bytes);
	/* two distinct patterns in src so partial/misplaced copies show up */
	memset(src, 0x5A, bytes / 4);
	persist(src, bytes / 4);
	memset(src + bytes / 4, 0x46, bytes / 4);
	persist(src + bytes / 4, bytes / 4);
	/* dest == src: must return dest and leave the contents untouched */
	ret = fn(dest + dest_off, dest + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);
	/* len == 0: must return dest and not touch the destination */
	ret = fn(dest + dest_off, src, 0, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);
	ret = fn(dest + dest_off, src + src_off, bytes / 2, flags);
	if (flags & PMEM2_F_MEM_NOFLUSH)
		/* for pmemcheck: mark the deliberately-unflushed range */
		VALGRIND_DO_PERSIST((dest + dest_off), bytes / 2);
	UT_ASSERTeq(ret, dest + dest_off);
	/* memcmp will validate that what I expect in memory. */
	if (memcmp(src + src_off, dest + dest_off, bytes / 2))
		UT_FATAL("%s: first %zu bytes do not match",
			file_name, bytes / 2);
	/*
	 * Now validate the contents of the file. NOTE(review): a short
	 * read silently skips this check -- presumably intentional.
	 */
	LSEEK(fd, (os_off_t)(dest_off + (int)(mapped_len / 2)), SEEK_SET);
	if (READ(fd, buf, bytes / 2) == bytes / 2) {
		if (memcmp(src + src_off, buf, bytes / 2))
			UT_FATAL("%s: first %zu bytes do not match",
				file_name, bytes / 2);
	}
	FREE(buf);
}
/*
 * Flags -- flag combinations each test variant is run with; the last
 * entry deliberately sets every flag at once
 */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 2,491 | 27.643678 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memcpy/memcpy_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memcpy_common.h -- header file for common memcpy utilities
*/
#ifndef MEMCPY_COMMON_H
#define MEMCPY_COMMON_H 1
#include "unittest.h"
#include "file.h"
/* pmem2-style memcpy: copies len bytes to pmemdest and returns pmemdest */
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
	unsigned flags);
/* flushes [ptr, ptr + len) to persistence */
typedef void (*persist_fn)(const void *ptr, size_t len);
/* flag combinations exercised by the tests (defined in memcpy_common.c) */
extern unsigned Flags[10];
void do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
	size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
	unsigned flags, persist_fn p);
#endif
| 611 | 23.48 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memcpy/pmem2_memcpy.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_memcpy.c -- test for doing a memcpy from libpmem2
*
* usage: pmem2_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "memcpy_common.h"
/*
 * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
 * of memcpy functions (one run per entry in Flags[])
 */
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
		size_t bytes, size_t mapped_len, const char *file_name,
		persist_fn p, memcpy_fn fn)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
				file_name, fn, Flags[i], p);
	}
}
int
main(int argc, char *argv[])
{
	int fd;
	char *dest;
	char *src;
	char *src_orig;
	size_t mapped_len;
	struct pmem2_config *cfg;
	struct pmem2_source *psrc;
	struct pmem2_map *map;
	if (argc != 5)
		UT_FATAL("usage: %s file destoff srcoff length", argv[0]);
	/* environment knobs are echoed into the test name for the log */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_memcpy %s %s %s %s %savx %savx512f",
		argv[2], argv[3], argv[4], thr ? thr : "default",
		avx ? "" : "!",
		avx512f ? "" : "!");
	util_init();
	fd = OPEN(argv[1], O_RDWR);
	UT_ASSERT(fd != -1);
	int dest_off = atoi(argv[2]);
	int src_off = atoi(argv[3]);
	size_t bytes = strtoul(argv[4], NULL, 0);
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&psrc, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, psrc, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	/* first pass: src in the upper half of the mapping (src > dest) */
	mapped_len = pmem2_map_get_size(map);
	dest = pmem2_map_get_address(map);
	if (dest == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);
	src_orig = src = dest + mapped_len / 2;
	UT_ASSERT(src > dest);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);
	memset(dest, 0, (2 * bytes));
	persist(dest, 2 * bytes);
	memset(src, 0, (2 * bytes));
	persist(src, 2 * bytes);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
		0, argv[1], persist, memcpy_fn);
	/* second pass: swap the halves so that dest > src */
	src = dest;
	dest = src_orig;
	if (dest <= src)
		UT_FATAL("cannot map files in memory order");
	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, mapped_len,
		argv[1], persist, memcpy_fn);
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	DONE(NULL);
}
| 2,527 | 22.849057 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_is_zeroed/util_is_zeroed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* util_is_zeroed.c -- unit test for util_is_zeroed
*/
#include "unittest.h"
#include "util.h"
int
main(int argc, char *argv[])
{
	START(argc, argv, "util_is_zeroed");
	util_init();

	/* three 1000-byte regions: non-zero, all-zero, non-zero */
	char region[3000];
	memset(region + 0, 0x11, 1000);
	memset(region + 1000, 0x0, 1000);
	memset(region + 2000, 0xff, 1000);

	UT_ASSERTeq(util_is_zeroed(region, 1000), 0);
	UT_ASSERTeq(util_is_zeroed(region + 1000, 1000), 1);
	UT_ASSERTeq(util_is_zeroed(region + 2000, 1000), 0);
	/* an empty range is trivially zeroed */
	UT_ASSERTeq(util_is_zeroed(region, 0), 1);
	/* ranges straddling the zeroed region must fail */
	UT_ASSERTeq(util_is_zeroed(region + 999, 1000), 0);
	UT_ASSERTeq(util_is_zeroed(region + 1000, 1001), 0);
	UT_ASSERTeq(util_is_zeroed(region + 1001, 1000), 0);

	char *mid = region + 1000;

	/* a single non-zero byte anywhere inside the range must be found */
	const size_t offs[] = {0, 1, 239, 999};
	for (size_t i = 0; i < sizeof(offs) / sizeof(offs[0]); ++i) {
		mid[offs[i]] = 1;
		UT_ASSERTeq(util_is_zeroed(mid, 1000), 0);
		memset(mid, 0, 1000);
	}

	/* a byte just past the end of the range must not matter */
	mid[1000] = 1;
	UT_ASSERTeq(util_is_zeroed(mid, 1000), 1);

	DONE(NULL);
}
| 1,196 | 20.763636 | 53 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_map_file/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
/* route these libc-wrapper calls to the mocks in mocks_windows.c */
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
| 608 | 28 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_map_file/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 *
 * Requests larger than MAX_LEN fail with ENOSPC so the test can exercise
 * the out-of-space path; smaller requests pass through to the real call.
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
 * ftruncate -- interpose on libc ftruncate()
 *
 * Lengths above MAX_LEN fail with ENOSPC (note: unlike posix_fallocate,
 * ftruncate reports errors via errno and a -1 return).
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_heap/obj_heap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_heap.c -- unit test for heap
*
* operations are: 't', 'b', 'r', 'c', 'h', 'a', 'n', 's'
* t: do test_heap, test_recycler
* b: do fault_injection in function container_new_ravl
* r: do fault_injection in function recycler_new
* c: do fault_injection in function container_new_seglists
* h: do fault_injection in function heap_boot
* a: do fault_injection in function alloc_class_new
* n: do fault_injection in function alloc_class_collection_new
* s: do fault_injection in function stats_new
*/
#include "libpmemobj.h"
#include "palloc.h"
#include "heap.h"
#include "recycler.h"
#include "obj.h"
#include "unittest.h"
#include "util.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "container.h"
#include "alloc_class.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE PMEMOBJ_MIN_POOL
#define MAX_BLOCKS 3
struct mock_pop {
PMEMobjpool p;
void *heap;
};
/*
 * obj_heap_persist -- pmem_ops persist callback for the mocked pool;
 * msyncs the range and asserts that it succeeded
 */
static int
obj_heap_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
	int rc = pmem_msync(addr, len);
	UT_ASSERTeq(rc, 0);
	return 0;
}
/*
 * obj_heap_flush -- pmem_ops flush callback for the mocked pool;
 * implemented identically to the persist callback
 */
static int
obj_heap_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
	int rc = pmem_msync(addr, len);
	UT_ASSERTeq(rc, 0);
	return 0;
}
/* obj_heap_drain -- pmem_ops drain callback; intentionally a no-op */
static void
obj_heap_drain(void *ctx)
{
}
/*
 * obj_heap_memset -- pmem_ops memset callback for the mocked pool;
 * fills the range and msyncs it
 */
static void *
obj_heap_memset(void *ctx, void *dst, int c, size_t len, unsigned flags)
{
	void *ret = memset(dst, c, len);
	UT_ASSERTeq(pmem_msync(dst, len), 0);
	return ret;
}
/*
 * init_run_with_score -- initialize the given chunk as a run with some
 * free blocks; `score` must be a multiple of 64 because the bitmap is
 * cleared one 64-bit word at a time (words 0..score/64 inclusive)
 */
static void
init_run_with_score(struct heap_layout *l, uint32_t chunk_id, int score)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;
	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
	run->hdr.alignment = 0;
	run->hdr.block_size = 1024;
	/* mark every block as used... */
	memset(run->content, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
	UT_ASSERTeq(score % 64, 0);
	score /= 64;
	uint64_t *bitmap = (uint64_t *)run->content;
	/* ...then clear whole bitmap words, freeing the leading blocks */
	for (; score >= 0; --score) {
		bitmap[score] = 0;
	}
}
/*
 * init_run_with_max_block -- initialize the given chunk as a run whose
 * bitmap is fully allocated except for a hand-crafted pattern in word 3;
 * the largest free extent in that pattern is 10 consecutive blocks
 * (test_recycler relies on size_idx 10 fitting and 11 not)
 */
static void
init_run_with_max_block(struct heap_layout *l, uint32_t chunk_id)
{
	l->zone0.chunk_headers[chunk_id].size_idx = 1;
	l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
	l->zone0.chunk_headers[chunk_id].flags = 0;
	struct chunk_run *run = (struct chunk_run *)
		&l->zone0.chunks[chunk_id];
	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
	uint64_t *bitmap = (uint64_t *)run->content;
	run->hdr.block_size = 1024;
	run->hdr.alignment = 0;
	memset(bitmap, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
	/* the biggest block is 10 bits */
	bitmap[3] =
	0b1000001110111000111111110000111111000000000011111111110000000011;
}
/*
 * test_container -- exercise a block-container implementation: insert a
 * few memory blocks, retrieve them by best-fit size_idx, and verify the
 * rm_all/is_empty behavior; the container is destroyed at the end
 */
static void
test_container(struct block_container *bc, struct palloc_heap *heap)
{
	UT_ASSERTne(bc, NULL);
	/* initializer order: {chunk_id, zone_id, size_idx, block_off} */
	struct memory_block a = {1, 0, 1, 4};
	struct memory_block b = {1, 0, 2, 8};
	struct memory_block c = {1, 0, 3, 16};
	struct memory_block d = {1, 0, 5, 32};
	init_run_with_score(heap->layout, 1, 128);
	memblock_rebuild_state(heap, &a);
	memblock_rebuild_state(heap, &b);
	memblock_rebuild_state(heap, &c);
	memblock_rebuild_state(heap, &d);
	int ret;
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &d);
	UT_ASSERTeq(ret, 0);
	/* size_idx 6: no inserted block is big enough */
	struct memory_block invalid_ret = {0, 0, 6, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &invalid_ret);
	UT_ASSERTeq(ret, ENOMEM);
	struct memory_block b_ret = {0, 0, 2, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &b_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(b_ret.chunk_id, b.chunk_id);
	struct memory_block a_ret = {0, 0, 1, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &a_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(a_ret.chunk_id, a.chunk_id);
	struct memory_block c_ret = {0, 0, 3, 0};
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(c_ret.chunk_id, c.chunk_id);
	struct memory_block d_ret = {0, 0, 4, 0}; /* less one than target */
	ret = bc->c_ops->get_rm_bestfit(bc, &d_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(d_ret.chunk_id, d.chunk_id);
	/* the container is now empty -- nothing left to fetch */
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);
	ret = bc->c_ops->insert(bc, &a);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &b);
	UT_ASSERTeq(ret, 0);
	ret = bc->c_ops->insert(bc, &c);
	UT_ASSERTeq(ret, 0);
	/* rm_all must leave the container empty */
	bc->c_ops->rm_all(bc);
	ret = bc->c_ops->is_empty(bc);
	UT_ASSERTeq(ret, 1);
	ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
	UT_ASSERTeq(ret, ENOMEM);
	bc->c_ops->destroy(bc);
}
/*
 * do_fault_injection_new_ravl -- inject an allocation failure into
 * container_new_ravl() and verify it fails cleanly with ENOMEM
 */
static void
do_fault_injection_new_ravl(void)	/* (void): ()-style is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_ravl");
	struct block_container *bc = container_new_ravl(NULL);
	UT_ASSERTeq(bc, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_new_seglists -- inject an allocation failure into
 * container_new_seglists() and verify it fails cleanly with ENOMEM
 */
static void
do_fault_injection_new_seglists(void)	/* (void): ()-style is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_seglists");
	struct block_container *bc = container_new_seglists(NULL);
	UT_ASSERTeq(bc, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_heap_boot -- inject an allocation failure into
 * heap_boot() and verify it fails with ENOMEM
 */
static void
do_fault_injection_heap_boot(void)	/* (void): ()-style is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	pop->p_ops.persist = obj_heap_persist;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct pmem_ops *p_ops = &pop->p_ops;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "heap_boot");
	int r = heap_boot(NULL, NULL, heap_size, &pop->heap_size, NULL, p_ops,
		NULL, NULL);
	UT_ASSERTne(r, 0);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_recycler -- inject an allocation failure into
 * recycler_new() and verify it fails cleanly with ENOMEM
 */
static void
do_fault_injection_recycler(void)	/* (void): ()-style is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "recycler_new");
	size_t active_arenas = 1;
	struct recycler *r = recycler_new(NULL, 0, &active_arenas);
	UT_ASSERTeq(r, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_new -- fail the i-th allocation inside
 * alloc_class_new() and verify the collection constructor reports ENOMEM
 */
static void
do_fault_injection_class_new(int i)
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, i, "alloc_class_new");
	struct alloc_class_collection *coll = alloc_class_collection_new();
	UT_ASSERTeq(coll, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_class_collection_new -- inject an allocation failure
 * into alloc_class_collection_new() and verify it fails with ENOMEM
 */
static void
do_fault_injection_class_collection_new(void) /* (void): () is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "alloc_class_collection_new");
	struct alloc_class_collection *c = alloc_class_collection_new();
	UT_ASSERTeq(c, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * do_fault_injection_stats -- inject an allocation failure into
 * stats_new() and verify it fails cleanly with ENOMEM
 */
static void
do_fault_injection_stats(void)	/* (void): ()-style is obsolescent */
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "stats_new");
	struct stats *s = stats_new(NULL);
	UT_ASSERTeq(s, NULL);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_heap -- boot a heap inside an anonymous mapping with mocked
 * pmem_ops, exercise both container implementations, allocate from the
 * default bucket and from a run until the run is exhausted, then verify
 * heap_check passes and clean up
 */
static void
test_heap(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;
	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);
	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	/* an uninitialized region must not pass heap_check */
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);
	/* both container flavors must satisfy the same contract */
	test_container((struct block_container *)container_new_ravl(heap),
		heap);
	test_container((struct block_container *)container_new_seglists(heap),
		heap);
	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);
	UT_ASSERT(c_small->unit_size < c_big->unit_size);
	/* new small buckets should be empty */
	UT_ASSERT(c_big->type == CLASS_RUN);
	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};
	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);
	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run->id,
		HEAP_ARENA_PER_THREAD);
	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
	} while (old_run.block_off != new_run.block_off);
	heap_bucket_release(heap, b_run);
	stats_delete(pop, s);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);
	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
 * test_heap_with_size -- tests scenarios with not-nicely aligned sizes
 *
 * Allocates from the default bucket until the heap is exhausted, then
 * verifies the very last chunk still lies within the mapped region and
 * is fully writable (guards against metadata-alignment bugs).
 */
static void
test_heap_with_size(void)	/* (void): ()-style is obsolescent */
{
	/*
	 * To trigger bug with incorrect metadata alignment we need to
	 * use a size that uses exactly the size used in bugged zone size
	 * calculations.
	 */
	size_t size = PMEMOBJ_MIN_POOL + sizeof(struct zone_header) +
		sizeof(struct chunk_header) * MAX_CHUNK +
		sizeof(PMEMobjpool);
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(size,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, size);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;
	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = size - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, NULL, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);
	struct bucket *b_def = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
	/* drain the heap; mb ends up holding the last allocated chunk */
	struct memory_block mb;
	mb.size_idx = 1;
	while (heap_get_bestfit_block(heap, b_def, &mb) == 0)
		;
	/* mb should now be the last chunk in the heap */
	char *ptr = mb.m_ops->get_real_data(&mb);
	size_t s = mb.m_ops->get_real_size(&mb);
	/* last chunk should be within the heap and accessible */
	UT_ASSERT((size_t)ptr + s <= (size_t)mpop + size);
	VALGRIND_DO_MAKE_MEM_DEFINED(ptr, s);
	memset(ptr, 0xc, s);
	heap_bucket_release(heap, b_def);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);
	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, size);
}
/*
 * test_recycler -- boot a heap with mocked pmem_ops and verify that runs
 * put into a recycler come back out in the expected order, and that a
 * get honors the requested size_idx against the run's largest free block
 */
static void
test_recycler(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.flush = obj_heap_flush;
	pop->p_ops.drain = obj_heap_drain;
	pop->p_ops.memset = obj_heap_memset;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;
	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;
	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);
	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);
	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire(heap,
		DEFAULT_ALLOC_CLASS_ID,
		HEAP_ARENA_PER_THREAD);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);
	int ret;
	size_t active_arenas = 1;
	struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */,
		&active_arenas);
	UT_ASSERTne(r, NULL);
	/* two runs with different amounts of free blocks */
	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);
	init_run_with_score(pop->heap.layout, 15, 0);
	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);
	ret = recycler_put(r, &mrun,
		recycler_element_new(&pop->heap, &mrun));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_element_new(&pop->heap, &mrun2));
	UT_ASSERTeq(ret, 0);
	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	/* four runs put and taken back out, matched by chunk_id */
	init_run_with_score(pop->heap.layout, 7, 64);
	init_run_with_score(pop->heap.layout, 2, 128);
	init_run_with_score(pop->heap.layout, 5, 192);
	init_run_with_score(pop->heap.layout, 10, 256);
	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);
	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;
	ret = recycler_put(r, &mrun,
		recycler_element_new(&pop->heap, &mrun));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_element_new(&pop->heap, &mrun2));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_element_new(&pop->heap, &mrun3));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_element_new(&pop->heap, &mrun4));
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);
	/* run whose largest free extent is 10 blocks: 11 must not fit */
	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);
	ret = recycler_put(r, &mrun5,
		recycler_element_new(&pop->heap, &mrun5));
	UT_ASSERTeq(ret, 0);
	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);
	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);
	recycler_delete(r);
	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);
	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_heap");

	if (argc < 2)
		UT_FATAL("usage: %s path <t|b|r|c|h|a|n|s>", argv[0]);

	/* dispatch on the single-letter operation code */
	char op = argv[1][0];
	if (op == 't') {
		test_heap();
		test_heap_with_size();
		test_recycler();
	} else if (op == 'b') {
		do_fault_injection_new_ravl();
	} else if (op == 'r') {
		do_fault_injection_recycler();
	} else if (op == 'c') {
		do_fault_injection_new_seglists();
	} else if (op == 'h') {
		do_fault_injection_heap_boot();
	} else if (op == 'a') {
		/* exercise both the first and the second alloc_class_new call */
		do_fault_injection_class_new(1);
		do_fault_injection_class_new(2);
	} else if (op == 'n') {
		do_fault_injection_class_collection_new();
	} else if (op == 's') {
		do_fault_injection_stats();
	} else {
		UT_FATAL("unknown operation");
	}

	DONE(NULL);
}
| 16,917 | 25.027692 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_movnt_align/movnt_align_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* movnt_align_common.c -- common part for tests doing a persistent movnt align
*/
#include "unittest.h"
#include "movnt_align_common.h"
char *Src;
char *Dst;
char *Scratch;
/*
 * check_memmove -- fill src and dst with distinct patterns, run the given
 * memmove-like function and verify the destination matches the source
 */
void
check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
	unsigned flags)
{
	char *to = Dst + doff;
	char *from = Src + soff;

	memset(to, 1, len);
	memset(from, 0, len);

	fn(to, from, len, flags);

	if (memcmp(to, from, len) != 0)
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memcpy -- run the given memcpy-like function and verify the whole
 * buffer (not just the copied range) against a scratch reference image,
 * so that writes outside the target range are detected too
 */
void
check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
	unsigned flags)
{
	char *to = Dst + doff;
	char *from = Src + soff;

	/* background patterns; Scratch mirrors what Dst should become */
	memset(Dst, 2, N_BYTES);
	memset(Src, 3, N_BYTES);
	memset(Scratch, 2, N_BYTES);

	memset(to, 1, len);
	memset(from, 0, len);
	memcpy(Scratch + doff, from, len);

	fn(to, from, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES) != 0)
		UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memset -- run the given memset-like function and compare the
 * whole buffer against a reference image built with memset(3), including
 * the bytes surrounding the target range
 */
void
check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags)
{
	/* expected image: background of 2 with `len` ones at `off` */
	memset(Scratch, 2, N_BYTES);
	memset(Scratch + off, 1, len);

	memset(Dst, 2, N_BYTES);
	fn(Dst + off, 1, len, flags);

	if (memcmp(Dst, Scratch, N_BYTES) != 0)
		UT_FATAL("memset failed");
}
/*
 * Flags -- flag combinations each test variant is run with; the last
 * entry deliberately sets every flag at once
 */
unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 1,830 | 21.060241 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_movnt_align/pmem2_movnt_align.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt_align.c -- test for functions with non-temporal stores
*
* usage: pmem2_movnt_align file [C|F|B|S]
*
* C - pmem2_memcpy()
* B - pmem2_memmove() in backward direction
* F - pmem2_memmove() in forward direction
* S - pmem2_memset()
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem2.h"
#include "unittest.h"
#include "movnt_align_common.h"
#include "ut_pmem2.h"
static pmem2_memset_fn memset_fn;
static pmem2_memcpy_fn memcpy_fn;
static pmem2_memmove_fn memmove_fn;
/* run check_memmove once for every tested flag combination */
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memmove(doff, soff, len, memmove_fn, Flags[i]);
}
/* run check_memcpy once for every tested flag combination */
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memcpy(doff, soff, len, memcpy_fn, Flags[i]);
}
/* run check_memset once for every tested flag combination */
static void
check_memset_variants(size_t off, size_t len)
{
	/* size_t index avoids a signed/unsigned comparison with ARRAY_SIZE */
	for (size_t i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memset(off, len, memset_fn, Flags[i]);
}
int
main(int argc, char *argv[])
{
	if (argc != 3)
		UT_FATAL("usage: %s file type", argv[0]);
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;
	int fd;
	char type = argv[2][0];
	/* environment knobs are echoed into the test name for the log */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem2_movnt_align %c %s %savx %savx512f", type,
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	/* map the file only to obtain the mem* entry points, then unmap */
	fd = OPEN(argv[1], O_RDWR);
	PMEM2_CONFIG_NEW(&cfg);
	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	PMEM2_CONFIG_DELETE(&cfg);
	memset_fn = pmem2_get_memset_fn(map);
	memcpy_fn = pmem2_get_memcpy_fn(map);
	memmove_fn = pmem2_get_memmove_fn(map);
	ret = pmem2_unmap(&map);
	UT_ASSERTeq(ret, 0);
	CLOSE(fd);
	size_t page_size = Ut_pagesize;
	size_t s;
	switch (type) {
	case 'C': /* memcpy */
		/* mmap with guard pages */
		Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Src == NULL || Dst == NULL)
			UT_FATAL("!mmap");
		Scratch = MALLOC(N_BYTES);
		/* check memcpy with 0 size */
		check_memcpy_variants(0, 0, 0);
		/* check memcpy with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(0, 0, N_BYTES - s);
		/* check memcpy with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, 0, N_BYTES - s);
		/* check memcpy with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, s, N_BYTES - 2 * s);
		MUNMAP_ANON_ALIGNED(Src, N_BYTES);
		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);
		break;
	case 'B': /* memmove backward */
		/*
		 * mmap with guard pages; Src and Dst share one mapping and
		 * overlap, forcing the backward-copy path
		 */
		Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		Dst = Src + N_BYTES - page_size;
		if (Src == NULL)
			UT_FATAL("!mmap");
		/* check memmove in backward direction with 0 size */
		check_memmove_variants(0, 0, 0);
		/* check memmove in backward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);
		/* check memmove in backward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);
		/*
		 * check memmove in backward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);
		MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
		break;
	case 'F': /* memmove forward */
		/*
		 * mmap with guard pages; overlapping regions with Dst below
		 * Src, forcing the forward-copy path
		 */
		Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		Src = Dst + N_BYTES - page_size;
		if (Src == NULL)
			UT_FATAL("!mmap");
		/* check memmove in forward direction with 0 size */
		check_memmove_variants(0, 0, 0);
		/* check memmove in forward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);
		/* check memmove in forward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);
		/*
		 * check memmove in forward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);
		MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
		break;
	case 'S': /* memset */
		/* mmap with guard pages */
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Dst == NULL)
			UT_FATAL("!mmap");
		Scratch = MALLOC(N_BYTES);
		/* check memset with 0 size */
		check_memset_variants(0, 0);
		/* check memset with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(0, N_BYTES - s);
		/* check memset with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - s);
		/* check memset with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - 2 * s);
		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);
		break;
	default:
		UT_FATAL("!wrong type of test");
		break;
	}
	DONE(NULL);
}
| 5,283 | 24.042654 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_movnt_align/movnt_align_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* movnt_align_common.h -- header file for common movnt_align test utilities
*/
#ifndef MOVNT_ALIGN_COMMON_H
#define MOVNT_ALIGN_COMMON_H 1
#include "unittest.h"
#include "file.h"
#define N_BYTES (Ut_pagesize * 2)
extern char *Src;
extern char *Dst;
extern char *Scratch;
extern unsigned Flags[10];
typedef void *(*mem_fn)(void *, const void *, size_t);
typedef void *pmem_memcpy_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
void check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
unsigned flags);
void check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
unsigned flags);
void check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags);
#endif
| 989 | 26.5 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_memmove/pmem_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memmove.c -- unit test for doing a memmove
*
* usage:
* pmem_memmove file b:length [d:{offset}] [s:offset] [o:{1|2} S:{overlap}]
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memmove_common.h"
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
/*
 * pmem_memmove_persist_wrapper -- (internal) adapt pmem_memmove_persist()
 * to the flag-taking pmem_memmove_fn signature used by the test driver
 */
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	/* the legacy API takes no flags; drop them */
	(void) flags;

	void *ret = pmem_memmove_persist(pmemdest, src, len);
	return ret;
}
/*
 * pmem_memmove_nodrain_wrapper -- (internal) adapt pmem_memmove_nodrain()
 * to the flag-taking pmem_memmove_fn signature used by the test driver
 */
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	/* the legacy API takes no flags; drop them */
	(void) flags;

	void *ret = pmem_memmove_nodrain(pmemdest, src, len);
	return ret;
}
/*
 * do_persist_ddax -- (internal) persist the given range, telling
 * util_persist_auto() the mapping is device DAX (first argument is 1)
 */
static void
do_persist_ddax(const void *ptr, size_t size)
{
	util_persist_auto(1, ptr, size);
}
/*
 * do_persist -- (internal) persist the given range, telling
 * util_persist_auto() the mapping is not device DAX (first argument is 0)
 */
static void
do_persist(const void *ptr, size_t size)
{
	util_persist_auto(0, ptr, size);
}
/*
* swap_mappings - given to mmapped regions swap them.
*
* Try swapping src and dest by unmapping src, mapping a new dest with
* the original src address as a hint. If successful, unmap original dest.
* Map a new src with the original dest as a hint.
* In the event of an error caller must unmap all passed in mappings.
*/
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
	/* remember the original addresses before unmapping anything */
	char *d = *dest;
	char *s = *src;
	char *ts;
	char *td;
	MUNMAP(*src, size);
	/* mmap destination using src addr as hint */
	td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	MUNMAP(*dest, size);
	*dest = td;
	/*
	 * mmap src using original destination addr as a hint; note the new
	 * src is anonymous memory -- only dest stays backed by fd
	 */
	ts = MMAP(d, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
		-1, 0);
	*src = ts;
}
/*
 * do_memmove_variants -- (internal) run a single memmove scenario through
 * every entry point: the two flag-less legacy wrappers first, then
 * pmem_memmove() once per flag combination from the global Flags table
 */
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
		size_t dest_off, size_t src_off, size_t bytes, persist_fn p)
{
	/* legacy entry points, which take no flags */
	do_memmove(dst, src, file_name, dest_off, src_off, bytes,
			pmem_memmove_persist_wrapper, 0, p);
	do_memmove(dst, src, file_name, dest_off, src_off, bytes,
			pmem_memmove_nodrain_wrapper, 0, p);

	/* flag-taking API, exercised with each supported flag set */
	for (int idx = 0; idx < ARRAY_SIZE(Flags); ++idx)
		do_memmove(dst, src, file_name, dest_off, src_off, bytes,
				pmem_memmove, Flags[idx], p);
}
/*
 * main -- parse the command line (d:<dst off>, s:<src off>, b:<len>,
 * o:<0|1>), map the test file (plus an anonymous second buffer when
 * src and dst differ), and run every memmove variant in both address
 * orders (src above dst and dst above src).
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dst;
	char *src;
	size_t dst_off = 0;
	size_t src_off = 0;
	size_t bytes = 0;
	int who = 0;	/* 0 - separate src/dst buffers, 1 - same buffer */
	size_t mapped_len;
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");
	START(argc, argv, "pmem_memmove %s %s %s %s %savx %savx512f",
			argc > 2 ? argv[2] : "null",
			argc > 3 ? argv[3] : "null",
			argc > 4 ? argv[4] : "null",
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");
	fd = OPEN(argv[1], O_RDWR);
	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file %s", argv[1]);
	/* device DAX needs a different flush path than regular files */
	persist_fn p;
	p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
	if (argc < 3)
		USAGE();
	/* each remaining argument is "<letter>:<number>" */
	for (int arg = 2; arg < argc; arg++) {
		if (strchr("dsbo",
				argv[arg][0]) == NULL || argv[arg][1] != ':')
			UT_FATAL("op must be d: or s: or b: or o:");
		size_t val = STRTOUL(&argv[arg][2], NULL, 0);
		switch (argv[arg][0]) {
		case 'd':
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with d: option",
						val);
			dst_off = val;
			break;
		case 's':
			if (val <= 0)
				UT_FATAL("bad offset (%lu) with s: option",
						val);
			src_off = val;
			break;
		case 'b':
			if (val <= 0)
				UT_FATAL("bad length (%lu) with b: option",
						val);
			bytes = val;
			break;
		case 'o':
			if (val != 1 && val != 0)
				UT_FATAL("bad val (%lu) with o: option",
						val);
			who = (int)val;
			break;
		}
	}
	if (who == 0) {
		/* src > dest */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!could not mmap dest file %s", argv[1]);
		src = MMAP(dst + mapped_len, mapped_len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
			-1, 0);
		/*
		 * Its very unlikely that src would not be > dest. pmem_map_file
		 * chooses the first unused address >= 1TB, large
		 * enough to hold the give range, and 1GB aligned. Log
		 * the error if the mapped addresses cannot be swapped
		 * but allow the test to continue.
		 */
		if (src <= dst) {
			swap_mappings(&dst, &src, mapped_len, fd);
			if (src <= dst)
				UT_FATAL("cannot map files in memory order");
		}
		do_memmove_variants(dst, src, argv[1],
			dst_off, src_off, bytes, p);
		/* dest > src -- repeat with the buffers exchanged */
		swap_mappings(&dst, &src, mapped_len, fd);
		if (dst <= src)
			UT_FATAL("cannot map files in memory order");
		do_memmove_variants(dst, src, argv[1],
			dst_off, src_off, bytes, p);
		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);
		MUNMAP(src, mapped_len);
	} else {
		/* use the same buffer for source and destination */
		dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
		if (dst == NULL)
			UT_FATAL("!Could not mmap %s: \n", argv[1]);
		memset(dst, 0, bytes);
		p(dst, bytes);
		do_memmove_variants(dst, dst, argv[1],
			dst_off, src_off, bytes, p);
		int ret = pmem_unmap(dst, mapped_len);
		UT_ASSERTeq(ret, 0);
	}
	CLOSE(fd);
	DONE(NULL);
}
| 5,226 | 22.334821 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_heap_size/obj_ctl_heap_size.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_heap_size.c -- tests for the ctl entry points: heap.size.*
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_heap_size"
#define CUSTOM_GRANULARITY ((1 << 20) * 10)
#define OBJ_SIZE 1024
/*
 * main -- disable automatic heap growth, allocate until the pool is
 * exhausted, then enlarge it either by a one-shot extend ('x') or by
 * re-enabling growth with a custom granularity ('w'), and verify one
 * more allocation succeeds.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_heap_size");

	if (argc != 3)
		UT_FATAL("usage: %s poolset [w|x]", argv[0]);

	const char *path = argv[1];
	char mode = argv[2][0];

	PMEMobjpool *pop = pmemobj_open(path, LAYOUT);
	if (pop == NULL)
		UT_FATAL("!pmemobj_open: %s", path);

	/* granularity 0 turns off automatic pool growth */
	size_t disable_granularity = 0;
	int ret = pmemobj_ctl_set(pop, "heap.size.granularity",
			&disable_granularity);
	UT_ASSERTeq(ret, 0);

	/* allocate until OOM */
	while (pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL) == 0)
		;

	switch (mode) {
	case 'x': {
		/* grow the heap once, explicitly */
		ssize_t extend_size = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_exec(pop, "heap.size.extend", &extend_size);
		UT_ASSERTeq(ret, 0);
		break;
	}
	case 'w': {
		/* re-enable automatic growth with a custom step */
		ssize_t new_granularity = CUSTOM_GRANULARITY;
		ret = pmemobj_ctl_set(pop, "heap.size.granularity",
				&new_granularity);
		UT_ASSERTeq(ret, 0);

		/* read the value back and confirm it stuck */
		ssize_t curr_granularity;
		ret = pmemobj_ctl_get(pop, "heap.size.granularity",
				&curr_granularity);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(new_granularity, curr_granularity);
		break;
	}
	default:
		UT_ASSERT(0);
	}

	/* should succeed */
	ret = pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,500 | 21.402985 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_basic_integration/obj_basic_integration.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_basic_integration.c -- Basic integration tests
*
*/
#include <stddef.h>
#include "unittest.h"
#include "obj.h"
#define TEST_STR "abcdefgh"
#define TEST_STR_LEN 8
#define TEST_VALUE 5
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(basic);
POBJ_LAYOUT_ROOT(basic, struct dummy_root);
POBJ_LAYOUT_TOID(basic, struct dummy_node);
POBJ_LAYOUT_TOID(basic, struct dummy_node_c);
POBJ_LAYOUT_END(basic);
struct dummy_node {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
struct dummy_node_c {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
struct dummy_root {
int value;
PMEMmutex lock;
TOID(struct dummy_node) node;
POBJ_LIST_HEAD(dummy_list, struct dummy_node) dummies;
POBJ_LIST_HEAD(moved_list, struct dummy_node) moved;
};
/*
 * dummy_node_constructor -- (internal) initialize a freshly allocated
 * dummy_node with the integer passed through arg and persist that field
 */
static int
dummy_node_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct dummy_node *node = (struct dummy_node *)ptr;
	int value = *(int *)arg;

	node->value = value;
	pmemobj_persist(pop, &node->value, sizeof(node->value));

	return 0;
}
/*
 * test_alloc_api -- (internal) exercise the non-transactional allocation
 * macros and functions: typed alloc/zalloc/realloc, per-type and global
 * iteration, freeing a NULL oid, and out-of-range size rejections
 */
static void
test_alloc_api(PMEMobjpool *pop)
{
	TOID(struct dummy_node) node_zeroed;
	TOID(struct dummy_node_c) node_constructed;
	POBJ_ZNEW(pop, &node_zeroed, struct dummy_node);
	UT_ASSERT_rt(OID_INSTANCEOF(node_zeroed.oid, struct dummy_node));
	int *test_val = (int *)MALLOC(sizeof(*test_val));
	*test_val = TEST_VALUE;
	POBJ_NEW(pop, &node_constructed, struct dummy_node_c,
			dummy_node_constructor, test_val);
	FREE(test_val);
	/* zeroed nodes must read back as 0 */
	TOID(struct dummy_node) iter;
	POBJ_FOREACH_TYPE(pop, iter) {
		UT_ASSERTeq(D_RO(iter)->value, 0);
	}
	/* constructed nodes must carry the constructor-supplied value */
	TOID(struct dummy_node_c) iter_c;
	POBJ_FOREACH_TYPE(pop, iter_c) {
		UT_ASSERTeq(D_RO(iter_c)->value, TEST_VALUE);
	}
	/* the untyped iterator must see at least the two nodes above */
	PMEMoid oid_iter;
	int nodes_count = 0;
	POBJ_FOREACH(pop, oid_iter) {
		nodes_count++;
	}
	UT_ASSERTne(nodes_count, 0);
	POBJ_FREE(&node_zeroed);
	POBJ_FREE(&node_constructed);
	/* after freeing both, the pool must iterate empty */
	nodes_count = 0;
	POBJ_FOREACH(pop, oid_iter) {
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 0);
	int val = 10;
	POBJ_ALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c),
			dummy_node_constructor, &val);
	/* realloc must preserve the object's type number */
	POBJ_REALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c) + 1000);
	UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
			TOID_TYPE_NUM(struct dummy_node_c));
	POBJ_ZREALLOC(pop, &node_constructed, struct dummy_node_c,
			sizeof(struct dummy_node_c) + 2000);
	UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
			TOID_TYPE_NUM(struct dummy_node_c));
	POBJ_FREE(&node_constructed);
	POBJ_ZALLOC(pop, &node_zeroed, struct dummy_node,
			sizeof(struct dummy_node));
	POBJ_FREE(&node_zeroed);
	/* freeing a NULL oid must be a harmless no-op */
	PMEMoid oid = OID_NULL;
	POBJ_FREE(&oid);
	/* oversized requests must fail with ENOMEM */
	int err = 0;
	err = pmemobj_alloc(pop, NULL, SIZE_MAX, 0, NULL, NULL);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);
	err = pmemobj_zalloc(pop, NULL, SIZE_MAX, 0);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);
	err = pmemobj_alloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0, NULL,
			NULL);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);
	err = pmemobj_zalloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	UT_ASSERTeq(err, -1);
	UT_ASSERTeq(errno, ENOMEM);
}
/*
 * test_realloc_api -- (internal) exercise pmemobj_realloc() through its
 * full contract: grow, shrink, free via size 0, allocate from a NULL oid,
 * no-op reallocs, and rejection of oversized requests
 */
static void
test_realloc_api(PMEMobjpool *pop)
{
	PMEMoid oid = OID_NULL;
	int ret;
	ret = pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("alloc: %u, size: %zu", 128,
		pmemobj_alloc_usable_size(oid));
	/* grow */
	ret = pmemobj_realloc(pop, &oid, 655360, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 128, 655360,
		pmemobj_alloc_usable_size(oid));
	/* shrink */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 655360, 1,
		pmemobj_alloc_usable_size(oid));
	/* free -- realloc to size 0 releases the object */
	ret = pmemobj_realloc(pop, &oid, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(OID_IS_NULL(oid));
	UT_OUT("free");
	/* alloc -- realloc from a NULL oid allocates */
	ret = pmemobj_realloc(pop, &oid, 777, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 0, 777,
		pmemobj_alloc_usable_size(oid));
	/* shrink */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 777, 1,
		pmemobj_alloc_usable_size(oid));
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	/* a freed (NULL) oid reports zero usable size */
	UT_ASSERTeq(pmemobj_alloc_usable_size(oid), 0);
	UT_OUT("free");
	/* alloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 0, 1,
		pmemobj_alloc_usable_size(oid));
	/* do nothing -- same size realloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	UT_OUT("realloc: %u => %u, size: %zu", 1, 1,
		pmemobj_alloc_usable_size(oid));
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	UT_OUT("free");
	/* do nothing -- size 0 on a NULL oid */
	ret = pmemobj_realloc(pop, &oid, 0, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(OID_IS_NULL(oid));
	/* alloc */
	ret = pmemobj_realloc(pop, &oid, 1, 0);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	/* grow beyond reasonable size -- must fail with ENOMEM */
	ret = pmemobj_realloc(pop, &oid, SIZE_MAX, 0);
	UT_ASSERTeq(ret, -1);
	UT_ASSERTeq(errno, ENOMEM);
	ret = pmemobj_realloc(pop, &oid, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	UT_ASSERTeq(ret, -1);
	UT_ASSERTeq(errno, ENOMEM);
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
}
/*
 * test_list_api -- (internal) exercise the POBJ_LIST_* macros: insert at
 * head/tail/before/after, forward and reverse traversal (with and without
 * the FOREACH macros), moving elements between two lists, and removal
 * with and without freeing
 */
static void
test_list_api(PMEMobjpool *pop)
{
	TOID(struct dummy_root) root;
	root = POBJ_ROOT(pop, struct dummy_root);
	int nodes_count = 0;
	UT_ASSERTeq(pmemobj_type_num(root.oid), POBJ_ROOT_TYPE_NUM);
	UT_COMPILE_ERROR_ON(TOID_TYPE_NUM_OF(root) != POBJ_ROOT_TYPE_NUM);
	TOID(struct dummy_node) first;
	TOID(struct dummy_node) iter;
	/* the list starts out empty */
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
			D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 0);
	int test_val = TEST_VALUE;
	PMEMoid ret;
	/* should fail -- oversized insert-new requests return OID_NULL */
	ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			SIZE_MAX, dummy_node_constructor,
			&test_val);
	UT_ASSERTeq(errno, ENOMEM);
	UT_ASSERT(OID_IS_NULL(ret));
	errno = 0;
	ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			PMEMOBJ_MAX_ALLOC_SIZE + 1, dummy_node_constructor,
			&test_val);
	UT_ASSERTeq(errno, ENOMEM);
	UT_ASSERT(OID_IS_NULL(ret));
	/* valid insert-new at head and tail */
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);
	test_val++;
	POBJ_LIST_INSERT_NEW_TAIL(pop, &D_RW(root)->dummies, plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);
	TOID(struct dummy_node) inserted =
		POBJ_LIST_FIRST(&D_RW(root)->dummies);
	UT_ASSERTeq(pmemobj_type_num(inserted.oid),
			TOID_TYPE_NUM(struct dummy_node));
	/* insert a separately allocated node */
	TOID(struct dummy_node) node;
	POBJ_ZNEW(pop, &node, struct dummy_node);
	POBJ_LIST_INSERT_HEAD(pop, &D_RW(root)->dummies, node, plist);
	nodes_count = 0;
	POBJ_LIST_FOREACH(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 3);
	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_NEXT: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_NEXT(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 3);
	/* move one element to the second list and back, via head and tail */
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->dummies,
			&D_RW(root)->moved, node, plist, plist_m);
	UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
	POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->moved,
			&D_RW(root)->dummies, node, plist_m, plist);
	POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->dummies,
			&D_RW(root)->moved, node, plist, plist_m);
	UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
	POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->moved,
			&D_RW(root)->dummies, node, plist_m, plist);
	/* remove and reinsert, then remove with free */
	POBJ_LIST_REMOVE(pop, &D_RW(root)->dummies, node, plist);
	POBJ_LIST_INSERT_TAIL(pop, &D_RW(root)->dummies, node, plist);
	POBJ_LIST_REMOVE_FREE(pop, &D_RW(root)->dummies, node, plist);
	nodes_count = 0;
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
			D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 2);
	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_PREV(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 2);
	/* insert-new relative to existing elements */
	test_val++;
	POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(root)->dummies,
			POBJ_LIST_FIRST(&D_RO(root)->dummies), plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);
	test_val++;
	POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(root)->dummies,
			POBJ_LIST_LAST(&D_RO(root)->dummies, plist), plist,
			sizeof(struct dummy_node), dummy_node_constructor,
			&test_val);
	nodes_count = 0;
	POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
		UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
			D_RO(iter)->value);
		nodes_count++;
	}
	UT_ASSERTeq(nodes_count, 4);
	/* now do the same, but w/o using FOREACH macro */
	nodes_count = 0;
	first = POBJ_LIST_LAST(&D_RO(root)->dummies, plist);
	iter = first;
	do {
		UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
		nodes_count++;
		iter = POBJ_LIST_PREV(iter, plist);
	} while (!TOID_EQUALS(iter, first));
	UT_ASSERTeq(nodes_count, 4);
}
/*
 * test_tx_api -- (internal) exercise the transaction macros: TX_FINALLY
 * cleanup of volatile state, aborts of oversized TX_ALLOC/TX_ZALLOC/
 * TX_XALLOC/TX_REALLOC, TX_MEMSET/TX_MEMCPY/TX_SET, TX_FREE, and error
 * codes of nested transactions with NULL/bogus pool pointers
 */
static void
test_tx_api(PMEMobjpool *pop)
{
	TOID(struct dummy_root) root;
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct dummy_root)));
	int *vstate = NULL; /* volatile state */
	/* TX_FINALLY must run on commit and release the volatile state */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		vstate = (int *)MALLOC(sizeof(*vstate));
		*vstate = TEST_VALUE;
		TX_ADD(root);
		D_RW(root)->value = *vstate;
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_FINALLY {
		FREE(vstate);
		vstate = NULL;
	} TX_END
	UT_ASSERTeq(vstate, NULL);
	UT_ASSERTeq(D_RW(root)->value, TEST_VALUE);
	/* each oversized allocation below must abort the transaction */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_XALLOC(struct dummy_node, SIZE_MAX,
				POBJ_XALLOC_ZERO);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	errno = 0;
	TX_BEGIN_LOCK(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ALLOC(struct dummy_node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		D_RW(root)->node = TX_ZALLOC(struct dummy_node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	errno = 0;
	/* an aborted TX_REALLOC must roll back the preceding TX_ZNEW too */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node, SIZE_MAX);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
	errno = 0;
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		D_RW(root)->node = TX_REALLOC(D_RO(root)->node,
				PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
	UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
	errno = 0;
	/* transactional memset/memcpy/field-set must survive commit */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		D_RW(root)->node = TX_ZNEW(struct dummy_node);
		TX_MEMSET(D_RW(D_RW(root)->node)->teststr, 'a', TEST_STR_LEN);
		TX_MEMCPY(D_RW(D_RW(root)->node)->teststr, TEST_STR,
				TEST_STR_LEN);
		TX_SET(D_RW(root)->node, value, TEST_VALUE);
	} TX_END
	UT_ASSERTeq(D_RW(D_RW(root)->node)->value, TEST_VALUE);
	UT_ASSERT(strncmp(D_RW(D_RW(root)->node)->teststr, TEST_STR,
		TEST_STR_LEN) == 0);
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
		TX_ADD(root);
		UT_ASSERT(!TOID_IS_NULL(D_RW(root)->node));
		TX_FREE(D_RW(root)->node);
		D_RW(root)->node = TOID_NULL(struct dummy_node);
		TOID_ASSIGN(D_RW(root)->node, OID_NULL);
	} TX_END
	errno = 0;
	/* a nested transaction with a NULL pool must fail with EFAULT */
	TX_BEGIN(pop) {
		TX_BEGIN(NULL) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EFAULT);
	} TX_END
	errno = 0;
	/* a nested transaction with a different (bogus) pool -> EINVAL */
	TX_BEGIN(pop) {
		TX_BEGIN((PMEMobjpool *)(uintptr_t)7) {
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
		UT_ASSERT(errno == EINVAL);
	} TX_END
	UT_OUT("%s", pmemobj_errormsg());
	TX_BEGIN(pop) {
		pmemobj_tx_abort(ECANCELED);
	} TX_END
	UT_OUT("%s", pmemobj_errormsg());
}
/*
 * test_action_api -- (internal) exercise the action (reserve/publish)
 * API: publish directly, publish inside a transaction, cancel, and the
 * typed POBJ_RESERVE_* macros
 */
static void
test_action_api(PMEMobjpool *pop)
{
	struct pobj_action act[2];
	uint64_t dest_value = 0;
	/* reserve + set_value become visible only after publish */
	PMEMoid oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	pmemobj_publish(pop, act, 2);
	UT_ASSERTeq(dest_value, 1);
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	/* a reservation can also be published from within a transaction */
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	TX_BEGIN(pop) {
		pmemobj_tx_publish(act, 1);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	/* canceled actions must leave the destination untouched */
	dest_value = 0;
	oid = pmemobj_reserve(pop, &act[0], 1, 1);
	pmemobj_set_value(pop, &act[1], &dest_value, 1);
	pmemobj_cancel(pop, act, 2);
	UT_ASSERTeq(dest_value, 0);
	/* typed reservation macros */
	TOID(struct dummy_node) n =
		POBJ_RESERVE_NEW(pop, struct dummy_node, &act[0]);
	TOID(struct dummy_node_c) c =
		POBJ_RESERVE_ALLOC(pop, struct dummy_node_c,
				sizeof(struct dummy_node_c), &act[1]);
	pmemobj_publish(pop, act, 2);
	/* valgrind would warn in case they were not allocated */
	D_RW(n)->value = 1;
	D_RW(c)->value = 1;
	pmemobj_persist(pop, D_RW(n), sizeof(struct dummy_node));
	pmemobj_persist(pop, D_RW(c), sizeof(struct dummy_node_c));
}
/*
 * test_offsetof -- (internal) verify that TOID_OFFSETOF agrees with the
 * standard offsetof for every member of the layout structures; all checks
 * are compile-time, r and n are never dereferenced
 */
static void
test_offsetof(void)
{
	TOID(struct dummy_root) r;
	TOID(struct dummy_node) n;
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, value) !=
				offsetof(struct dummy_root, value));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, lock) !=
				offsetof(struct dummy_root, lock));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, node) !=
				offsetof(struct dummy_root, node));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, dummies) !=
				offsetof(struct dummy_root, dummies));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, moved) !=
				offsetof(struct dummy_root, moved));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, value) !=
				offsetof(struct dummy_node, value));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, teststr) !=
				offsetof(struct dummy_node, teststr));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist) !=
				offsetof(struct dummy_node, plist));
	UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist_m) !=
				offsetof(struct dummy_node, plist_m));
}
/*
 * test_layout -- (internal) an empty layout declaration must report
 * zero declared types
 */
static void
test_layout(void)
{
	/* declare a layout containing no types at all */
	POBJ_LAYOUT_BEGIN(mylayout);
	POBJ_LAYOUT_END(mylayout);

	size_t ntypes = POBJ_LAYOUT_TYPES_NUM(mylayout);
	UT_ASSERTeq(ntypes, 0);
}
/*
 * test_root_size -- (internal) the reported root object size is 0 before
 * the first pmemobj_root() call and equals the requested size afterwards
 */
static void
test_root_size(PMEMobjpool *pop)
{
	/* no root object has been created yet */
	UT_ASSERTeq(pmemobj_root_size(pop), 0);

	/* creating the root sets its reported size */
	pmemobj_root(pop, sizeof(struct dummy_root));
	UT_ASSERTeq(pmemobj_root_size(pop), sizeof(struct dummy_root));
}
/*
 * main -- create a pool, run all API sub-tests against it, then reopen
 * it (optionally after fault injection) and verify file locking and
 * pool consistency
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_basic_integration");
	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(basic) != 2);
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file-name [inject_fault]", argv[0]);
	const char *path = argv[1];
	/* argv[argc] is NULL per the C standard, so this is safe if argc==2 */
	const char *opt = argv[2];
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
			0, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	test_root_size(pop);
	test_alloc_api(pop);
	test_realloc_api(pop);
	test_list_api(pop);
	test_tx_api(pop);
	test_action_api(pop);
	test_offsetof();
	test_layout();
	pmemobj_close(pop);
	/* fault injection -- a failing malloc must make pmemobj_open fail */
	if (argc == 3 && strcmp(opt, "inject_fault") == 0) {
		if (pmemobj_fault_injection_enabled()) {
			pmemobj_inject_fault_at(PMEM_MALLOC, 1,
					"heap_check_remote");
			pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic));
			UT_ASSERTeq(pop, NULL);
			UT_ASSERTeq(errno, ENOMEM);
		}
	}
	if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);
	/* second open should fail, checks file locking */
	if ((pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) != NULL)
		UT_FATAL("!pmemobj_open: %s", path);
	pmemobj_close(pop);
	int result = pmemobj_check(path, POBJ_LAYOUT_NAME(basic));
	if (result < 0)
		UT_OUT("!%s: pmemobj_check", path);
	else if (result == 0)
		UT_OUT("%s: pmemobj_check: not consistent", path);
	DONE(NULL);
}
| 17,784 | 25.154412 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_pmemcheck/obj_pmemcheck.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
struct foo {
PMEMmutex bar;
};
/*
 * test_mutex_pmem_mapping_register -- (internal) allocate an object
 * containing a PMEMmutex, lock/unlock it, and use pmemcheck's mapping
 * dumps to observe the mutex region being excluded from (and the object
 * later re-registered in) the tracked pmem mappings
 */
static void
test_mutex_pmem_mapping_register(PMEMobjpool *pop)
{
	PMEMoid foo;
	int ret = pmemobj_alloc(pop, &foo, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(!OID_IS_NULL(foo));
	struct foo *foop = pmemobj_direct(foo);
	ret = pmemobj_mutex_lock(pop, &foop->bar);
	/* foo->bar has been removed from pmem mappings collection */
	VALGRIND_PRINT_PMEM_MAPPINGS;
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_mutex_unlock(pop, &foop->bar);
	UT_ASSERTeq(ret, 0);
	pmemobj_free(&foo);
	/* the entire foo object has been re-registered as pmem mapping */
	VALGRIND_PRINT_PMEM_MAPPINGS;
}
/*
 * main -- create a fresh pool and run the mapping-registration scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmemcheck");

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop = pmemobj_create(argv[1], "pmemcheck",
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	test_mutex_pmem_mapping_register(pop);
	pmemobj_close(pop);

	DONE(NULL);
}
| 1,127 | 21.56 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmreorder_simple/pmreorder_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmreorder_simple.c -- a simple unit test for store reordering
*
* usage: pmreorder_simple g|b|c|m file
* g - write data in a consistent manner
* b - write data in a possibly inconsistent manner
* c - check data consistency
* m - write data to the pool in a consistent way,
* but at the beginning logs some inconsistent values
*
* See README file for more details.
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
/*
* The struct three_field is inconsistent if flag is set and the fields have
* different values.
*/
struct three_field {
int first_field;
int second_field;
int third_field;
int flag;
};
/*
* write_consistent -- (internal) write data in a consistent manner
*/
static void
write_consistent(struct three_field *structp)
{
	/* store and persist all three fields first ... */
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	pmem_persist(&structp->first_field, sizeof(int) * 3);
	/* ... and only then set and persist the flag, so no reordering
	 * of these stores can expose flag==1 with mismatched fields */
	structp->flag = 1;
	pmem_persist(&structp->flag, sizeof(structp->flag));
}
/*
* write_inconsistent -- (internal) write data in an inconsistent manner.
*/
static void
write_inconsistent(struct three_field *structp)
{
	/* the flag is written BEFORE the fields and everything is
	 * persisted in one shot -- a reordered subset of these stores
	 * can leave flag==1 with mismatched fields (the bug pmreorder
	 * is expected to catch) */
	structp->flag = 1;
	structp->first_field = 1;
	structp->second_field = 1;
	structp->third_field = 1;
	pmem_persist(structp, sizeof(*structp));
}
/*
* check_consistency -- (internal) check struct three_field consistency
*/
/*
 * check_consistency -- (internal) report whether the struct is in an
 * inconsistent state: returns nonzero iff the flag is set while the
 * three fields do not all hold the same value
 */
static int
check_consistency(struct three_field *structp)
{
	/* with the flag clear the struct is consistent by definition */
	if (!structp->flag)
		return 0;

	return (structp->first_field != structp->second_field) ||
		(structp->first_field != structp->third_field);
}
/*
 * main -- map the test file and dispatch on the single-letter mode:
 * 'g'/'m' write consistently, 'b' writes inconsistently, 'c' only
 * checks consistency and returns the result as the exit status
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmreorder_simple");
	util_init();
	if ((argc != 3) || (strchr("gbcm", argv[1][0]) == NULL) ||
			argv[1][1] != '\0')
		UT_FATAL("usage: %s g|b|c|m file", argv[0]);
	int fd = OPEN(argv[2], O_RDWR);
	size_t size;
	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);
	struct three_field *structp = map;
	char opt = argv[1][0];
	/* clear the struct to get a consistent start state for writing */
	if (strchr("gb", opt))
		pmem_memset_persist(structp, 0, sizeof(*structp));
	else if (strchr("m", opt)) {
		/* set test values to log an inconsistent start state */
		pmem_memset_persist(&structp->flag, 1, sizeof(int));
		pmem_memset_persist(&structp->first_field, 0, sizeof(int) * 2);
		pmem_memset_persist(&structp->third_field, 1, sizeof(int));
		/* clear the struct to get back a consistent start state */
		pmem_memset_persist(structp, 0, sizeof(*structp));
	}
	/* verify that DEFAULT_REORDER restores default engine */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.BEGIN");
	switch (opt) {
	case 'g':
		write_consistent(structp);
		break;
	case 'b':
		write_inconsistent(structp);
		break;
	case 'm':
		write_consistent(structp);
		break;
	case 'c':
		/* exit status carries the consistency verdict */
		return check_consistency(structp);
	default:
		UT_FATAL("Unrecognized option %c", opt);
	}
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.END");
	/* check if undefined marker will not cause an issue */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.BEGIN");
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.END");
	CLOSE(fd);
	DONE(NULL);
}
| 3,335 | 24.082707 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/remote_obj_basic/remote_obj_basic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* remote_obj_basic.c -- unit test for remote tests support
*
* usage: remote_obj_basic <create|open> <poolset-file>
*/
#include "unittest.h"
#define LAYOUT_NAME "remote_obj_basic"
/*
 * main -- create or open the given pool set, depending on the mode
 * argument, then close it
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "remote_obj_basic");

	if (argc != 3)
		UT_FATAL("usage: %s <create|open> <poolset-file>", argv[0]);

	const char *mode = argv[1];
	const char *file = argv[2];
	PMEMobjpool *pop = NULL;

	if (strcmp(mode, "create") == 0) {
		pop = pmemobj_create(file, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR);
		if (pop == NULL)
			UT_FATAL("!pmemobj_create: %s", file);
		UT_OUT("The pool set %s has been created", file);
	} else if (strcmp(mode, "open") == 0) {
		pop = pmemobj_open(file, LAYOUT_NAME);
		if (pop == NULL)
			UT_FATAL("!pmemobj_open: %s", file);
		UT_OUT("The pool set %s has been opened", file);
	} else {
		UT_FATAL("wrong mode: %s\n", argv[1]);
	}

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,019 | 20.25 | 62 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_debug/obj_ctl_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* obj_ctl_debug.c -- tests for the ctl debug namesapce entry points
*/
#include "unittest.h"
#include "../../libpmemobj/obj.h"
#define LAYOUT "obj_ctl_debug"
#define BUFFER_SIZE 128
#define ALLOC_PATTERN 0xAC
/*
 * test_alloc_pattern -- (internal) verify the debug.heap.alloc_pattern
 * ctl: the default is "no pattern", a set value is stored in the pool's
 * heap state, and a subsequent allocation is filled with that byte
 */
static void
test_alloc_pattern(PMEMobjpool *pop)
{
	int ret;
	int pattern;
	PMEMoid oid;
	/* check default pattern */
	ret = pmemobj_ctl_get(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pattern, PALLOC_CTL_DEBUG_NO_PATTERN);
	/* check set pattern */
	pattern = ALLOC_PATTERN;
	ret = pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(pop->heap.alloc_pattern, pattern);
	/* check alloc with pattern */
	ret = pmemobj_alloc(pop, &oid, BUFFER_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	char *buff = pmemobj_direct(oid);
	int i;
	for (i = 0; i < BUFFER_SIZE; i++)
		/* should trigger memcheck error: read uninitialized values */
		UT_ASSERTeq(*(buff + i), (char)pattern);
	pmemobj_free(&oid);
}
/*
 * main -- create a fresh pool and run the alloc-pattern ctl test
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_debug");

	if (argc < 2)
		UT_FATAL("usage: %s filename", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop;

	/*
	 * The failing call here is pmemobj_create(), so report it as such
	 * (the message previously said "!pmemobj_open").
	 */
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	test_alloc_pattern(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
| 1,452 | 20.367647 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_list_macro/obj_list_macro.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_macro.c -- unit tests for list module
*/
#include <stddef.h>
#include "libpmemobj.h"
#include "unittest.h"
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
struct item {
int id;
POBJ_LIST_ENTRY(struct item) next;
};
struct list {
POBJ_LIST_HEAD(listhead, struct item) head;
};
/* global lists */
static TOID(struct list) List;
static TOID(struct list) List_sec;
#define LAYOUT_NAME "list_macros"
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list_macro <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list_macro <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list_macro <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list_macro <file> i:<where>:<num>[:<id>]")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list_macro <file> n:<where>:<num>[:<id>]")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list_macro <file> f:<list>:<num>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list_macro <file> r:<list>:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list_macro <file> m:<num>:<where>:<num>")
/*
 * get_item_list -- return the n-th item of a list; a negative n counts
 * from the tail (-1 is the last element); returns TOID_NULL when the
 * index is out of range
 */
static TOID(struct item)
get_item_list(TOID(struct list) list, int n)
{
	TOID(struct item) it;

	if (n < 0) {
		POBJ_LIST_FOREACH_REVERSE(it, &D_RO(list)->head, next) {
			if (++n == 0)
				return it;
		}
	} else {
		POBJ_LIST_FOREACH(it, &D_RO(list)->head, next) {
			if (n-- == 0)
				return it;
		}
	}

	return TOID_NULL(struct item);
}
/*
 * do_print -- print the id of every element of the chosen list in
 * normal (head-to-tail) order
 *
 * arg format: P:<list> where <list> is 1 (List) or 2 (List_sec)
 */
static void
do_print(PMEMobjpool *pop, const char *arg)
{
	int list_num;
	if (sscanf(arg, "P:%d", &list_num) != 1)
		FATAL_USAGE_PRINT();

	TOID(struct item) it;
	switch (list_num) {
	case 1:
		UT_OUT("list:");
		POBJ_LIST_FOREACH(it, &D_RW(List)->head, next)
			UT_OUT("id = %d", D_RO(it)->id);
		break;
	case 2:
		UT_OUT("list sec:");
		POBJ_LIST_FOREACH(it, &D_RW(List_sec)->head, next)
			UT_OUT("id = %d", D_RO(it)->id);
		break;
	default:
		FATAL_USAGE_PRINT();
	}
}
/*
 * do_print_reverse -- print the id of every element of the chosen list
 * in reverse (tail-to-head) order
 *
 * arg format: R:<list> where <list> is 1 (List) or 2 (List_sec)
 */
static void
do_print_reverse(PMEMobjpool *pop, const char *arg)
{
	int list_num;
	if (sscanf(arg, "R:%d", &list_num) != 1)
		FATAL_USAGE_PRINT_REVERSE();

	TOID(struct item) it;
	switch (list_num) {
	case 1:
		UT_OUT("list reverse:");
		POBJ_LIST_FOREACH_REVERSE(it, &D_RW(List)->head, next)
			UT_OUT("id = %d", D_RO(it)->id);
		break;
	case 2:
		UT_OUT("list sec reverse:");
		POBJ_LIST_FOREACH_REVERSE(it, &D_RW(List_sec)->head, next)
			UT_OUT("id = %d", D_RO(it)->id);
		break;
	default:
		FATAL_USAGE_PRINT_REVERSE();
	}
}
/*
 * item_constructor -- object constructor; stores the id passed via arg
 * in the newly allocated item and logs the call
 */
static int
item_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct item *itemp = (struct item *)ptr;
	itemp->id = *(int *)arg;
	UT_OUT("constructor(id = %d)", itemp->id);
	return 0;
}
/*
 * do_insert_new -- allocate and insert a new element into the main list
 *
 * arg format: n:<before>:<n>[:<id>] -- insert before (1) or after (0)
 * the n-th element; the optional id is passed to the constructor
 * (defaults to 0). An empty list always gets a new head.
 */
static void
do_insert_new(PMEMobjpool *pop, const char *arg)
{
	int n;	/* which element on List */
	int before;
	int id;
	int nargs = sscanf(arg, "n:%d:%d:%d", &before, &n, &id);
	if (nargs != 3 && nargs != 2)
		FATAL_USAGE_INSERT_NEW();
	int ctor_arg = (nargs == 3) ? id : 0;

	TOID(struct item) item;
	if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
		POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(List)->head, next,
				sizeof(struct item), item_constructor,
				&ctor_arg);
		if (POBJ_LIST_EMPTY(&D_RW(List)->head))
			UT_FATAL("POBJ_LIST_INSERT_NEW_HEAD");
		return;
	}

	item = get_item_list(List, n);
	UT_ASSERT(!TOID_IS_NULL(item));
	if (before) {
		POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(List)->head,
				item, next, sizeof(struct item),
				item_constructor, &ctor_arg);
		if (TOID_IS_NULL(POBJ_LIST_PREV(item, next)))
			UT_FATAL("POBJ_LIST_INSERT_NEW_BEFORE");
	} else {
		POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(List)->head,
				item, next, sizeof(struct item),
				item_constructor, &ctor_arg);
		if (TOID_IS_NULL(POBJ_LIST_NEXT(item, next)))
			UT_FATAL("POBJ_LIST_INSERT_NEW_AFTER");
	}
}
/*
 * do_insert -- allocate an object with POBJ_NEW and link it into the
 * main list
 *
 * arg format: i:<before>:<n>[:<id>] -- insert before (1) or after (0)
 * the n-th element; the optional id is passed to the constructor
 * (defaults to 0). An empty list always gets a new head.
 */
static void
do_insert(PMEMobjpool *pop, const char *arg)
{
	int n;	/* which element on List */
	int before;
	int id;
	int nargs = sscanf(arg, "i:%d:%d:%d", &before, &n, &id);
	if (nargs != 3 && nargs != 2)
		FATAL_USAGE_INSERT();
	int ctor_arg = (nargs == 3) ? id : 0;

	TOID(struct item) item;
	POBJ_NEW(pop, &item, struct item, item_constructor, &ctor_arg);
	UT_ASSERT(!TOID_IS_NULL(item));

	int ret;
	errno = 0;
	if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
		ret = POBJ_LIST_INSERT_HEAD(pop, &D_RW(List)->head,
				item, next);
		if (ret) {
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_INSERT_HEAD");
		}
		if (POBJ_LIST_EMPTY(&D_RW(List)->head))
			UT_FATAL("POBJ_LIST_INSERT_HEAD");
		return;
	}

	TOID(struct item) elm = get_item_list(List, n);
	UT_ASSERT(!TOID_IS_NULL(elm));
	if (before) {
		ret = POBJ_LIST_INSERT_BEFORE(pop, &D_RW(List)->head,
				elm, item, next);
		if (ret) {
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_INSERT_BEFORE");
		}
		if (!TOID_EQUALS(item, POBJ_LIST_PREV(elm, next)))
			UT_FATAL("POBJ_LIST_INSERT_BEFORE");
	} else {
		ret = POBJ_LIST_INSERT_AFTER(pop, &D_RW(List)->head,
				elm, item, next);
		if (ret) {
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_INSERT_AFTER");
		}
		if (!TOID_EQUALS(item, POBJ_LIST_NEXT(elm, next)))
			UT_FATAL("POBJ_LIST_INSERT_AFTER");
	}
}
/*
 * do_remove_free -- atomically unlink the n-th element from a list and
 * free it (no-op on an empty list)
 *
 * arg format: f:<list>:<n> where <list> is 1 (List) or 2 (List_sec)
 */
static void
do_remove_free(PMEMobjpool *pop, const char *arg)
{
	int list_num;
	int n;
	if (sscanf(arg, "f:%d:%d", &list_num, &n) != 2)
		FATAL_USAGE_REMOVE_FREE();

	TOID(struct list) target;
	if (list_num == 1)
		target = List;
	else if (list_num == 2)
		target = List_sec;
	else
		FATAL_USAGE_REMOVE_FREE();

	if (POBJ_LIST_EMPTY(&D_RW(target)->head))
		return;

	TOID(struct item) item = get_item_list(target, n);
	UT_ASSERT(!TOID_IS_NULL(item));

	errno = 0;
	int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(target)->head,
			item, next);
	if (ret) {
		UT_ASSERTeq(ret, -1);
		UT_ASSERTne(errno, 0);
		UT_FATAL("POBJ_LIST_REMOVE_FREE");
	}
}
/*
 * do_remove -- unlink the n-th element from a list, then free it with a
 * separate POBJ_FREE (no-op on an empty list)
 *
 * arg format: r:<list>:<n> where <list> is 1 (List) or 2 (List_sec)
 */
static void
do_remove(PMEMobjpool *pop, const char *arg)
{
	int L;	/* which list */
	int n;	/* which element */
	if (sscanf(arg, "r:%d:%d", &L, &n) != 2)
		FATAL_USAGE_REMOVE();

	TOID(struct item) item;
	TOID(struct list) tmp_list;
	if (L == 1)
		tmp_list = List;
	else if (L == 2)
		tmp_list = List_sec;
	else
		/* fixed: this is the remove path, not remove-free */
		FATAL_USAGE_REMOVE();

	if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
		return;

	item = get_item_list(tmp_list, n);
	UT_ASSERT(!TOID_IS_NULL(item));

	errno = 0;
	int ret = POBJ_LIST_REMOVE(pop, &D_RW(tmp_list)->head, item, next);
	if (ret) {
		UT_ASSERTeq(ret, -1);
		UT_ASSERTne(errno, 0);
		UT_FATAL("POBJ_LIST_REMOVE");
	}
	POBJ_FREE(&item);
}
/*
 * do_move -- move element from one list to another
 *
 * arg format: m:<n>:<before>:<d> -- move the n-th element of List onto
 * List_sec, before (1) or after (0) its d-th element; if List_sec is
 * empty the element becomes its head. A move from an empty List is a
 * no-op.
 */
static void
do_move(PMEMobjpool *pop, const char *arg)
{
	int n;
	int d;
	int before;
	if (sscanf(arg, "m:%d:%d:%d", &n, &before, &d) != 3)
		FATAL_USAGE_MOVE();
	int ret;
	errno = 0;
	/* nothing to move */
	if (POBJ_LIST_EMPTY(&D_RW(List)->head))
		return;
	if (POBJ_LIST_EMPTY(&D_RW(List_sec)->head)) {
		/* destination empty -- element becomes the new head */
		ret = POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(List)->head,
			&D_RW(List_sec)->head,
			get_item_list(List, n),
			next, next);
		if (ret) {
			UT_ASSERTeq(ret, -1);
			UT_ASSERTne(errno, 0);
			UT_FATAL("POBJ_LIST_MOVE_ELEMENT_HEAD");
		}
	} else {
		if (before) {
			ret = POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,
				&D_RW(List)->head,
				&D_RW(List_sec)->head,
				get_item_list(List_sec, d),
				get_item_list(List, n),
				next, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_MOVE_ELEMENT_BEFORE");
			}
		} else {
			ret = POBJ_LIST_MOVE_ELEMENT_AFTER(pop,
				&D_RW(List)->head,
				&D_RW(List_sec)->head,
				get_item_list(List_sec, d),
				get_item_list(List, n),
				next, next);
			if (ret) {
				UT_ASSERTeq(ret, -1);
				UT_ASSERTne(errno, 0);
				UT_FATAL("POBJ_LIST_MOVE_ELEMENT_AFTER");
			}
		}
	}
}
/*
 * do_cleanup -- remove and free every element of a list, then free the
 * list object itself
 */
static void
do_cleanup(PMEMobjpool *pop, TOID(struct list) list)
{
	errno = 0;
	while (!POBJ_LIST_EMPTY(&D_RW(list)->head)) {
		TOID(struct item) first = POBJ_LIST_FIRST(&D_RW(list)->head);
		int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(list)->head,
				first, next);
		UT_ASSERTeq(errno, 0);
		UT_ASSERTeq(ret, 0);
	}
	POBJ_FREE(&list);
}
/*
 * main -- create a pool with two lists and replay the list operations
 * encoded in the remaining command-line arguments
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_list_macro");
	if (argc < 2)
		FATAL_USAGE();

	const char *path = argv[1];

	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (pop == NULL)
		UT_FATAL("!pmemobj_create");

	POBJ_ZNEW(pop, &List, struct list);
	POBJ_ZNEW(pop, &List_sec, struct list);

	/* each remaining argument encodes one list operation */
	for (int i = 2; i < argc; i++) {
		switch (argv[i][0]) {
		case 'P':
			do_print(pop, argv[i]);
			break;
		case 'R':
			do_print_reverse(pop, argv[i]);
			break;
		case 'n':
			do_insert_new(pop, argv[i]);
			break;
		case 'i':
			do_insert(pop, argv[i]);
			break;
		case 'f':
			do_remove_free(pop, argv[i]);
			break;
		case 'r':
			do_remove(pop, argv[i]);
			break;
		case 'm':
			do_move(pop, argv[i]);
			break;
		default:
			FATAL_USAGE();
		}
	}

	do_cleanup(pop, List);
	do_cleanup(pop, List_sec);

	pmemobj_close(pop);

	DONE(NULL);
}
| 9,625 | 21.756501 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_critnib_mt/obj_critnib_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_critnib_mt.c -- multithreaded unit test for critnib
*/
#include <errno.h>
#include "critnib.h"
#include "rand.h"
#include "os_thread.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define NITER_FAST 200000000
#define NITER_MID 20000000
#define NITER_SLOW 2000000
#define MAXTHREADS 4096
static int nthreads; /* number of threads */
static int nrthreads; /* in mixed tests, read threads */
static int nwthreads; /* ... and write threads */
static uint64_t
rnd_thid_r64(rng_t *seedp, uint16_t thid)
{
	/*
	 * Stick arg (thread index) onto bits 16..31, to make it impossible for
	 * two worker threads to write the same value, while keeping both ends
	 * pseudo-random.
	 */
	uint64_t val = rnd64_r(seedp) & ~0xffff0000ULL;
	return val | ((uint64_t)thid << 16);
}
static uint64_t
helgrind_count(uint64_t x)
{
	/* convert the total number of ops to a per-thread count */
	uint64_t per_thread = x / (unsigned)nthreads;
	/*
	 * Reduce iteration count when running on foogrind, by a factor of 64.
	 * Multiple instances of foogrind cause exponential slowdown, so handle
	 * that as well (not that it's very useful for us...).
	 */
	return per_thread >> (6 * On_valgrind);
}
/* 1024 random numbers, shared between threads. */
static uint64_t the1024[1024];
static struct critnib *c;
#define K 0xdeadbeefcafebabe
/* (thread) repeatedly look up the single preloaded key K */
static void *
thread_read1(void *arg)
{
	uint64_t left = helgrind_count(NITER_FAST);
	while (left--)
		UT_ASSERTeq(critnib_get(c, K), (void *)K);
	return NULL;
}
/* (thread) look up the shared preloaded random keys round-robin */
static void *
thread_read1024(void *arg)
{
	uint64_t niter = helgrind_count(NITER_FAST);
	for (uint64_t i = 0; i < niter; i++) {
		uint64_t key = the1024[i % ARRAY_SIZE(the1024)];
		UT_ASSERTeq(critnib_get(c, key), (void *)key);
	}
	return NULL;
}
/* (thread) insert and immediately remove thread-tagged random keys */
static void *
thread_write1024(void *arg)
{
	rng_t rng;
	randomize_r(&rng, (uintptr_t)arg);

	/* a private set of keys, tagged with this thread's index */
	uint64_t keys[1024];
	for (int i = 0; i < ARRAY_SIZE(keys); i++)
		keys[i] = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);

	uint64_t niter = helgrind_count(NITER_SLOW);
	for (uint64_t i = 0; i < niter; i++) {
		uint64_t key = keys[i % ARRAY_SIZE(keys)];
		critnib_insert(c, key, (void *)key);
		uint64_t removed = (uint64_t)critnib_remove(c, key);
		UT_ASSERTeq(key, removed);
	}
	return NULL;
}
/* (thread) insert, read back, and remove a fresh key every iteration */
static void *
thread_read_write_remove(void *arg)
{
	rng_t rng;
	randomize_r(&rng, (uintptr_t)arg);

	uint64_t niter = helgrind_count(NITER_SLOW);
	for (uint64_t i = 0; i < niter; i++) {
		uint64_t key = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);
		critnib_insert(c, key, (void *)key);
		uint64_t got = (uint64_t)critnib_get(c, key);
		UT_ASSERTeq(got, key);
		got = (uint64_t)critnib_remove(c, key);
		UT_ASSERTeq(got, key);
	}
	return NULL;
}
/*
 * Reverse bits in a number: 1234 -> 4321 (swap _bit_ endianness).
 *
 * Doing this on successive numbers produces a van der Corput sequence,
 * which covers the space nicely (relevant for <= tests).
 */
static uint64_t
revbits(uint64_t x)
{
	uint64_t y = 0;
	for (int i = 0; i < 64; i++) {
		y <<= 1;
		y |= x & 1;
		x >>= 1;
	}
	return y;
}
/* (thread) find_le over a van der Corput probe set; only K is present */
static void *
thread_le1(void *arg)
{
	uint64_t niter = helgrind_count(NITER_MID);
	for (uint64_t i = 0; i < niter; i++) {
		uint64_t probe = revbits(i);
		/* with K as the single key: probes below K find nothing */
		if (probe < K)
			UT_ASSERTeq(critnib_find_le(c, probe), NULL);
		else
			UT_ASSERTeq(critnib_find_le(c, probe), (void *)K);
	}
	return NULL;
}
/* (thread) exercise find_le under load; the result is not verified */
static void *
thread_le1024(void *arg)
{
	uint64_t niter = helgrind_count(NITER_MID);
	for (uint64_t i = 0; i < niter; i++)
		critnib_find_le(c, revbits(i));
	return NULL;
}
typedef void *(*thread_func_t)(void *);
/*
 * Before starting the threads, we add "fixed_preload" of static values
 * (K and 1), or "random_preload" of random numbers. Can't have both.
 */
static void
test(int fixed_preload, int random_preload, thread_func_t rthread,
	thread_func_t wthread)
{
	c = critnib_new();

	/* static keys: K, and optionally 1 as well */
	if (fixed_preload >= 1)
		critnib_insert(c, K, (void *)K);
	if (fixed_preload >= 2)
		critnib_insert(c, 1, (void *)1);
	/* or a batch of the shared random keys (never both) */
	for (int i = 0; i < random_preload; i++)
		critnib_insert(c, the1024[i], (void *)the1024[i]);

	int nread = wthread ? nrthreads : nthreads;
	int nwrite = wthread ? nwthreads : 0;

	os_thread_t readers[MAXTHREADS], writers[MAXTHREADS];
	for (int i = 0; i < nread; i++)
		THREAD_CREATE(&readers[i], 0, rthread, (void *)(uint64_t)i);
	for (int i = 0; i < nwrite; i++)
		THREAD_CREATE(&writers[i], 0, wthread, (void *)(uint64_t)i);

	/* the threads do their work here... */

	void *retval;
	for (int i = 0; i < nread; i++)
		THREAD_JOIN(&readers[i], &retval);
	for (int i = 0; i < nwrite; i++)
		THREAD_JOIN(&writers[i], &retval);

	critnib_delete(c);
}
/*
 * main -- size the thread pool to the machine and run every critnib
 * multithreaded scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_critnib_mt");
	util_init();
	randomize(1); /* use a fixed reproducible seed */
	/* shared pool of pseudo-random keys for the preload/read tests */
	for (int i = 0; i < ARRAY_SIZE(the1024); i++)
		the1024[i] = rnd64();
	/* one thread per CPU, clamped to [8 fallback, MAXTHREADS] */
	nthreads = sysconf(_SC_NPROCESSORS_ONLN);
	if (nthreads > MAXTHREADS)
		nthreads = MAXTHREADS;
	if (!nthreads)
		nthreads = 8;
	/* mixed tests: roughly half writers, half readers, min one each */
	nwthreads = nthreads / 2;
	if (!nwthreads)
		nwthreads = 1;
	nrthreads = nthreads - nwthreads;
	if (!nrthreads)
		nrthreads = 1;
	test(1, 0, thread_read1, thread_write1024);
	test(0, 1024, thread_read1024, thread_write1024);
	test(0, 0, thread_read_write_remove, NULL);
	test(1, 0, thread_le1, NULL);
	test(0, 1024, thread_le1024, NULL);
	DONE(NULL);
}
| 5,467 | 20.527559 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_ctl_arenas/obj_ctl_arenas.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_ctl_arenas.c -- tests for the ctl entry points
* usage:
* obj_ctl_arenas <file> n - test for heap.narenas.total
*
* obj_ctl_arenas <file> s - test for heap.arena.[idx].size
* and heap.thread.arena_id (RW)
*
* obj_ctl_arenas <file> c - test for heap.arena.create,
* heap.arena.[idx].automatic and heap.narenas.automatic
* obj_ctl_arenas <file> a - mt test for heap.arena.create
* and heap.thread.arena_id
*
* obj_ctl_arenas <file> f - test for POBJ_ARENA_ID flag,
*
* obj_ctl_arenas <file> q - test for POBJ_ARENA_ID with
* non-exists arena id
*
* obj_ctl_arenas <file> m - test for heap.narenas.max (RW)
*/
#include <sched.h>
#include "sys_util.h"
#include "unittest.h"
#include "util.h"
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define LAYOUT "obj_ctl_arenas"
#define CTL_QUERY_LEN 256
#define NTHREAD 2
#define NTHREAD_ARENA 32
#define NOBJECT_THREAD 64
#define ALLOC_CLASS_ARENA 2
#define NTHREADX 16
#define NARENAS 16
#define DEFAULT_ARENAS_MAX (1 << 10)
static os_mutex_t lock;
static os_cond_t cond;
static PMEMobjpool *pop;
static int nth;
static struct pobj_alloc_class_desc alloc_class[] = {
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 128,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 1024,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 111,
.units_per_block = CHUNKSIZE / 111,
.alignment = 0
},
};
struct arena_alloc {
unsigned arena;
PMEMoid oid;
};
static struct arena_alloc ref;
/*
 * check_arena_size -- verify that heap.arena.[id].size reports the size
 * of one block of the given allocation class, rounded up to full chunks
 */
static void
check_arena_size(unsigned arena_id, unsigned class_id)
{
	char query[CTL_QUERY_LEN];
	SNPRINTF(query, CTL_QUERY_LEN, "heap.arena.%u.size", arena_id);

	size_t arena_size;
	int ret = pmemobj_ctl_get(pop, query, &arena_size);
	UT_ASSERTeq(ret, 0);

	size_t expected = ALIGN_UP(alloc_class[class_id].unit_size *
		alloc_class[class_id].units_per_block, CHUNKSIZE);
	UT_ASSERTeq(expected, arena_size);
}
/*
 * create_alloc_class -- register allocation classes 128 and 129 from
 * the alloc_class descriptor table
 */
static void
create_alloc_class(void)
{
	static const char *queries[] = {
		"heap.alloc_class.128.desc",
		"heap.alloc_class.129.desc",
	};
	for (int i = 0; i < 2; i++) {
		int ret = pmemobj_ctl_set(pop, queries[i], &alloc_class[i]);
		UT_ASSERTeq(ret, 0);
	}
}
/*
 * worker_arenas_size -- (thread) create an arena, bind this thread to
 * it, allocate from a per-thread alloc class, and verify both the
 * binding and the reported arena size; the two workers rendezvous on a
 * condition variable so two distinct arenas exist before checking
 */
static void *
worker_arenas_size(void *arg)
{
	int ret = -1;
	int idx = (int)(intptr_t)arg;	/* thread index == alloc class index */
	int off_idx = idx + 128;	/* id of the registered alloc class */
	unsigned arena_id;
	unsigned arena_id_new;
	/* create a fresh arena... */
	ret = pmemobj_ctl_exec(pop, "heap.arena.create",
		&arena_id_new);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(arena_id_new >= 1);
	/* ...and make it this thread's arena */
	ret = pmemobj_ctl_set(pop, "heap.thread.arena_id",
		&arena_id_new);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_xalloc(pop, NULL, alloc_class[idx].unit_size, 0,
		POBJ_CLASS_ID(off_idx), NULL, NULL);
	UT_ASSERTeq(ret, 0);
	/* we need to test 2 arenas so 2 threads are needed here */
	util_mutex_lock(&lock);
	nth++;
	if (nth == NTHREAD)
		os_cond_broadcast(&cond);
	else
		while (nth < NTHREAD)
			os_cond_wait(&cond, &lock);
	util_mutex_unlock(&lock);
	/* the thread must still be bound to the arena it created */
	ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(arena_id_new, arena_id);
	check_arena_size(arena_id, (unsigned)idx);
	return NULL;
}
/*
 * worker_arenas_flag -- (thread) exercise the POBJ_ARENA_ID flag with
 * pmemobj_xalloc, pmemobj_xreserve and pmemobj_tx_xalloc on freshly
 * created, explicitly selected arenas
 */
static void *
worker_arenas_flag(void *arg)
{
	int ret;
	unsigned arenas[NARENAS];
	for (unsigned i = 0; i < NARENAS; ++i) {
		ret = pmemobj_ctl_exec(pop, "heap.arena.create",
			&arenas[i]);
		UT_ASSERTeq(ret, 0);
	}
	/*
	 * Tests POBJ_ARENA_ID with pmemobj_xalloc.
	 * All object are frees after pthread join.
	 */
	for (unsigned i = 0; i < 2; i++) {
		ret = pmemobj_xalloc(pop,
			NULL, alloc_class[i].unit_size, 0,
			POBJ_CLASS_ID(i + 128) | \
			POBJ_ARENA_ID(arenas[i]),
			NULL, NULL);
		UT_ASSERTeq(ret, 0);
		check_arena_size(arenas[i], i);
	}
	/* test POBJ_ARENA_ID with pmemobj_xreserve */
	struct pobj_action act;
	PMEMoid oid = pmemobj_xreserve(pop, &act,
		alloc_class[0].unit_size, 1,
		POBJ_CLASS_ID(128) |
		POBJ_ARENA_ID(arenas[2]));
	pmemobj_publish(pop, &act, 1);
	pmemobj_free(&oid);
	UT_ASSERT(OID_IS_NULL(oid));
	/* test POBJ_ARENA_ID with pmemobj_tx_xalloc */
	TX_BEGIN(pop) {
		pmemobj_tx_xalloc(alloc_class[1].unit_size, 0,
			POBJ_CLASS_ID(129) | POBJ_ARENA_ID(arenas[3]));
	} TX_END
	check_arena_size(arenas[3], 1);
	return NULL;
}
/*
 * worker_arena_threads -- (thread) bind this thread to the arena shared
 * via the arena_alloc reference, allocate from it, and verify that every
 * allocation lands in the same block as the reference object
 */
static void *
worker_arena_threads(void *arg)
{
	int ret = -1;
	struct arena_alloc *ref = (struct arena_alloc *)arg;
	unsigned arena_id;

	/* every thread always has some arena assigned */
	ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
	UT_ASSERTeq(ret, 0);
	UT_ASSERT(arena_id != 0);

	/* switch to the arena shared with the reference object */
	ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
	UT_ASSERTeq(ret, 0);

	PMEMoid oid[NOBJECT_THREAD];
	uint64_t d;
	for (int i = 0; i < NOBJECT_THREAD; i++) {
		ret = pmemobj_xalloc(pop, &oid[i],
			alloc_class[ALLOC_CLASS_ARENA].unit_size,
			0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128),
			NULL, NULL);
		UT_ASSERTeq(ret, 0);

		/*
		 * Distance from the reference object in bytes; computed on
		 * the unsigned offsets directly (fixed: was labs() on values
		 * cast to long, stored into an unsigned).
		 */
		d = ref->oid.off > oid[i].off ?
			ref->oid.off - oid[i].off :
			oid[i].off - ref->oid.off;

		/* objects are in the same block as the first one */
		/* fixed: use the unittest macro, not the bare ASSERT */
		UT_ASSERT(d <= alloc_class[ALLOC_CLASS_ARENA].unit_size *
			(alloc_class[ALLOC_CLASS_ARENA].units_per_block - 1));
	}

	for (int i = 0; i < NOBJECT_THREAD; i++)
		pmemobj_free(&oid[i]);

	return NULL;
}
/*
 * worker_arena_ref_obj -- bind the calling thread to ref->arena and
 * allocate the reference object used by worker_arena_threads
 */
static void
worker_arena_ref_obj(struct arena_alloc *ref)
{
	int ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
	UT_ASSERTeq(ret, 0);

	ret = pmemobj_xalloc(pop, &ref->oid,
		alloc_class[ALLOC_CLASS_ARENA].unit_size,
		0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128), NULL, NULL);
	UT_ASSERTeq(ret, 0);
}
/*
 * main -- dispatch on the single-letter test selector (see the file
 * header): n = narenas.total, s = per-arena size, c = create/automatic,
 * a = mt create + thread binding, f = POBJ_ARENA_ID flag,
 * q = invalid arena id, m = narenas.max
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_arenas");
	if (argc != 3)
		UT_FATAL("usage: %s poolset [n|s|c|f|q|m|a]", argv[0]);
	const char *path = argv[1];
	char t = argv[2][0];
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_open: %s", path);
	int ret = 0;
	if (t == 'n') {
		/* heap.narenas.total must report a non-zero count */
		unsigned narenas = 0;
		ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTne(narenas, 0);
	} else if (t == 's') {
		/* two workers each create an arena and verify its size */
		os_thread_t threads[NTHREAD];
		util_mutex_init(&lock);
		util_cond_init(&cond);
		create_alloc_class();
		for (int i = 0; i < NTHREAD; i++)
			THREAD_CREATE(&threads[i], NULL, worker_arenas_size,
				(void *)(intptr_t)i);
		for (int i = 0; i < NTHREAD; i++)
			THREAD_JOIN(&threads[i], NULL);
		PMEMoid oid, oid2;
		POBJ_FOREACH_SAFE(pop, oid, oid2)
			pmemobj_free(&oid);
		util_mutex_destroy(&lock);
		util_cond_destroy(&cond);
	} else if (t == 'c') {
		/* arena creation and the "automatic" state transitions */
		char arena_idx_auto[CTL_QUERY_LEN];
		unsigned narenas_b = 0;
		unsigned narenas_a = 0;
		unsigned narenas_n = 4;
		unsigned arena_id;
		unsigned all_auto;
		int automatic;
		ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_b);
		UT_ASSERTeq(ret, 0);
		/* all arenas created at the start should be set to auto */
		for (unsigned i = 1; i <= narenas_b; i++) {
			SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
				"heap.arena.%u.automatic", i);
			ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(automatic, 1);
		}
		ret = pmemobj_ctl_get(pop, "heap.narenas.automatic", &all_auto);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(narenas_b, all_auto);
		/* all arenas created by user should not be auto */
		for (unsigned i = 1; i <= narenas_n; i++) {
			ret = pmemobj_ctl_exec(pop, "heap.arena.create",
				&arena_id);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(arena_id, narenas_b + i);
			SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
				"heap.arena.%u.automatic", arena_id);
			ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
			UT_ASSERTeq(automatic, 0);
			/*
			 * after creation, number of auto
			 * arenas should be the same
			 */
			ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
				&all_auto);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(narenas_b + i - 1, all_auto);
			/* change the state of created arena to auto */
			int activate = 1;
			ret = pmemobj_ctl_set(pop, arena_idx_auto,
				&activate);
			UT_ASSERTeq(ret, 0);
			ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(automatic, 1);
			/* number of auto arenas should increase */
			ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
				&all_auto);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(narenas_b + i, all_auto);
		}
		ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_a);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(narenas_b + narenas_n, narenas_a);
		/* at least one automatic arena must exist */
		for (unsigned i = 1; i <= narenas_a; i++) {
			SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
				"heap.arena.%u.automatic", i);
			automatic = 0;
			if (i < narenas_a) {
				ret = pmemobj_ctl_set(pop, arena_idx_auto,
					&automatic);
				UT_ASSERTeq(ret, 0);
			} else {
				/*
				 * last auto arena -
				 * cannot change the state to 0...
				 */
				ret = pmemobj_ctl_set(pop, arena_idx_auto,
					&automatic);
				UT_ASSERTeq(ret, -1);
				/* ...but can change (overwrite) to 1 */
				automatic = 1;
				ret = pmemobj_ctl_set(pop, arena_idx_auto,
					&automatic);
				UT_ASSERTeq(ret, 0);
			}
		}
	} else if (t == 'a') {
		/* many threads allocating from one explicitly shared arena */
		int ret;
		unsigned arena_id_new;
		char alloc_class_idx_desc[CTL_QUERY_LEN];
		ret = pmemobj_ctl_exec(pop, "heap.arena.create",
			&arena_id_new);
		UT_ASSERTeq(ret, 0);
		UT_ASSERT(arena_id_new >= 1);
		SNPRINTF(alloc_class_idx_desc, CTL_QUERY_LEN,
			"heap.alloc_class.%d.desc",
			ALLOC_CLASS_ARENA + 128);
		ret = pmemobj_ctl_set(pop, alloc_class_idx_desc,
			&alloc_class[ALLOC_CLASS_ARENA]);
		UT_ASSERTeq(ret, 0);
		ref.arena = arena_id_new;
		worker_arena_ref_obj(&ref);
		os_thread_t threads[NTHREAD_ARENA];
		for (int i = 0; i < NTHREAD_ARENA; i++) {
			THREAD_CREATE(&threads[i], NULL, worker_arena_threads,
				&ref);
		}
		for (int i = 0; i < NTHREAD_ARENA; i++)
			THREAD_JOIN(&threads[i], NULL);
	} else if (t == 'f') {
		/* POBJ_ARENA_ID flag from many threads concurrently */
		os_thread_t threads[NTHREADX];
		create_alloc_class();
		for (int i = 0; i < NTHREADX; i++)
			THREAD_CREATE(&threads[i], NULL,
				worker_arenas_flag, NULL);
		for (int i = 0; i < NTHREADX; i++)
			THREAD_JOIN(&threads[i], NULL);
		PMEMoid oid, oid2;
		POBJ_FOREACH_SAFE(pop, oid, oid2)
			pmemobj_free(&oid);
	} else if (t == 'q') {
		/* allocation with a non-existent arena id must fail */
		unsigned total;
		ret = pmemobj_ctl_get(pop, "heap.narenas.total", &total);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_xalloc(pop, NULL, alloc_class[0].unit_size, 0,
			POBJ_ARENA_ID(total), NULL, NULL);
		UT_ASSERTne(ret, 0);
	} else if (t == 'm') {
		/* heap.narenas.max may only grow, never shrink */
		unsigned max;
		unsigned new_max;
		ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
		/* size should not decrease */
		new_max = DEFAULT_ARENAS_MAX - 1;
		ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
		UT_ASSERTne(ret, 0);
		ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
		/* size should increase */
		new_max = DEFAULT_ARENAS_MAX + 1;
		ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(DEFAULT_ARENAS_MAX + 1, max);
	} else {
		UT_ASSERT(0);
	}
	pmemobj_close(pop);
	DONE(NULL);
}
| 11,314 | 23.651416 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/win_poolset_unmap/win_poolset_unmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* win_poolset_unmap.c -- test for windows mmap destructor.
*
* It checks whether all mappings are properly unmpapped and memory is properly
* unreserved when auto growing pool is used.
*/
#include "unittest.h"
#include "os.h"
#include "libpmemobj.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
#define LAYOUT_NAME "poolset_unmap"
/*
 * main -- create an auto-growing pool and inspect the resulting virtual
 * memory layout with VirtualQuery: committed first part (with or without
 * a separately-protected 4KB header) followed by the reserved remainder
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "win_poolset_unmap");
	if (argc != 2)
		UT_FATAL("usage: %s path", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	MEMORY_BASIC_INFORMATION basic_info;
	SIZE_T bytes_returned;
	SIZE_T offset = 0;
	bytes_returned = VirtualQuery(pop, &basic_info,
		sizeof(basic_info));
	/*
	 * When opening pool, we try to remove all permissions on header.
	 * If this action fails VirtualQuery will return one region with
	 * size 8MB. If it succeeds, RegionSize will be equal to 4KB due
	 * to different header and rest of the mapping permissions.
	 */
	if (basic_info.RegionSize == 4 * KILOBYTE) {
		/* header */
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
		offset += basic_info.RegionSize;
		/* first part */
		bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
			sizeof(basic_info));
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE - 4 * KILOBYTE);
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	} else {
		/* first part with header */
		UT_ASSERTeq(bytes_returned, sizeof(basic_info));
		UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE);
		UT_ASSERTeq(basic_info.State, MEM_COMMIT);
	}
	offset += basic_info.RegionSize;
	/* reservation after first part */
	bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
		sizeof(basic_info));
	UT_ASSERTeq(bytes_returned, sizeof(basic_info));
	UT_ASSERTeq(basic_info.RegionSize, (50 - 8) * MEGABYTE);
	UT_ASSERTeq(basic_info.State, MEM_RESERVE);
	DONE(NULL);
}
| 2,117 | 25.810127 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_compat/pmem2_compat.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem2_compat.c -- compatibility test for libpmem vs libpmem2
*/
#include "unittest.h"
/*
 * main -- compile-time check that the libpmem memcpy/memset flag values
 * stay numerically identical to their libpmem2 counterparts
 */
int
main(int argc, char *argv[])
{
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NODRAIN != PMEM2_F_MEM_NODRAIN);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NONTEMPORAL != PMEM2_F_MEM_NONTEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_TEMPORAL != PMEM2_F_MEM_TEMPORAL);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WC != PMEM2_F_MEM_WC);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_WB != PMEM2_F_MEM_WB);
	UT_COMPILE_ERROR_ON(PMEM_F_MEM_NOFLUSH != PMEM2_F_MEM_NOFLUSH);
	/* nothing to do at runtime -- all checks happen at compile time */
	return 0;
}
| 606 | 26.590909 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_strdup/obj_tx_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_strdup.c -- unit test for pmemobj_tx_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#define LAYOUT_NAME "tx_strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
enum type_number {
TYPE_NO_TX,
TYPE_WCS_NO_TX,
TYPE_COMMIT,
TYPE_WCS_COMMIT,
TYPE_ABORT,
TYPE_WCS_ABORT,
TYPE_FREE_COMMIT,
TYPE_WCS_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_WCS_FREE_ABORT,
TYPE_COMMIT_NESTED1,
TYPE_WCS_COMMIT_NESTED1,
TYPE_COMMIT_NESTED2,
TYPE_WCS_COMMIT_NESTED2,
TYPE_ABORT_NESTED1,
TYPE_WCS_ABORT_NESTED1,
TYPE_ABORT_NESTED2,
TYPE_WCS_ABORT_NESTED2,
TYPE_ABORT_AFTER_NESTED1,
TYPE_WCS_ABORT_AFTER_NESTED1,
TYPE_ABORT_AFTER_NESTED2,
TYPE_WCS_ABORT_AFTER_NESTED2,
TYPE_NOFLUSH,
TYPE_WCS_NOFLUSH,
};
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define MAX_FUNC 2
typedef void (*fn_tx_strdup)(TOID(char) *str, const char *s,
unsigned type_num);
typedef void (*fn_tx_wcsdup)(TOID(wchar_t) *wcs, const wchar_t *s,
unsigned type_num);
static unsigned counter;
/*
 * tx_strdup -- duplicate a string using pmemobj_tx_strdup
 */
static void
tx_strdup(TOID(char) *str, const char *s, unsigned type_num)
{
	PMEMoid dup = pmemobj_tx_strdup(s, type_num);
	TOID_ASSIGN(*str, dup);
}
/*
 * tx_wcsdup -- duplicate a wide-character string using pmemobj_tx_wcsdup
 */
static void
tx_wcsdup(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
	PMEMoid dup = pmemobj_tx_wcsdup(s, type_num);
	TOID_ASSIGN(*wcs, dup);
}
/*
 * tx_strdup_macro -- duplicate a string using the TX_STRDUP macro
 */
static void
tx_strdup_macro(TOID(char) *str, const char *s, unsigned type_num)
{
	TOID(char) dup = TX_STRDUP(s, type_num);
	*str = dup;
}
/*
 * tx_wcsdup_macro -- duplicate a wide-character string using the
 * TX_WCSDUP macro
 */
static void
tx_wcsdup_macro(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
	TOID(wchar_t) dup = TX_WCSDUP(s, type_num);
	*wcs = dup;
}
static fn_tx_strdup do_tx_strdup[MAX_FUNC] = {tx_strdup, tx_strdup_macro};
static fn_tx_wcsdup do_tx_wcsdup[MAX_FUNC] = {tx_wcsdup, tx_wcsdup_macro};
/*
 * do_tx_strdup_commit -- duplicate a string and commit the transaction;
 * both duplicates must be findable afterwards and compare equal to the
 * sources
 */
static void
do_tx_strdup_commit(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_COMMIT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_COMMIT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT));
	UT_ASSERT(!TOID_IS_NULL(str));
	/* fixed: the wide-char duplicate must have survived the commit too */
	UT_ASSERT(!TOID_IS_NULL(wcs));
	UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs)), 0);
}
/*
 * do_tx_strdup_abort -- duplicate a string and abort the transaction;
 * the abort must roll both allocations back
 */
static void
do_tx_strdup_abort(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_ABORT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_ABORT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* no objects of these type numbers may remain after the abort */
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_null -- duplicate a NULL string to trigger tx abort
 */
static void
do_tx_strdup_null(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	/* a NULL source aborts the whole transaction... */
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, NULL, TYPE_ABORT);
		do_tx_wcsdup[counter](&wcs, NULL, TYPE_WCS_ABORT);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
	/* ...unless POBJ_XALLOC_NO_ABORT is passed (errno set, tx commits) */
	TX_BEGIN(pop) {
		pmemobj_tx_xstrdup(NULL, TYPE_ABORT, POBJ_XALLOC_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* ...or the tx failure behavior is set to "return" */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_strdup(NULL, TYPE_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* the "return" behavior also covers xstrdup with no extra flags */
	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		pmemobj_tx_xstrdup(NULL, TYPE_ABORT, 0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_strdup_free_commit -- duplicate a string, free and commit the
 * transaction
 */
static void
do_tx_strdup_free_commit(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_COMMIT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_COMMIT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		int ret = pmemobj_tx_free(str.oid);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_free(wcs.oid);
		UT_ASSERTeq(ret, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* the frees were committed -- no objects of these types may remain */
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_COMMIT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_free_abort -- duplicate a string, free it within the same
 * transaction and abort; the rollback undoes both operations, so still
 * no object may remain allocated
 */
static void
do_tx_strdup_free_abort(PMEMobjpool *pop)
{
	TOID(char) str;
	TOID(wchar_t) wcs;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_ABORT);
		do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_ABORT);
		UT_ASSERT(!TOID_IS_NULL(str));
		UT_ASSERT(!TOID_IS_NULL(wcs));
		int ret = pmemobj_tx_free(str.oid);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_free(wcs.oid);
		UT_ASSERTeq(ret, 0);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* the allocation itself was transactional, so nothing survives */
	TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
	TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_ABORT));
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_commit_nested -- duplicate two strings using a nested
 * transaction and commit; both the outer and inner duplicates must persist
 */
static void
do_tx_strdup_commit_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_COMMIT_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_COMMIT_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			/* inner tx allocations commit with the outer tx */
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_COMMIT_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_COMMIT_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED1));
	UT_ASSERT(!TOID_IS_NULL(str1));
	UT_ASSERT(!TOID_IS_NULL(wcs1));
	UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str1)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs1)), 0);
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED2));
	UT_ASSERT(!TOID_IS_NULL(str2));
	UT_ASSERT(!TOID_IS_NULL(wcs2));
	UT_ASSERTeq(strcmp(TEST_STR_2, D_RO(str2)), 0);
	UT_ASSERTeq(wcscmp(TEST_WCS_2, D_RO(wcs2)), 0);
}
/*
 * do_tx_strdup_abort_nested -- duplicate two strings using a nested
 * transaction and abort the inner transaction; aborting the inner tx
 * aborts the outer one as well, so neither duplicate may persist
 */
static void
do_tx_strdup_abort_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_ABORT_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_ABORT_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_ABORT_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_ABORT_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
			/* aborting a nested tx aborts the whole hierarchy */
			pmemobj_tx_abort(-1);
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED1));
	UT_ASSERT(TOID_IS_NULL(str1));
	UT_ASSERT(TOID_IS_NULL(wcs1));
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED2));
	UT_ASSERT(TOID_IS_NULL(str2));
	UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_abort_after_nested -- duplicate two strings using a nested
 * transaction and abort the outer transaction after the nested one has
 * committed; the nested commit is provisional, so nothing may persist
 */
static void
do_tx_strdup_abort_after_nested(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2;
	TOID(wchar_t) wcs1;
	TOID(wchar_t) wcs2;
	TX_BEGIN(pop) {
		do_tx_strdup[counter](&str1, TEST_STR_1,
				TYPE_ABORT_AFTER_NESTED1);
		do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
				TYPE_WCS_ABORT_AFTER_NESTED1);
		UT_ASSERT(!TOID_IS_NULL(str1));
		UT_ASSERT(!TOID_IS_NULL(wcs1));
		TX_BEGIN(pop) {
			do_tx_strdup[counter](&str2, TEST_STR_2,
					TYPE_ABORT_AFTER_NESTED2);
			do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
					TYPE_WCS_ABORT_AFTER_NESTED2);
			UT_ASSERT(!TOID_IS_NULL(str2));
			UT_ASSERT(!TOID_IS_NULL(wcs2));
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
		/* aborting the outer tx undoes the nested commit as well */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1));
	TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_WCS_ABORT_AFTER_NESTED1));
	UT_ASSERT(TOID_IS_NULL(str1));
	UT_ASSERT(TOID_IS_NULL(wcs1));
	TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2));
	TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop,
			TYPE_WCS_ABORT_AFTER_NESTED2));
	UT_ASSERT(TOID_IS_NULL(str2));
	UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_noflush -- duplicate a string and a wide string with the
 * POBJ_XALLOC_NO_FLUSH flag and verify the transaction commits cleanly
 * with errno untouched
 */
static void
do_tx_strdup_noflush(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		errno = 0;
		pmemobj_tx_xstrdup(TEST_STR_1, TYPE_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
		pmemobj_tx_xwcsdup(TEST_WCS_1, TYPE_WCS_NOFLUSH,
				POBJ_XALLOC_NO_FLUSH);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * main -- creates the test pool and runs every strdup/wcsdup scenario once
 * for each duplicating-function variant selected by the global 'counter'
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_strdup");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	/* exercise each tx_strdup variant against all scenarios */
	for (counter = 0; counter < MAX_FUNC; counter++) {
		do_tx_strdup_commit(pop);
		do_tx_strdup_abort(pop);
		do_tx_strdup_null(pop);
		do_tx_strdup_free_commit(pop);
		do_tx_strdup_free_abort(pop);
		do_tx_strdup_commit_nested(pop);
		do_tx_strdup_abort_nested(pop);
		do_tx_strdup_abort_after_nested(pop);
	}
	/* the NO_FLUSH variant does not depend on the function table */
	do_tx_strdup_noflush(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_tx_realloc.c -- unit test for pmemobj_tx_realloc and pmemobj_tx_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#define LAYOUT_NAME "tx_realloc"
#define TEST_VALUE_1 1
#define OBJ_SIZE 1024
/*
 * type_number -- object type numbers; each test scenario allocates under
 * its own type number so POBJ_FIRST_TYPE_NUM can verify its outcome
 * without interference from other scenarios
 */
enum type_number {
	TYPE_NO_TX,
	TYPE_COMMIT,
	TYPE_ABORT,
	TYPE_TYPE,
	TYPE_COMMIT_ZERO,
	TYPE_COMMIT_ZERO_MACRO,
	TYPE_ABORT_ZERO,
	TYPE_ABORT_ZERO_MACRO,
	TYPE_COMMIT_ALLOC,
	TYPE_ABORT_ALLOC,
	TYPE_ABORT_HUGE,
	TYPE_ABORT_ZERO_HUGE,
	TYPE_ABORT_ZERO_HUGE_MACRO,
	TYPE_FREE,
};
/* test object padded to exactly OBJ_SIZE bytes */
struct object {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object, 0);
/* same layout, but a distinct TOID type for the TX_ZREALLOC macro test */
struct object_macro {
	size_t value;
	char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object_macro, TYPE_COMMIT_ZERO_MACRO);
/*
 * do_tx_alloc -- transactionally allocate an object with the specified
 * type number, store 'value' in it and return its OID (OID_NULL on
 * allocation failure)
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num, size_t value)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, OID_NULL);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_alloc(
				sizeof(struct object), type_num));
		if (!TOID_IS_NULL(obj)) {
			D_RW(obj)->value = value;
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	return obj.oid;
}
/*
 * do_tx_realloc_commit -- reallocate an object to twice its usable size
 * and commit; the grown object must keep its value and remain the only
 * object of its type
 */
static void
do_tx_realloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	/* the old (pre-realloc) object must be gone */
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_abort -- reallocate an object and abort the transaction;
 * the object must survive with its original size and value
 */
static void
do_tx_realloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT, TEST_VALUE_1));
	size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* rollback restored the original (smaller) allocation */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_huge -- reallocate an object to a huge size to trigger tx
 * abort; the original object must remain intact
 */
static void
do_tx_realloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_HUGE, TEST_VALUE_1));
	size_t new_size = PMEMOBJ_MAX_ALLOC_SIZE + 1;
	TX_BEGIN(pop) {
		/* oversized realloc aborts the tx via longjmp */
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit_macro -- reallocate an object with the TX_ZREALLOC
 * macro and commit; the added region must be zeroed and the value kept
 */
static void
do_tx_zrealloc_commit_macro(PMEMobjpool *pop)
{
	TOID(struct object_macro) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* everything beyond the old size must read as zeroes */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_commit -- reallocate an object with pmemobj_tx_zrealloc
 * and commit; the added region must be zeroed and the value kept
 */
static void
do_tx_zrealloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_COMMIT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		/* everything beyond the old size must read as zeroes */
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
	UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort_macro -- reallocate an object with the TX_ZREALLOC
 * macro and abort; the object must keep its original size and value
 */
static void
do_tx_zrealloc_abort_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_MACRO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		obj = TX_ZREALLOC(obj, new_size);
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* rollback restored the original (smaller) allocation */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort -- reallocate an object with pmemobj_tx_zrealloc
 * and abort; the object must keep its original size and value
 */
static void
do_tx_zrealloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				new_size, TYPE_ABORT_ZERO));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
		UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	/* rollback restored the original (smaller) allocation */
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge_macro -- TX_ZREALLOC an object to a huge size to
 * trigger tx abort; the original object must remain intact
 */
static void
do_tx_zrealloc_huge_macro(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE_MACRO,
			TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		/* oversized realloc aborts the tx via longjmp */
		obj = TX_ZREALLOC(obj, PMEMOBJ_MAX_ALLOC_SIZE + 1);
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE_MACRO));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge -- pmemobj_tx_zrealloc an object to a huge size to
 * trigger tx abort; the original object must remain intact
 */
static void
do_tx_zrealloc_huge(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE, TEST_VALUE_1));
	size_t old_size = pmemobj_alloc_usable_size(obj.oid);
	size_t new_size = 2 * old_size;
	TX_BEGIN(pop) {
		/* oversized realloc aborts the tx via longjmp */
		TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
				PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT_ZERO_HUGE));
		UT_ASSERT(0); /* should not get to this point */
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_commit -- allocate and then reallocate an object
 * inside the same transaction and commit; the grown object must persist
 */
static void
do_tx_realloc_alloc_commit(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		/* note: do_tx_alloc opens a nested transaction here */
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_COMMIT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ALLOC));
	UT_ASSERT(!TOID_IS_NULL(obj));
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
	TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_alloc_abort -- allocate and then reallocate an object
 * inside the same transaction and abort; the rollback undoes both the
 * realloc and the allocation itself
 */
static void
do_tx_realloc_alloc_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	size_t new_size = 0;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ALLOC,
				TEST_VALUE_1));
		UT_ASSERT(!TOID_IS_NULL(obj));
		new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				new_size, TYPE_ABORT_ALLOC));
		UT_ASSERT(!TOID_IS_NULL(obj));
		UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* nothing of this type may survive the abort */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ALLOC));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_root_realloc -- retrieve (and implicitly grow) the root object
 * inside a transaction; each resize must yield a zeroed object of the
 * requested size
 */
static void
do_tx_root_realloc(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		PMEMoid root = pmemobj_root(pop, sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				sizeof(struct object)));
		UT_ASSERTeq(sizeof(struct object), pmemobj_root_size(pop));
		/* asking for a bigger root reallocates it in place */
		root = pmemobj_root(pop, 2 * sizeof(struct object));
		UT_ASSERT(!OID_IS_NULL(root));
		UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
				2 * sizeof(struct object)));
		UT_ASSERTeq(2 * sizeof(struct object), pmemobj_root_size(pop));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}
/*
 * do_tx_realloc_free -- reallocate an object to size 0, which frees it;
 * after commit no object of the original type may remain
 */
static void
do_tx_realloc_free(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_FREE, TEST_VALUE_1));
	TX_BEGIN(pop) {
		/* realloc to size 0 is equivalent to a transactional free */
		TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
				0, TYPE_COMMIT));
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE));
	UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * main -- opens the pool and runs all realloc/zrealloc test scenarios
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_realloc");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	/* poolsize 0 -- size is taken from the existing pool file */
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_tx_root_realloc(pop);
	do_tx_realloc_commit(pop);
	do_tx_realloc_abort(pop);
	do_tx_realloc_huge(pop);
	do_tx_zrealloc_commit(pop);
	do_tx_zrealloc_commit_macro(pop);
	do_tx_zrealloc_abort(pop);
	do_tx_zrealloc_abort_macro(pop);
	do_tx_zrealloc_huge(pop);
	do_tx_zrealloc_huge_macro(pop);
	do_tx_realloc_alloc_commit(pop);
	do_tx_realloc_alloc_abort(pop);
	do_tx_realloc_free(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_tx_lock.c -- unit test for pmemobj_tx_lock()
*/
#include "unittest.h"
#include "libpmemobj.h"
#include "obj.h"
#define LAYOUT_NAME "obj_tx_lock"
#define NUM_LOCKS 2
/* persistent root object holding the locks under test */
struct transaction_data {
	PMEMmutex mutexes[NUM_LOCKS];
	PMEMrwlock rwlocks[NUM_LOCKS];
};
static PMEMobjpool *Pop;
/* acquire all test locks through the current transaction */
#define DO_LOCK(mtx, rwlock)\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[0]);\
	pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[1]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[0]);\
	pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[1])
/*
 * verify all locks are free by try-locking and releasing each one;
 * requires an 'int ret' variable in the calling scope
 */
#define IS_UNLOCKED(pop, mtx, rwlock)\
	ret = 0;\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	ret += pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERTeq(ret, 0);\
	pmemobj_mutex_unlock((pop), &(mtx)[0]);\
	pmemobj_mutex_unlock((pop), &(mtx)[1]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[0]);\
	pmemobj_rwlock_unlock((pop), &(rwlock)[1])
/*
 * verify all locks are held by asserting that try-lock fails on each;
 * requires an 'int ret' variable in the calling scope
 */
#define IS_LOCKED(pop, mtx, rwlock)\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_mutex_trylock((pop), &(mtx)[1]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
	UT_ASSERT(ret != 0);\
	ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
	UT_ASSERT(ret != 0)
/*
 * do_tx_add_locks -- (internal) transaction where locks are added after
 * transaction begins; they must be held for the tx duration and released
 * automatically at TX_END
 */
static void *
do_tx_add_locks(struct transaction_data *data)
{
	int ret; /* used implicitly by the IS_(UN)LOCKED macros */
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT { /* not called */
		UT_ASSERT(0);
	} TX_END
	/* TX_END must have dropped every lock */
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested -- (internal) transaction where locks
 * are added after nested transaction begins; locks taken in the inner tx
 * stay held until the outermost TX_END
 */
static void *
do_tx_add_locks_nested(struct transaction_data *data)
{
	int ret; /* used implicitly by the IS_(UN)LOCKED macros */
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		/* inner TX_END does not release the locks yet */
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_locks_nested_all -- (internal) transaction where all locks
 * are added in both transactions after transaction begins; re-adding an
 * already-held lock in the nested tx must be harmless
 */
static void *
do_tx_add_locks_nested_all(struct transaction_data *data)
{
	int ret; /* used implicitly by the IS_(UN)LOCKED macros */
	TX_BEGIN(Pop) {
		IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
		DO_LOCK(data->mutexes, data->rwlocks);
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		TX_BEGIN(Pop) {
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
			/* locking again in the nested tx is a no-op */
			DO_LOCK(data->mutexes, data->rwlocks);
			IS_LOCKED(Pop, data->mutexes, data->rwlocks);
		} TX_END
		IS_LOCKED(Pop, data->mutexes, data->rwlocks);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	return NULL;
}
/*
 * do_tx_add_taken_lock -- (internal) verify that failed tx_lock doesn't add
 * the lock to the transaction (it must still be held by us after TX_END)
 */
static void *
do_tx_add_taken_lock(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		/* locking an already-held wrlock must fail */
		UT_ASSERTne(pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]),
				0);
	} TX_END
	/* TX_END must not have released the lock we hold */
	UT_ASSERTne(pmemobj_rwlock_trywrlock(Pop, &data->rwlocks[0]), 0);
	UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
#endif
	return NULL;
}
/*
 * do_tx_lock_fail -- call pmemobj_tx_lock with POBJ_TX_NO_ABORT flag
 * and a taken lock; without the flag the tx aborts, with the flag (or
 * POBJ_TX_FAILURE_RETURN) the error is returned and the tx commits
 */
static void *
do_tx_lock_fail(struct transaction_data *data)
{
	/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
	(void) data;
#else
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	int ret = 0;
	/* return errno and abort transaction */
	TX_BEGIN(Pop) {
		pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONABORT {
		UT_ASSERTne(errno, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
	/* return ret without abort transaction */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0],
				POBJ_XLOCK_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* same, with the failure behavior set on the tx instead */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
	/* ... and for the xlock variant with no extra flags */
	UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
	TX_BEGIN(Pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
	} TX_ONCOMMIT {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
#endif
	return NULL;
}
/*
 * do_fault_injection -- inject an allocation failure into the internal
 * "add_to_tx_and_lock" path and verify the tx aborts with ENOMEM
 */
static void
do_fault_injection(struct transaction_data *data)
{
	if (!pmemobj_fault_injection_enabled())
		return;
	pmemobj_inject_fault_at(PMEM_MALLOC, 1, "add_to_tx_and_lock");
	int ret; /* used implicitly by the IS_UNLOCKED macro */
	IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
	TX_BEGIN(Pop) {
		int err = pmemobj_tx_lock(TX_PARAM_MUTEX, &data->mutexes[0]);
		if (err)
			pmemobj_tx_abort(err);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_ASSERTeq(errno, ENOMEM);
	} TX_END
}
/*
 * main -- creates the pool, maps the root object and dispatches each
 * single-character test selector given on the command line
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_lock");
	if (argc < 3)
		UT_FATAL("usage: %s <file> [l|n|a|t|f|w]", argv[0]);
	Pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
			S_IWUSR | S_IRUSR);
	if (Pop == NULL)
		UT_FATAL("!pmemobj_create");
	PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
	struct transaction_data *data =
		(struct transaction_data *)pmemobj_direct(root);
	/* every remaining argument selects exactly one test scenario */
	for (int i = 2; i < argc; i++) {
		char op = argv[i][0];
		if (strchr("lnatfw", op) == NULL || argv[i][1] != '\0')
			UT_FATAL("op must be l or n or a or t or f or w");
		if (op == 'l')
			do_tx_add_locks(data);
		else if (op == 'n')
			do_tx_add_locks_nested(data);
		else if (op == 'a')
			do_tx_add_locks_nested_all(data);
		else if (op == 't')
			do_tx_add_taken_lock(data);
		else if (op == 'f')
			do_fault_injection(data);
		else /* 'w' */
			do_tx_lock_fail(data);
	}
	pmemobj_close(Pop);
	DONE(NULL);
}
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_memops.c -- basic memory operations tests
*
*/
#include <stddef.h>
#include "obj.h"
#include "memops.h"
#include "ulog.h"
#include "unittest.h"
#define TEST_ENTRIES 256
#define TEST_VALUES TEST_ENTRIES
/* ways in which a log can be corrupted before recovery is attempted */
enum fail_types {
	FAIL_NONE,
	FAIL_CHECKSUM,
	FAIL_MODIFY_NEXT,
	FAIL_MODIFY_VALUE,
};
/* a redo log, an undo log, and the value array the log operations target */
struct test_object {
	struct ULOG(TEST_ENTRIES) redo;
	struct ULOG(TEST_ENTRIES) undo;
	uint64_t values[TEST_VALUES];
};
/* clear_test_values -- reset every test value slot to zero between cases */
static void
clear_test_values(struct test_object *obj)
{
	/* same byte count as sizeof(uint64_t) * TEST_VALUES */
	memset(obj->values, 0, sizeof(obj->values));
}
/*
 * redo_log_constructor -- persistent-object constructor that initializes
 * freshly allocated memory as a ulog with the generation number passed
 * through 'arg'; always reports success
 */
static int
redo_log_constructor(void *ctx, void *ptr, size_t usable_size, void *arg)
{
	PMEMobjpool *pop = ctx;
	const struct pmem_ops *p_ops = &pop->p_ops;
	/* usable log capacity must be cacheline-aligned */
	size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
			CACHELINE_SIZE);
	ulog_construct(OBJ_PTR_TO_OFF(ctx, ptr), capacity,
			*(uint64_t *)arg, 1, 0, p_ops);
	return 0;
}
/*
 * pmalloc_redo_extend -- ulog extend callback: allocates a new, properly
 * constructed log buffer and links it via 'redo'
 */
static int
pmalloc_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
	size_t s = SIZEOF_ALIGNED_ULOG(TEST_ENTRIES);
	return pmalloc_construct(base, redo, s, redo_log_constructor, &gen_num,
			0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * test_free_entry -- ulog free callback for contexts that never extend
 * the log; both parameters are intentionally unused
 */
static void
test_free_entry(void *base, uint64_t *next)
{
	/* noop for fake ulog entries */
}
/*
 * test_set_entries -- log 'nentries' SET operations, then either process
 * them normally (FAIL_NONE) and verify the values were written, or cancel
 * the operation, corrupt the log in the requested way, run recovery and
 * verify that none of the logged values were applied
 */
static void
test_set_entries(PMEMobjpool *pop,
	struct operation_context *ctx, struct test_object *object,
	size_t nentries, enum fail_types fail, enum operation_log_type type)
{
	operation_start(ctx);
	UT_ASSERT(nentries <= ARRAY_SIZE(object->values));
	for (size_t i = 0; i < nentries; ++i) {
		operation_add_typed_entry(ctx,
			&object->values[i], i + 1,
			ULOG_OPERATION_SET, type);
	}
	operation_reserve(ctx, nentries * 16);
	if (fail != FAIL_NONE) {
		operation_cancel(ctx);
		/* corrupt the canceled log so recovery must reject it */
		switch (fail) {
			case FAIL_CHECKSUM:
				object->redo.checksum += 1;
			break;
			case FAIL_MODIFY_NEXT:
				pmalloc_redo_extend(pop,
					&object->redo.next, 0);
			break;
			case FAIL_MODIFY_VALUE:
				object->redo.data[16] += 8;
			break;
			default:
				UT_ASSERT(0);
		}
		ulog_recover((struct ulog *)&object->redo,
			OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
		/* a corrupted log must not be applied */
		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], 0);
	} else {
		operation_process(ctx);
		operation_finish(ctx, 0);
		for (size_t i = 0; i < nentries; ++i)
			UT_ASSERTeq(object->values[i], i + 1);
	}
}
/*
 * test_merge_op -- log several bitwise OR/AND operations on the same
 * destination; processing applies them in order, so the final value is
 * ((0b10 | 0b01) & 0b00) | 0b01 == 0b01
 */
static void
test_merge_op(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b10,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b00,
		ULOG_OPERATION_AND, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 0b01,
		ULOG_OPERATION_OR, LOG_PERSISTENT);
	operation_process(ctx);
	operation_finish(ctx, 0);
	UT_ASSERTeq(object->values[0], 0b01);
}
/*
 * test_same_twice -- log two SET operations on the same destination;
 * the later entry wins after processing
 */
static void
test_same_twice(struct operation_context *ctx, struct test_object *object)
{
	operation_start(ctx);
	operation_add_typed_entry(ctx,
		&object->values[0], 5,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_add_typed_entry(ctx,
		&object->values[0], 10,
		ULOG_OPERATION_SET, LOG_PERSISTENT);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 10);
	/* cancel instead of finish -- leaves the log unfinished on purpose */
	operation_cancel(ctx);
}
/*
 * test_redo -- drives all redo-log scenarios: full-capacity logging,
 * transient and persistent entries, merge semantics, every corruption
 * mode, and rebuilding of the redo 'next' chain
 */
static void
test_redo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/*
	 * Keep this test first.
	 * It tests a situation where the number of objects being added
	 * is equal to the capacity of the log.
	 */
	test_set_entries(pop, ctx, object, TEST_ENTRIES - 1,
		FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_TRANSIENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_merge_op(ctx, object);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_CHECKSUM, LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_VALUE,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_same_twice(ctx, object);
	clear_test_values(object);
	operation_delete(ctx);
	/*
	 * Verify that rebuilding redo_next works. This requires that
	 * object->redo->next is != 0 - to achieve that, this test must
	 * be preceded by a test that fails to finish the ulog's operation.
	 */
	ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		NULL, test_free_entry, &pop->p_ops, LOG_TYPE_REDO);
	test_set_entries(pop, ctx, object, 100, 0, LOG_PERSISTENT);
	clear_test_values(object);
	/* FAIL_MODIFY_NEXT tests can only happen after redo_next test */
	test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_NEXT,
		LOG_PERSISTENT);
	clear_test_values(object);
	operation_delete(ctx);
}
/*
 * test_undo_small_single_copy -- snapshot two values into the undo log,
 * overwrite them, and verify processing the log restores the snapshot;
 * the second pass checks that a finished log no longer restores anything
 */
static void
test_undo_small_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_CPY);
	object->values[0] = 2;
	object->values[1] = 1;
	operation_process(ctx);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	operation_start(ctx);
	/* processing restored the snapshotted values */
	UT_ASSERTeq(object->values[0], 1);
	UT_ASSERTeq(object->values[1], 2);
	object->values[0] = 2;
	object->values[1] = 1;
	/* nothing is logged this time, so processing changes nothing */
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 2);
	UT_ASSERTeq(object->values[1], 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_single_set -- log a BUF_SET over two values and verify
 * processing zeroes them out
 */
static void
test_undo_small_single_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0; /* fill value for the BUF_SET operation */
	operation_add_buffer(ctx,
		&object->values, &c, sizeof(*object->values) * 2,
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_small_multiple_set -- log two separate BUF_SET operations,
 * one per value, and verify processing zeroes both
 */
static void
test_undo_small_multiple_set(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	object->values[0] = 1;
	object->values[1] = 2;
	int c = 0; /* fill value for the BUF_SET operations */
	operation_add_buffer(ctx,
		&object->values[0], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_add_buffer(ctx,
		&object->values[1], &c, sizeof(*object->values),
		ULOG_OPERATION_BUF_SET);
	operation_process(ctx);
	UT_ASSERTeq(object->values[0], 0);
	UT_ASSERTeq(object->values[1], 0);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_single_copy -- check that a single memcpy undo entry
 * spanning the entire values array is restored on process
 */
static void
test_undo_large_single_copy(struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	/* snapshot the whole array (i + 1 pattern) */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	/* processing must roll every element back to i + 1 */
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_checksum_mismatch -- check that a log whose data has been
 * corrupted (checksum no longer matches) is NOT applied on process
 */
static void
test_undo_checksum_mismatch(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object, struct ulog *log)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 1;
	/* snapshot the first 20 values (i + 1 pattern) */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 20,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < 20; ++i)
		object->values[i] = i + 2;
	pmemobj_persist(pop, &object->values, sizeof(*object->values) * 20);
	log->data[100] += 1; /* corrupt the log somewhere */
	pmemobj_persist(pop, &log->data[100], sizeof(log->data[100]));
	operation_process(ctx);
	/* the log shouldn't get applied - values keep the i + 2 pattern */
	for (uint64_t i = 0; i < 20; ++i)
		UT_ASSERTeq(object->values[i], i + 2);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_large_copy -- check undo rollback for a full-array snapshot
 * followed by a partial (26-element) snapshot in a second operation
 */
static void
test_undo_large_copy(PMEMobjpool *pop, struct operation_context *ctx,
	struct test_object *object)
{
	operation_start(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 1;
	/* first operation: snapshot the entire array */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(object->values),
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 2;
	operation_process(ctx);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 1);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 3;
	operation_start(ctx);
	/* second operation: snapshot only the first 26 elements */
	operation_add_buffer(ctx,
		&object->values, &object->values, sizeof(*object->values) * 26,
		ULOG_OPERATION_BUF_CPY);
	for (uint64_t i = 0; i < TEST_VALUES; ++i)
		object->values[i] = i + 4;
	pmemobj_persist(pop, &object->values, sizeof(object->values));
	operation_process(ctx);
	/* only the snapshotted prefix is rolled back to i + 3 ... */
	for (uint64_t i = 0; i < 26; ++i)
		UT_ASSERTeq(object->values[i], i + 3);
	/* ... the rest keeps the newer i + 4 pattern */
	for (uint64_t i = 26; i < TEST_VALUES; ++i)
		UT_ASSERTeq(object->values[i], i + 4);
	operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
/*
 * test_undo_foreach -- ulog_foreach_entry callback; counts visited
 * entries into the size_t pointed to by arg
 */
static int
test_undo_foreach(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops)
{
	size_t *count = arg;
	*count += 1;
	return 0; /* keep iterating */
}
/*
 * drain_empty -- no-op drain callback for pmem_ops
 */
static void
drain_empty(void *ctx)
{
	(void) ctx; /* intentionally does nothing */
}
/*
 * persist_empty -- no-op persist callback for pmem_ops; always succeeds
 */
static int
persist_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * flush_empty -- no-op flush callback for pmem_ops; always succeeds
 */
static int
flush_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
	(void) ctx;
	(void) addr;
	(void) len;
	(void) flags;
	return 0;
}
/*
 * memcpy_libc -- pmem_ops memcpy callback backed by libc memcpy;
 * ctx and flags are unused
 */
static void *
memcpy_libc(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
	(void) ctx;
	(void) flags;
	return memcpy(dest, src, len);
}
/*
 * memset_libc -- pmem_ops memset callback backed by libc memset;
 * ctx and flags are unused
 */
static void *
memset_libc(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
	(void) ctx;
	(void) flags;
	return memset(ptr, c, sz);
}
/*
 * test_undo_log_reuse -- test for correct reuse of log space
 *
 * Builds a two-part volatile ulog (first -> second) with no-op pmem_ops,
 * fills it with entries twice, and verifies via ulog_foreach_entry that
 * after operation_init() old entries are overwritten, not appended.
 */
static void
test_undo_log_reuse()
{
#define ULOG_SIZE 1024
	/* all callbacks are no-ops/libc - the logs live in regular memory */
	struct pmem_ops ops = {
		.persist = persist_empty,
		.flush = flush_empty,
		.drain = drain_empty,
		.memcpy = memcpy_libc,
		.memmove = NULL,
		.memset = memset_libc,
		.base = NULL,
	};
	struct ULOG(ULOG_SIZE) *first = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	struct ULOG(ULOG_SIZE) *second = util_aligned_malloc(CACHELINE_SIZE,
		SIZEOF_ULOG(ULOG_SIZE));
	ulog_construct((uint64_t)(first), ULOG_SIZE, 0, 0, 0, &ops);
	ulog_construct((uint64_t)(second), ULOG_SIZE, 0, 0, 0, &ops);
	/* chain the two logs together */
	first->next = (uint64_t)(second);
	struct operation_context *ctx = operation_new(
		(struct ulog *)first, ULOG_SIZE,
		NULL, test_free_entry,
		&ops, LOG_TYPE_UNDO);
	/* a freshly constructed log must contain no entries */
	size_t nentries = 0;
	ulog_foreach_entry((struct ulog *)first,
		test_undo_foreach, &nentries, &ops,NULL);
	UT_ASSERTeq(nentries, 0);
	/* first, let's populate the log with some valid entries */
	size_t entry_size = (ULOG_SIZE / 2) - sizeof(struct ulog_entry_buf);
	size_t total_entries = ((ULOG_SIZE * 2) / entry_size);
	char *data = MALLOC(entry_size);
	memset(data, 0xc, entry_size); /* fill it with something */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	operation_init(ctx); /* initialize a new operation */
	/* let's overwrite old entries and see if they are no longer visible */
	for (size_t i = 0; i < total_entries; ++i) {
		operation_add_buffer(ctx, (void *)0x123, data,
			entry_size,
			ULOG_OPERATION_BUF_CPY);
		nentries = 0;
		ulog_foreach_entry((struct ulog *)first,
			test_undo_foreach, &nentries, &ops,NULL);
		UT_ASSERTeq(nentries, i + 1);
	}
	FREE(data);
	operation_delete(ctx);
	util_aligned_free(first);
	util_aligned_free(second);
#undef ULOG_SIZE
}
/*
 * test_redo_cleanup_same_size -- test redo log cleanup when a reservation
 * of the same size as the externally allocated log is published
 *
 * (The previous header comment was a copy-paste of test_undo_log_reuse.)
 */
static void
test_redo_cleanup_same_size(PMEMobjpool *pop, struct test_object *object)
{
#define ULOG_SIZE 1024
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->redo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_REDO);
	/* attach an external ULOG_SIZE log as the redo log's next */
	int ret = pmalloc(pop, &object->redo.next, ULOG_SIZE, 0, 0);
	UT_ASSERTeq(ret, 0);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_start(ctx); /* initialize a new operation */
	struct pobj_action act;
	pmemobj_reserve(pop, &act, ULOG_SIZE, 0);
	palloc_publish(&pop->heap, &act, 1, ctx);
	operation_delete(ctx);
#undef ULOG_SIZE
}
/*
 * test_undo -- runs all undo-log test scenarios on a single persistent
 * undo log and verifies its final capacity
 */
static void
test_undo(PMEMobjpool *pop, struct test_object *object)
{
	struct operation_context *ctx = operation_new(
		(struct ulog *)&object->undo, TEST_ENTRIES,
		pmalloc_redo_extend, (ulog_free_fn)pfree,
		&pop->p_ops, LOG_TYPE_UNDO);
	test_undo_small_single_copy(ctx, object);
	test_undo_small_single_set(ctx, object);
	test_undo_small_multiple_set(ctx, object);
	test_undo_large_single_copy(ctx, object);
	test_undo_large_copy(pop, ctx, object);
	test_undo_checksum_mismatch(pop, ctx, object,
		(struct ulog *)&object->undo);
	/* undo logs are clobbered at the end, which shrinks their size */
	size_t capacity = ulog_capacity((struct ulog *)&object->undo,
		TEST_ENTRIES, &pop->p_ops);
	/* builtin log + one next */
	UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
	operation_delete(ctx);
}
/*
 * main -- creates a pool with a cacheline-aligned test object and runs
 * the redo/undo log test suites against it
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memops");
	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);
	const char *path = argv[1];
	PMEMobjpool *pop = NULL;
	if ((pop = pmemobj_create(path, "obj_memops",
	    PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);
	/*
	 * The ulog API requires cacheline alignment. A cacheline-aligned
	 * allocation class is created here to properly test the ulog API.
	 * An aligned object can then be allocated using pmemobj_xalloc.
	 */
	struct pobj_alloc_class_desc new_ac = {
		.unit_size = sizeof(struct test_object),
		.alignment = CACHELINE_SIZE,
		.units_per_block = 1,
		.header_type = POBJ_HEADER_NONE,
	};
	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &new_ac) == -1)
		UT_FATAL("Failed to set allocation class");
	PMEMoid pobject;
	if (pmemobj_xalloc(pop, &pobject, sizeof(struct test_object), 0,
	    POBJ_CLASS_ID(new_ac.class_id), NULL, NULL) == -1)
		UT_FATAL("Failed to allocate object");
	struct test_object *object = pmemobj_direct(pobject);
	UT_ASSERTne(object, NULL);
	/* both logs start out empty and constructed in place */
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->undo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	ulog_construct(OBJ_PTR_TO_OFF(pop, &object->redo),
		TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
	test_redo(pop, object);
	test_undo(pop, object);
	test_redo_cleanup_same_size(pop, object);
	test_undo_log_reuse();
	pmemobj_close(pop);
	DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 15,904 | 23.319572 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_strdup/obj_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_strdup.c -- unit test for pmemobj_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
enum type_number {
TYPE_SIMPLE,
TYPE_NULL,
TYPE_SIMPLE_ALLOC,
TYPE_SIMPLE_ALLOC_1,
TYPE_SIMPLE_ALLOC_2,
TYPE_NULL_ALLOC,
TYPE_NULL_ALLOC_1,
};
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define TEST_STR_EMPTY ""
#define TEST_WCS_EMPTY L""
/*
 * do_strdup -- duplicate a string to not allocated toid using pmemobj_strdup
 */
static void
do_strdup(PMEMobjpool *pop)
{
	TOID(char) str = TOID_NULL(char);
	TOID(wchar_t) wcs = TOID_NULL(wchar_t);
	/* duplicating into a NULL toid must allocate a new object */
	pmemobj_strdup(pop, &str.oid, TEST_STR_1, TYPE_SIMPLE);
	pmemobj_wcsdup(pop, &wcs.oid, TEST_WCS_1, TYPE_SIMPLE);
	UT_ASSERT(!TOID_IS_NULL(str));
	UT_ASSERT(!TOID_IS_NULL(wcs));
	/* the persistent copies must compare equal to the sources */
	UT_ASSERTeq(strcmp(D_RO(str), TEST_STR_1), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs), TEST_WCS_1), 0);
}
/*
 * do_strdup_null -- duplicate a NULL string to not allocated toid
 */
static void
do_strdup_null(PMEMobjpool *pop)
{
	TOID(char) str = TOID_NULL(char);
	TOID(wchar_t) wcs = TOID_NULL(wchar_t);
	/* a NULL source must not allocate anything */
	pmemobj_strdup(pop, &str.oid, NULL, TYPE_NULL);
	pmemobj_wcsdup(pop, &wcs.oid, NULL, TYPE_NULL);
	UT_ASSERT(TOID_IS_NULL(str));
	UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_alloc -- allocate a char toid, fill it with a duplicate of s and
 * return it after verifying the copy
 */
static TOID(char)
do_alloc(PMEMobjpool *pop, const char *s, unsigned type_num)
{
	TOID(char) dup;
	POBJ_ZNEW(pop, &dup, char);
	pmemobj_strdup(pop, &dup.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(dup));
	UT_ASSERTeq(strcmp(D_RO(dup), s), 0);
	return dup;
}
/*
 * do_wcs_alloc -- allocate a wchar_t toid, fill it with a duplicate of
 * s and return it after verifying the copy
 */
static TOID(wchar_t)
do_wcs_alloc(PMEMobjpool *pop, const wchar_t *s, unsigned type_num)
{
	TOID(wchar_t) wdup;
	POBJ_ZNEW(pop, &wdup, wchar_t);
	pmemobj_wcsdup(pop, &wdup.oid, s, type_num);
	UT_ASSERT(!TOID_IS_NULL(wdup));
	UT_ASSERTeq(wcscmp(D_RO(wdup), s), 0);
	return wdup;
}
/*
 * do_strdup_alloc -- duplicate a string to allocated toid
 */
static void
do_strdup_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_2);
	TOID(wchar_t) wcs2 = do_wcs_alloc(pop, TEST_WCS_2, TYPE_SIMPLE_ALLOC_2);
	/* duplicating over an existing toid must replace its content */
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), D_RO(wcs2)), 0);
}
/*
 * do_strdup_null_alloc -- duplicate a NULL string to allocated toid
 */
static void
do_strdup_null_alloc(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_NULL_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_NULL_ALLOC_1);
	TOID(char) str2 = TOID_NULL(char);
	TOID(wchar_t) wcs2 = TOID_NULL(wchar_t);
	/* D_RO of a NULL toid yields NULL - source string is NULL here */
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_NULL_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_NULL_ALLOC);
	/* the existing allocations must survive a NULL-source dup */
	UT_ASSERT(!TOID_IS_NULL(str1));
	UT_ASSERT(!TOID_IS_NULL(wcs1));
}
/*
 * do_strdup_uint64_range -- duplicate string with
 * type number equal to range of unsigned long long int
 */
static void
do_strdup_uint64_range(PMEMobjpool *pop)
{
	TOID(char) str1;
	TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	TOID(char) str3;
	TOID(char) str4 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
	/* the maximum type numbers must be accepted */
	pmemobj_strdup(pop, &str1.oid, D_RO(str2), UINT64_MAX);
	pmemobj_strdup(pop, &str3.oid, D_RO(str4), UINT64_MAX - 1);
	UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
	UT_ASSERTeq(strcmp(D_RO(str3), D_RO(str4)), 0);
}
/*
 * do_strdup_alloc_empty_string -- duplicate an empty string over an
 * already-allocated toid; the result must compare equal to ""
 */
static void
do_strdup_alloc_empty_string(PMEMobjpool *pop)
{
	TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
	TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
	pmemobj_strdup(pop, &str1.oid, TEST_STR_EMPTY, TYPE_SIMPLE_ALLOC);
	pmemobj_wcsdup(pop, &wcs1.oid, TEST_WCS_EMPTY, TYPE_SIMPLE_ALLOC);
	UT_ASSERTeq(strcmp(D_RO(str1), TEST_STR_EMPTY), 0);
	UT_ASSERTeq(wcscmp(D_RO(wcs1), TEST_WCS_EMPTY), 0);
}
/*
 * main -- creates a pool and runs every pmemobj_strdup/wcsdup scenario
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_strdup");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);
	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");
	do_strdup(pop);
	do_strdup_null(pop);
	do_strdup_alloc(pop);
	do_strdup_null_alloc(pop);
	do_strdup_uint64_range(pop);
	do_strdup_alloc_empty_string(pop);
	pmemobj_close(pop);
	DONE(NULL);
}
| 5,017 | 26.571429 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_is_pmem/pmem_is_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem.c -- unit test for pmem_is_pmem()
*
* usage: pmem_is_pmem file [env]
*/
#include "unittest.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
/*
 * worker -- the work each thread performs
 *
 * Stores the pmem_is_pmem() verdict for the shared mapping into the
 * int pointed to by the thread argument.
 */
static void *
worker(void *arg)
{
	int *result = arg;
	*result = pmem_is_pmem(Addr, Size);
	return NULL;
}
/*
 * main -- maps the given file and checks that concurrent
 * pmem_is_pmem() calls from NTHREAD threads all agree
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem_is_pmem");
	if (argc < 2 || argc > 3)
		UT_FATAL("usage: %s file [env]", argv[0]);
	/* optional second argument forces the is_pmem answer via env */
	if (argc == 3)
		UT_ASSERTeq(os_setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);
	Addr = pmem_map_file(argv[1], 0, 0, 0, &Size, NULL);
	UT_ASSERTne(Addr, NULL);
	os_thread_t threads[NTHREAD];
	int ret[NTHREAD];
	/* kick off NTHREAD threads */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
	/* wait for all the threads to complete */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_JOIN(&threads[i], NULL);
	/* verify that all the threads return the same value */
	for (int i = 1; i < NTHREAD; i++)
		UT_ASSERTeq(ret[0], ret[i]);
	UT_OUT("threads.is_pmem(Addr, Size): %d", ret[0]);
	UT_ASSERTeq(os_unsetenv("PMEM_IS_PMEM_FORCE"), 0);
	UT_OUT("is_pmem(Addr, Size): %d", pmem_is_pmem(Addr, Size));
	/* zero-sized region is not pmem */
	UT_OUT("is_pmem(Addr, 0): %d", pmem_is_pmem(Addr, 0));
	UT_OUT("is_pmem(Addr + Size / 2, 0): %d",
		pmem_is_pmem((char *)Addr + Size / 2, 0));
	UT_OUT("is_pmem(Addr + Size, 0): %d",
		pmem_is_pmem((char *)Addr + Size, 0));
	DONE(NULL);
}
}
| 3,216 | 30.23301 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_obc_int/rpmem_obc_int.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc_int.c -- integration test for rpmem_obc and rpmemd_obc modules
*/
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmemd_obc.h"
#include "rpmemd_log.h"
#include "os.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define PERSIST_METHOD RPMEM_PM_GPSPM
#define RESP_ATTR_INIT {\
.port = PORT,\
.rkey = RKEY,\
.raddr = RADDR,\
.persist_method = PERSIST_METHOD,\
.nlanes = NLANES_RESP,\
}
#define REQ_ATTR_INIT {\
.pool_size = POOL_SIZE,\
.nlanes = NLANES,\
.provider = PROVIDER,\
.pool_desc = POOL_DESC,\
}
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server);
/*
 * client_create -- perform create request
 *
 * Connects to the target, issues a create request and verifies the
 * response attributes against the values the test server sends back.
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	/* nonblocking monitor must report the connection as alive */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	/* response must echo the attributes from RESP_ATTR_INIT */
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * client_open -- perform open request
 *
 * Connects to the target, issues an open request and verifies both the
 * response attributes and the returned pool attributes.
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	struct rpmem_req_attr req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	struct rpmem_pool_attr pool_attr;
	struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
	struct rpmem_resp_attr res;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	/* nonblocking monitor must report the connection as alive */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	UT_ASSERTeq(ret, 0);
	/* response must echo the attributes from RESP_ATTR_INIT */
	UT_ASSERTeq(ex_res.port, res.port);
	UT_ASSERTeq(ex_res.rkey, res.rkey);
	UT_ASSERTeq(ex_res.raddr, res.raddr);
	UT_ASSERTeq(ex_res.persist_method, res.persist_method);
	UT_ASSERTeq(ex_res.nlanes, res.nlanes);
	/* pool attributes must match POOL_ATTR_INIT byte for byte */
	UT_ASSERTeq(memcmp(&ex_pool_attr, &pool_attr,
		sizeof(ex_pool_attr)), 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * client_set_attr -- perform set attributes request
 *
 * Connects to the target and issues a set-attributes request with the
 * alternative pool attributes (POOL_ATTR_ALT).
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
	char *target = argv[0];
	int ret;
	struct rpmem_obc *rpc;
	struct rpmem_target_info *info;
	const struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
	info = rpmem_target_parse(target);
	UT_ASSERTne(info, NULL);
	rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);
	ret = rpmem_obc_connect(rpc, info);
	UT_ASSERTeq(ret, 0);
	rpmem_target_free(info);
	/* nonblocking monitor must report the connection as alive */
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_set_attr(rpc, &pool_attr);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_monitor(rpc, 1);
	UT_ASSERTeq(ret, 1);
	ret = rpmem_obc_close(rpc, 0);
	UT_ASSERTeq(ret, 0);
	ret = rpmem_obc_disconnect(rpc);
	UT_ASSERTeq(ret, 0);
	rpmem_obc_fini(rpc);
	return 1;
}
/*
 * req_arg -- request callbacks argument
 */
struct req_arg {
	struct rpmem_resp_attr resp;		/* attributes sent in responses */
	struct rpmem_pool_attr pool_attr;	/* attributes sent on open */
	int closing;				/* set once a close request arrives */
};
/*
 * req_create -- process create request
 *
 * Verifies the request and pool attributes against the client-side
 * constants, then replies with the prepared response attributes.
 */
static int
req_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_create_resp(obc, 0, &args->resp);
}
/*
 * req_open -- process open request
 *
 * Verifies the request attributes, then replies with the prepared
 * response and pool attributes.
 */
static int
req_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(ex_req.provider, req->provider);
	UT_ASSERTeq(ex_req.pool_size, req->pool_size);
	UT_ASSERTeq(ex_req.nlanes, req->nlanes);
	UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
	struct req_arg *args = arg;
	return rpmemd_obc_open_resp(obc, 0,
		&args->resp, &args->pool_attr);
}
/*
 * req_set_attr -- process set attributes request
 *
 * Verifies the incoming attributes match POOL_ATTR_ALT (sent by
 * client_set_attr), then acknowledges success.
 */
static int
req_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_ALT;
	UT_ASSERTne(arg, NULL);
	UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
	return rpmemd_obc_set_attr_resp(obc, 0);
}
/*
 * req_close -- process close request
 *
 * Flags the server loop to terminate and acknowledges the close.
 */
static int
req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	UT_ASSERTne(arg, NULL);
	struct req_arg *args = arg;
	args->closing = 1; /* tells server() to leave its process loop */
	return rpmemd_obc_close_resp(obc, 0);
}
/*
 * REQ -- server request callbacks passed to rpmemd_obc_process
 */
static struct rpmemd_obc_requests REQ = {
	.create = req_create,
	.open = req_open,
	.close = req_close,
	.set_attr = req_set_attr,
};
/*
 * server -- run server and process clients requests
 *
 * Processes requests on stdin/stdout (fds 0 and 1) until a close
 * request sets arg.closing, then expects the connection to end.
 */
int
server(const struct test_case *tc, int argc, char *argv[])
{
	int ret;
	struct req_arg arg = {
		.resp = RESP_ATTR_INIT,
		.pool_attr = POOL_ATTR_INIT,
		.closing = 0,
	};
	struct rpmemd_obc *obc;
	obc = rpmemd_obc_init(0, 1);
	UT_ASSERTne(obc, NULL);
	ret = rpmemd_obc_status(obc, 0);
	UT_ASSERTeq(ret, 0);
	/* serve requests until req_close() flags the shutdown */
	while (1) {
		ret = rpmemd_obc_process(obc, &REQ, &arg);
		if (arg.closing) {
			break;
		} else {
			UT_ASSERTeq(ret, 0);
		}
	}
	/* after close the client disconnects - process must return 1 */
	ret = rpmemd_obc_process(obc, &REQ, &arg);
	UT_ASSERTeq(ret, 1);
	rpmemd_obc_fini(obc);
	return 0;
}
/*
 * test_cases -- available test cases (selected by name on the
 * command line via TEST_CASE_PROCESS)
 */
static struct test_case test_cases[] = {
	TEST_CASE(server),
	TEST_CASE(client_create),
	TEST_CASE(client_open),
	TEST_CASE(client_set_attr),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
/*
 * main -- initializes logging for both the client and server sides,
 * then dispatches the requested test case(s)
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_obc");
	common_init("rpmem_fip",
		"RPMEM_LOG_LEVEL",
		"RPMEM_LOG_FILE", 0, 0);
	rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
	rpmemd_log_level = rpmemd_log_level_from_str(
		os_getenv("RPMEMD_LOG_LEVEL"));
	rpmem_util_cmds_init();
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	rpmem_util_cmds_fini();
	common_fini();
	rpmemd_log_close();
	DONE(NULL);
}
| 8,537 | 20.780612 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/mmap_fixed/mmap_fixed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* mmap_fixed.c -- test memory mapping with MAP_FIXED for various lengths
*
* This test is intended to be used for testing Windows implementation
* of memory mapping routines - mmap(), munmap(), msync() and mprotect().
* Those functions should provide the same functionality as their Linux
* counterparts, at least with respect to the features that are used
* in PMDK libraries.
*
* Known issues and differences between Linux and Windows implementation
* are described in src/common/mmap_windows.c.
*/
#include "unittest.h"
#include <sys/mman.h>
#define ALIGN(size) ((size) & ~(Ut_mmap_align - 1))
/*
 * test_mmap_fixed -- test fixed mappings
 *
 * Maps file1 over a region large enough for both files, then overlays
 * the second half with file2 using MAP_FIXED, and verifies the fixed
 * mapping landed exactly at the requested address.
 */
static void
test_mmap_fixed(const char *name1, const char *name2, size_t len1, size_t len2)
{
	size_t len1_aligned = ALIGN(len1);
	size_t len2_aligned = ALIGN(len2);
	UT_OUT("len: %zu (%zu) + %zu (%zu) = %zu", len1, len1_aligned,
			len2, len2_aligned, len1_aligned + len2_aligned);
	int fd1 = OPEN(name1, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	int fd2 = OPEN(name2, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
	POSIX_FALLOCATE(fd1, 0, (os_off_t)len1);
	POSIX_FALLOCATE(fd2, 0, (os_off_t)len2);
	/* reserve one contiguous region sized for both mappings */
	char *ptr1 = mmap(NULL, len1_aligned + len2_aligned,
			PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_OUT("ptr1: %p, ptr2: %p", ptr1, ptr1 + len1_aligned);
	/* MAP_FIXED must place file2 exactly at ptr1 + len1_aligned */
	char *ptr2 = mmap(ptr1 + len1_aligned, len2_aligned,
			PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, fd2, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
	UT_ASSERTeq(ptr2, ptr1 + len1_aligned);
	UT_ASSERTne(munmap(ptr1, len1_aligned), -1);
	UT_ASSERTne(munmap(ptr2, len2_aligned), -1);
	CLOSE(fd1);
	CLOSE(fd2);
	UNLINK(name1);
	UNLINK(name2);
}
/*
 * main -- parses the requested lengths and runs test_mmap_fixed for the
 * cartesian product of all length pairs
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "mmap_fixed");
	if (argc < 4)
		UT_FATAL("usage: %s dirname len1 len2 ...", argv[0]);
	size_t nlengths = (size_t)argc - 2;
	/*
	 * Fix: the original "sizeof(size_t) * (size_t)argc - 2" bound the
	 * multiplication first, allocating (8 * argc) - 2 bytes instead of
	 * room for exactly argc - 2 elements.
	 */
	size_t *lengths = MALLOC(sizeof(size_t) * nlengths);
	UT_ASSERTne(lengths, NULL);
	size_t appendix_length = 20; /* room for "\\testfileN" + NUL */
	char *name1 = MALLOC(strlen(argv[1]) + appendix_length);
	char *name2 = MALLOC(strlen(argv[1]) + appendix_length);
	/* backslash separator - this test targets Windows paths */
	sprintf(name1, "%s\\testfile1", argv[1]);
	sprintf(name2, "%s\\testfile2", argv[1]);
	for (int i = 0; i < argc - 2; i++)
		lengths[i] = ATOULL(argv[i + 2]);
	/* every length as len1 against every length as len2 */
	for (int i = 0; i < argc - 2; i++)
		for (int j = 0; j < argc - 2; j++)
			test_mmap_fixed(name1, name2, lengths[i], lengths[j]);
	FREE(name1);
	FREE(name2);
	FREE(lengths);
	DONE(NULL);
}
| 2,522 | 26.129032 | 79 | c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.