repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/memcpy_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memcpy_common.h -- header file for common memcpy utilities
*/
#ifndef MEMCPY_COMMON_H
#define MEMCPY_COMMON_H 1
#include "unittest.h"
#include "file.h"
typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void (*persist_fn)(const void *ptr, size_t len);
extern unsigned Flags[10];
void do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name, memcpy_fn fn,
unsigned flags, persist_fn p);
#endif
| 611 | 23.48 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/pmem2_memcpy.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_memcpy.c -- test for doing a memcpy from libpmem2
*
* usage: pmem2_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "memcpy_common.h"
/*
* do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
* of memcpy functions
*/
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name,
persist_fn p, memcpy_fn fn)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, fn, Flags[i], p);
}
}
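/*
 * The Flags table iterated above is defined in memcpy_common.c (not shown
 * here). A minimal sketch of what such a table can contain, built only from
 * the public libpmem2 memcpy flags -- illustrative, the real table may
 * differ:
 */
static inline unsigned
example_flags_entry(size_t i)
{
	static const unsigned example_flags[] = {
		0,
		PMEM2_F_MEM_NODRAIN,
		PMEM2_F_MEM_NONTEMPORAL,
		PMEM2_F_MEM_TEMPORAL,
		PMEM2_F_MEM_WC,
		PMEM2_F_MEM_WB,
		PMEM2_F_MEM_NOFLUSH,
	};
	return example_flags[i % ARRAY_SIZE(example_flags)];
}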
int
main(int argc, char *argv[])
{
int fd;
char *dest;
char *src;
char *src_orig;
size_t mapped_len;
struct pmem2_config *cfg;
struct pmem2_source *psrc;
struct pmem2_map *map;
if (argc != 5)
UT_FATAL("usage: %s file destoff srcoff length", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_memcpy %s %s %s %s %savx %savx512f",
argv[2], argv[3], argv[4], thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
util_init();
fd = OPEN(argv[1], O_RDWR);
UT_ASSERT(fd != -1);
int dest_off = atoi(argv[2]);
int src_off = atoi(argv[3]);
size_t bytes = strtoul(argv[4], NULL, 0);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&psrc, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, psrc, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
/* src > dst */
mapped_len = pmem2_map_get_size(map);
dest = pmem2_map_get_address(map);
if (dest == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
src_orig = src = dest + mapped_len / 2;
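	/*
	 * the mapping is split in half: dest in the lower half, src in
	 * the upper half (the assert below relies on this layout)
	 */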
UT_ASSERT(src > dest);
pmem2_persist_fn persist = pmem2_get_persist_fn(map);
memset(dest, 0, (2 * bytes));
persist(dest, 2 * bytes);
memset(src, 0, (2 * bytes));
persist(src, 2 * bytes);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
0, argv[1], persist, memcpy_fn);
src = dest;
dest = src_orig;
if (dest <= src)
UT_FATAL("cannot map files in memory order");
do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, mapped_len,
argv[1], persist, memcpy_fn);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
DONE(NULL);
}
| 2,527 | 22.849057 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_is_zeroed/util_is_zeroed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* util_is_zeroed.c -- unit test for util_is_zeroed
*/
#include "unittest.h"
#include "util.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "util_is_zeroed");
util_init();
char bigbuf[3000];
memset(bigbuf + 0, 0x11, 1000);
memset(bigbuf + 1000, 0x0, 1000);
memset(bigbuf + 2000, 0xff, 1000);
UT_ASSERTeq(util_is_zeroed(bigbuf, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1000), 1);
UT_ASSERTeq(util_is_zeroed(bigbuf + 2000, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf, 0), 1);
UT_ASSERTeq(util_is_zeroed(bigbuf + 999, 1000), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1000, 1001), 0);
UT_ASSERTeq(util_is_zeroed(bigbuf + 1001, 1000), 0);
char *buf = bigbuf + 1000;
buf[0] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[1] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[239] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[999] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 0);
memset(buf, 0, 1000);
buf[1000] = 1;
UT_ASSERTeq(util_is_zeroed(buf, 1000), 1);
DONE(NULL);
}
| 1,196 | 20.763636 | 53 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_map_file test.
* It would replace default implementation with mocked functions defined
* in pmem_map_file.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
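/*
 * Illustrative call site (not part of this header): with the defines above
 * in force, a libpmem source line such as
 *
 *	ret = os_ftruncate(fd, len);
 *
 * compiles as __wrap_os_ftruncate(fd, len) and therefore resolves to the
 * mock implemented in mocks_windows.c.
 */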
| 608 | 28 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file/mocks_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in pmem_map_file.c
* (Windows-specific)
*/
#include "unittest.h"
#define MAX_LEN (4 * 1024 * 1024)
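/*
 * Requests larger than MAX_LEN are rejected with ENOSPC by the mocks below,
 * so the test can exercise out-of-space error paths without having to fill
 * up a real filesystem.
 */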
/*
* posix_fallocate -- interpose on libc posix_fallocate()
*/
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
if (len > MAX_LEN)
return ENOSPC;
return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
* ftruncate -- interpose on libc ftruncate()
*/
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
UT_OUT("ftruncate: len %ju", len);
if (len > MAX_LEN) {
errno = ENOSPC;
return -1;
}
return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
| 868 | 21.868421 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_heap/obj_heap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_heap.c -- unit test for heap
*
* operations are: 't', 'b', 'r', 'c', 'h', 'a', 'n', 's'
* t: do test_heap, test_recycler
* b: do fault_injection in function container_new_ravl
* r: do fault_injection in function recycler_new
* c: do fault_injection in function container_new_seglists
* h: do fault_injection in function heap_boot
* a: do fault_injection in function alloc_class_new
* n: do fault_injection in function alloc_class_collection_new
* s: do fault_injection in function stats_new
*/
#include "libpmemobj.h"
#include "palloc.h"
#include "heap.h"
#include "recycler.h"
#include "obj.h"
#include "unittest.h"
#include "util.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "container.h"
#include "alloc_class.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE PMEMOBJ_MIN_POOL
#define MAX_BLOCKS 3
struct mock_pop {
PMEMobjpool p;
void *heap;
};
static int
obj_heap_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
UT_ASSERTeq(pmem_msync(ptr, sz), 0);
return 0;
}
static int
obj_heap_flush(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
UT_ASSERTeq(pmem_msync(ptr, sz), 0);
return 0;
}
static void
obj_heap_drain(void *ctx)
{
}
static void *
obj_heap_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
memset(ptr, c, sz);
UT_ASSERTeq(pmem_msync(ptr, sz), 0);
return ptr;
}
static void
init_run_with_score(struct heap_layout *l, uint32_t chunk_id, int score)
{
l->zone0.chunk_headers[chunk_id].size_idx = 1;
l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
l->zone0.chunk_headers[chunk_id].flags = 0;
struct chunk_run *run = (struct chunk_run *)
&l->zone0.chunks[chunk_id];
VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
run->hdr.alignment = 0;
run->hdr.block_size = 1024;
memset(run->content, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
UT_ASSERTeq(score % 64, 0);
score /= 64;
uint64_t *bitmap = (uint64_t *)run->content;
for (; score >= 0; --score) {
bitmap[score] = 0;
}
}
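/*
 * Helper sketch (illustrative, not used by the test): counts the blocks
 * that init_run_with_score() left free, i.e. the zeroed bits in the run
 * bitmap. A score of N clears words 0..N/64 of the bitmap, which amounts
 * to N + 64 free bits.
 */
static inline int
example_count_free_bits(struct chunk_run *run)
{
	int free_bits = 0;
	uint64_t *bitmap = (uint64_t *)run->content;
	for (size_t i = 0; i < RUN_DEFAULT_BITMAP_SIZE / sizeof(uint64_t); ++i) {
		uint64_t w = ~bitmap[i]; /* a zeroed bit means a free block */
		while (w) {
			free_bits += (int)(w & 1);
			w >>= 1;
		}
	}
	return free_bits;
}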
static void
init_run_with_max_block(struct heap_layout *l, uint32_t chunk_id)
{
l->zone0.chunk_headers[chunk_id].size_idx = 1;
l->zone0.chunk_headers[chunk_id].type = CHUNK_TYPE_RUN;
l->zone0.chunk_headers[chunk_id].flags = 0;
struct chunk_run *run = (struct chunk_run *)
&l->zone0.chunks[chunk_id];
VALGRIND_DO_MAKE_MEM_UNDEFINED(run, sizeof(*run));
uint64_t *bitmap = (uint64_t *)run->content;
run->hdr.block_size = 1024;
run->hdr.alignment = 0;
memset(bitmap, 0xFF, RUN_DEFAULT_BITMAP_SIZE);
/* the biggest block is 10 bits */
bitmap[3] =
0b1000001110111000111111110000111111000000000011111111110000000011;
}
static void
test_container(struct block_container *bc, struct palloc_heap *heap)
{
UT_ASSERTne(bc, NULL);
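	/*
	 * The initializers below fill the leading fields of struct
	 * memory_block in declaration order: chunk_id, zone_id, size_idx,
	 * block_off.
	 */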
struct memory_block a = {1, 0, 1, 4};
struct memory_block b = {1, 0, 2, 8};
struct memory_block c = {1, 0, 3, 16};
struct memory_block d = {1, 0, 5, 32};
init_run_with_score(heap->layout, 1, 128);
memblock_rebuild_state(heap, &a);
memblock_rebuild_state(heap, &b);
memblock_rebuild_state(heap, &c);
memblock_rebuild_state(heap, &d);
int ret;
ret = bc->c_ops->insert(bc, &a);
UT_ASSERTeq(ret, 0);
ret = bc->c_ops->insert(bc, &b);
UT_ASSERTeq(ret, 0);
ret = bc->c_ops->insert(bc, &c);
UT_ASSERTeq(ret, 0);
ret = bc->c_ops->insert(bc, &d);
UT_ASSERTeq(ret, 0);
struct memory_block invalid_ret = {0, 0, 6, 0};
ret = bc->c_ops->get_rm_bestfit(bc, &invalid_ret);
UT_ASSERTeq(ret, ENOMEM);
struct memory_block b_ret = {0, 0, 2, 0};
ret = bc->c_ops->get_rm_bestfit(bc, &b_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(b_ret.chunk_id, b.chunk_id);
struct memory_block a_ret = {0, 0, 1, 0};
ret = bc->c_ops->get_rm_bestfit(bc, &a_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(a_ret.chunk_id, a.chunk_id);
struct memory_block c_ret = {0, 0, 3, 0};
ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(c_ret.chunk_id, c.chunk_id);
struct memory_block d_ret = {0, 0, 4, 0}; /* less one than target */
ret = bc->c_ops->get_rm_bestfit(bc, &d_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(d_ret.chunk_id, d.chunk_id);
ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
UT_ASSERTeq(ret, ENOMEM);
ret = bc->c_ops->insert(bc, &a);
UT_ASSERTeq(ret, 0);
ret = bc->c_ops->insert(bc, &b);
UT_ASSERTeq(ret, 0);
ret = bc->c_ops->insert(bc, &c);
UT_ASSERTeq(ret, 0);
bc->c_ops->rm_all(bc);
ret = bc->c_ops->is_empty(bc);
UT_ASSERTeq(ret, 1);
ret = bc->c_ops->get_rm_bestfit(bc, &c_ret);
UT_ASSERTeq(ret, ENOMEM);
bc->c_ops->destroy(bc);
}
static void
do_fault_injection_new_ravl()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_ravl");
struct block_container *bc = container_new_ravl(NULL);
UT_ASSERTeq(bc, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
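/*
 * Every do_fault_injection_* variant in this file follows the same scheme,
 * sketched here with a stand-in target name:
 *
 *	pmemobj_inject_fault_at(PMEM_MALLOC, n, "target_function");
 *	result = target_function(...);	<- the n-th malloc inside fails
 *	UT_ASSERTeq(result, NULL);
 *	UT_ASSERTeq(errno, ENOMEM);
 */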
static void
do_fault_injection_new_seglists()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "container_new_seglists");
struct block_container *bc = container_new_seglists(NULL);
UT_ASSERTeq(bc, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
do_fault_injection_heap_boot()
{
if (!pmemobj_fault_injection_enabled())
return;
struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
Ut_mmap_align);
PMEMobjpool *pop = &mpop->p;
pop->p_ops.persist = obj_heap_persist;
uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
struct pmem_ops *p_ops = &pop->p_ops;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "heap_boot");
int r = heap_boot(NULL, NULL, heap_size, &pop->heap_size, NULL, p_ops,
NULL, NULL);
UT_ASSERTne(r, 0);
UT_ASSERTeq(errno, ENOMEM);
}
static void
do_fault_injection_recycler()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "recycler_new");
size_t active_arenas = 1;
struct recycler *r = recycler_new(NULL, 0, &active_arenas);
UT_ASSERTeq(r, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
do_fault_injection_class_new(int i)
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, i, "alloc_class_new");
struct alloc_class_collection *c = alloc_class_collection_new();
UT_ASSERTeq(c, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
do_fault_injection_class_collection_new()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "alloc_class_collection_new");
struct alloc_class_collection *c = alloc_class_collection_new();
UT_ASSERTeq(c, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
do_fault_injection_stats()
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "stats_new");
struct stats *s = stats_new(NULL);
UT_ASSERTeq(s, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
test_heap(void)
{
struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
Ut_mmap_align);
PMEMobjpool *pop = &mpop->p;
memset(pop, 0, MOCK_POOL_SIZE);
pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
pop->p_ops.persist = obj_heap_persist;
pop->p_ops.flush = obj_heap_flush;
pop->p_ops.drain = obj_heap_drain;
pop->p_ops.memset = obj_heap_memset;
pop->p_ops.base = pop;
pop->set = MALLOC(sizeof(*(pop->set)));
pop->set->options = 0;
pop->set->directory_based = 0;
struct stats *s = stats_new(pop);
UT_ASSERTne(s, NULL);
void *heap_start = (char *)pop + pop->heap_offset;
uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
struct palloc_heap *heap = &pop->heap;
struct pmem_ops *p_ops = &pop->p_ops;
UT_ASSERT(heap_check(heap_start, heap_size) != 0);
UT_ASSERT(heap_init(heap_start, heap_size,
&pop->heap_size, p_ops) == 0);
UT_ASSERT(heap_boot(heap, heap_start, heap_size,
&pop->heap_size,
pop, p_ops, s, pop->set) == 0);
UT_ASSERT(heap_buckets_init(heap) == 0);
UT_ASSERT(pop->heap.rt != NULL);
test_container((struct block_container *)container_new_ravl(heap),
heap);
test_container((struct block_container *)container_new_seglists(heap),
heap);
struct alloc_class *c_small = heap_get_best_class(heap, 1);
struct alloc_class *c_big = heap_get_best_class(heap, 2048);
UT_ASSERT(c_small->unit_size < c_big->unit_size);
	/* the class chosen for the larger size should be run-based */
	UT_ASSERT(c_big->type == CLASS_RUN);
struct memory_block blocks[MAX_BLOCKS] = {
{0, 0, 1, 0},
{0, 0, 1, 0},
{0, 0, 1, 0}
};
struct bucket *b_def = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
for (int i = 0; i < MAX_BLOCKS; ++i) {
heap_get_bestfit_block(heap, b_def, &blocks[i]);
UT_ASSERT(blocks[i].block_off == 0);
}
heap_bucket_release(heap, b_def);
struct memory_block old_run = {0, 0, 1, 0};
struct memory_block new_run = {0, 0, 0, 0};
struct alloc_class *c_run = heap_get_best_class(heap, 1024);
struct bucket *b_run = heap_bucket_acquire(heap, c_run->id,
HEAP_ARENA_PER_THREAD);
/*
* Allocate blocks from a run until one run is exhausted.
*/
UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
do {
new_run.chunk_id = 0;
new_run.block_off = 0;
new_run.size_idx = 1;
UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
ENOMEM);
UT_ASSERTne(new_run.size_idx, 0);
} while (old_run.block_off != new_run.block_off);
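	/*
	 * The loop above terminates once an allocation lands at the same
	 * block offset as the very first one, which can only happen after
	 * the original run was exhausted and a fresh run was started.
	 */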
heap_bucket_release(heap, b_run);
stats_delete(pop, s);
UT_ASSERT(heap_check(heap_start, heap_size) == 0);
heap_cleanup(heap);
UT_ASSERT(heap->rt == NULL);
FREE(pop->set);
MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
/*
* test_heap_with_size -- tests scenarios with not-nicely aligned sizes
*/
static void
test_heap_with_size()
{
/*
	 * To trigger the bug with incorrect metadata alignment, we need
	 * to use a pool size that matches exactly the size used in the
	 * buggy zone-size calculations.
*/
size_t size = PMEMOBJ_MIN_POOL + sizeof(struct zone_header) +
sizeof(struct chunk_header) * MAX_CHUNK +
sizeof(PMEMobjpool);
struct mock_pop *mpop = MMAP_ANON_ALIGNED(size,
Ut_mmap_align);
PMEMobjpool *pop = &mpop->p;
memset(pop, 0, size);
pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
pop->p_ops.persist = obj_heap_persist;
pop->p_ops.flush = obj_heap_flush;
pop->p_ops.drain = obj_heap_drain;
pop->p_ops.memset = obj_heap_memset;
pop->p_ops.base = pop;
pop->set = MALLOC(sizeof(*(pop->set)));
pop->set->options = 0;
pop->set->directory_based = 0;
void *heap_start = (char *)pop + pop->heap_offset;
uint64_t heap_size = size - sizeof(PMEMobjpool);
struct palloc_heap *heap = &pop->heap;
struct pmem_ops *p_ops = &pop->p_ops;
UT_ASSERT(heap_check(heap_start, heap_size) != 0);
UT_ASSERT(heap_init(heap_start, heap_size,
&pop->heap_size, p_ops) == 0);
UT_ASSERT(heap_boot(heap, heap_start, heap_size,
&pop->heap_size,
pop, p_ops, NULL, pop->set) == 0);
UT_ASSERT(heap_buckets_init(heap) == 0);
UT_ASSERT(pop->heap.rt != NULL);
struct bucket *b_def = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD);
struct memory_block mb;
mb.size_idx = 1;
while (heap_get_bestfit_block(heap, b_def, &mb) == 0)
;
/* mb should now be the last chunk in the heap */
char *ptr = mb.m_ops->get_real_data(&mb);
size_t s = mb.m_ops->get_real_size(&mb);
/* last chunk should be within the heap and accessible */
UT_ASSERT((size_t)ptr + s <= (size_t)mpop + size);
VALGRIND_DO_MAKE_MEM_DEFINED(ptr, s);
memset(ptr, 0xc, s);
heap_bucket_release(heap, b_def);
UT_ASSERT(heap_check(heap_start, heap_size) == 0);
heap_cleanup(heap);
UT_ASSERT(heap->rt == NULL);
FREE(pop->set);
MUNMAP_ANON_ALIGNED(mpop, size);
}
static void
test_recycler(void)
{
struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
Ut_mmap_align);
PMEMobjpool *pop = &mpop->p;
memset(pop, 0, MOCK_POOL_SIZE);
pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
pop->p_ops.persist = obj_heap_persist;
pop->p_ops.flush = obj_heap_flush;
pop->p_ops.drain = obj_heap_drain;
pop->p_ops.memset = obj_heap_memset;
pop->p_ops.base = pop;
pop->set = MALLOC(sizeof(*(pop->set)));
pop->set->options = 0;
pop->set->directory_based = 0;
void *heap_start = (char *)pop + pop->heap_offset;
uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
struct palloc_heap *heap = &pop->heap;
struct pmem_ops *p_ops = &pop->p_ops;
struct stats *s = stats_new(pop);
UT_ASSERTne(s, NULL);
UT_ASSERT(heap_check(heap_start, heap_size) != 0);
UT_ASSERT(heap_init(heap_start, heap_size,
&pop->heap_size, p_ops) == 0);
UT_ASSERT(heap_boot(heap, heap_start, heap_size,
&pop->heap_size,
pop, p_ops, s, pop->set) == 0);
UT_ASSERT(heap_buckets_init(heap) == 0);
UT_ASSERT(pop->heap.rt != NULL);
/* trigger heap bucket populate */
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = 1;
struct bucket *b = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
heap_bucket_release(heap, b);
int ret;
size_t active_arenas = 1;
struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */,
&active_arenas);
UT_ASSERTne(r, NULL);
init_run_with_score(pop->heap.layout, 0, 64);
init_run_with_score(pop->heap.layout, 1, 128);
init_run_with_score(pop->heap.layout, 15, 0);
struct memory_block mrun = {0, 0, 1, 0};
struct memory_block mrun2 = {1, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun);
memblock_rebuild_state(&pop->heap, &mrun2);
ret = recycler_put(r, &mrun,
recycler_element_new(&pop->heap, &mrun));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun2,
recycler_element_new(&pop->heap, &mrun2));
UT_ASSERTeq(ret, 0);
struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
mrun_ret.size_idx = 1;
struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
mrun2_ret.size_idx = 1;
ret = recycler_get(r, &mrun_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun2_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
init_run_with_score(pop->heap.layout, 7, 64);
init_run_with_score(pop->heap.layout, 2, 128);
init_run_with_score(pop->heap.layout, 5, 192);
init_run_with_score(pop->heap.layout, 10, 256);
mrun.chunk_id = 7;
mrun2.chunk_id = 2;
struct memory_block mrun3 = {5, 0, 1, 0};
struct memory_block mrun4 = {10, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun3);
memblock_rebuild_state(&pop->heap, &mrun4);
mrun_ret.size_idx = 1;
mrun2_ret.size_idx = 1;
struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
mrun3_ret.size_idx = 1;
struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
mrun4_ret.size_idx = 1;
ret = recycler_put(r, &mrun,
recycler_element_new(&pop->heap, &mrun));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun2,
recycler_element_new(&pop->heap, &mrun2));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun3,
recycler_element_new(&pop->heap, &mrun3));
UT_ASSERTeq(ret, 0);
ret = recycler_put(r, &mrun4,
recycler_element_new(&pop->heap, &mrun4));
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun2_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun3_ret);
UT_ASSERTeq(ret, 0);
ret = recycler_get(r, &mrun4_ret);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);
init_run_with_max_block(pop->heap.layout, 1);
struct memory_block mrun5 = {1, 0, 1, 0};
memblock_rebuild_state(&pop->heap, &mrun5);
ret = recycler_put(r, &mrun5,
recycler_element_new(&pop->heap, &mrun5));
UT_ASSERTeq(ret, 0);
struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
mrun5_ret.size_idx = 11;
ret = recycler_get(r, &mrun5_ret);
UT_ASSERTeq(ret, ENOMEM);
mrun5_ret = MEMORY_BLOCK_NONE;
mrun5_ret.size_idx = 10;
ret = recycler_get(r, &mrun5_ret);
UT_ASSERTeq(ret, 0);
recycler_delete(r);
stats_delete(pop, s);
heap_cleanup(heap);
UT_ASSERT(heap->rt == NULL);
FREE(pop->set);
MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_heap");
if (argc < 2)
UT_FATAL("usage: %s path <t|b|r|c|h|a|n|s>", argv[0]);
switch (argv[1][0]) {
case 't':
test_heap();
test_heap_with_size();
test_recycler();
break;
case 'b':
do_fault_injection_new_ravl();
break;
case 'r':
do_fault_injection_recycler();
break;
case 'c':
do_fault_injection_new_seglists();
break;
case 'h':
do_fault_injection_heap_boot();
break;
case 'a':
/* first call alloc_class_new */
do_fault_injection_class_new(1);
/* second call alloc_class_new */
do_fault_injection_class_new(2);
break;
case 'n':
do_fault_injection_class_collection_new();
break;
case 's':
do_fault_injection_stats();
break;
default:
UT_FATAL("unknown operation");
}
DONE(NULL);
}
| 16,917 | 25.027692 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/movnt_align_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* movnt_align_common.c -- common part for tests doing a persistent movnt align
*/
#include "unittest.h"
#include "movnt_align_common.h"
char *Src;
char *Dst;
char *Scratch;
/*
* check_memmove -- invoke check function with pmem_memmove_persist
*/
void
check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
unsigned flags)
{
memset(Dst + doff, 1, len);
memset(Src + soff, 0, len);
fn(Dst + doff, Src + soff, len, flags);
if (memcmp(Dst + doff, Src + soff, len))
UT_FATAL("memcpy/memmove failed");
}
/*
 * check_memcpy -- invoke check function with pmem_memcpy_persist
*/
void
check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
unsigned flags)
{
memset(Dst, 2, N_BYTES);
memset(Src, 3, N_BYTES);
memset(Scratch, 2, N_BYTES);
memset(Dst + doff, 1, len);
memset(Src + soff, 0, len);
memcpy(Scratch + doff, Src + soff, len);
fn(Dst + doff, Src + soff, len, flags);
if (memcmp(Dst, Scratch, N_BYTES))
UT_FATAL("memcpy/memmove failed");
}
/*
* check_memset -- check pmem_memset_no_drain function
*/
void
check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags)
{
memset(Scratch, 2, N_BYTES);
memset(Scratch + off, 1, len);
memset(Dst, 2, N_BYTES);
fn(Dst + off, 1, len, flags);
if (memcmp(Dst, Scratch, N_BYTES))
UT_FATAL("memset failed");
}
unsigned Flags[] = {
0,
PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_NONTEMPORAL,
PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_WC,
PMEM_F_MEM_WB,
PMEM_F_MEM_NOFLUSH,
/* all possible flags */
PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 1,830 | 21.060241 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/pmem2_movnt_align.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt_align.c -- test for functions with non-temporal stores
*
* usage: pmem2_movnt_align file [C|F|B|S]
*
* C - pmem2_memcpy()
* B - pmem2_memmove() in backward direction
* F - pmem2_memmove() in forward direction
* S - pmem2_memset()
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem2.h"
#include "unittest.h"
#include "movnt_align_common.h"
#include "ut_pmem2.h"
static pmem2_memset_fn memset_fn;
static pmem2_memcpy_fn memcpy_fn;
static pmem2_memmove_fn memmove_fn;
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memmove(doff, soff, len, memmove_fn, Flags[i]);
}
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memcpy(doff, soff, len, memcpy_fn, Flags[i]);
}
static void
check_memset_variants(size_t off, size_t len)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memset(off, len, memset_fn, Flags[i]);
}
int
main(int argc, char *argv[])
{
if (argc != 3)
UT_FATAL("usage: %s file type", argv[0]);
struct pmem2_config *cfg;
struct pmem2_source *src;
struct pmem2_map *map;
int fd;
char type = argv[2][0];
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_movnt_align %c %s %savx %savx512f", type,
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&src, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
memset_fn = pmem2_get_memset_fn(map);
memcpy_fn = pmem2_get_memcpy_fn(map);
memmove_fn = pmem2_get_memmove_fn(map);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
size_t page_size = Ut_pagesize;
size_t s;
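	/*
	 * Buffer layout per case (for reference): 'C' and 'S' use separate
	 * anonymous mappings for Src and Dst; 'B' and 'F' carve two
	 * overlapping N_BYTES regions out of one mapping, offset by
	 * N_BYTES - page_size from each other, so memmove must handle the
	 * overlap in the right direction.
	 */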
switch (type) {
case 'C': /* memcpy */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Src == NULL || Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memcpy with 0 size */
check_memcpy_variants(0, 0, 0);
/* check memcpy with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(0, 0, N_BYTES - s);
/* check memcpy with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, 0, N_BYTES - s);
/* check memcpy with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, N_BYTES);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
case 'B': /* memmove backward */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Dst = Src + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in backward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in backward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in backward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in backward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
break;
case 'F': /* memmove forward */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Src = Dst + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in forward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in forward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in forward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in forward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
break;
case 'S': /* memset */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memset with 0 size */
check_memset_variants(0, 0);
/* check memset with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(0, N_BYTES - s);
/* check memset with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - s);
/* check memset with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
default:
UT_FATAL("!wrong type of test");
break;
}
DONE(NULL);
}
| 5,283 | 24.042654 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt_align/movnt_align_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* movnt_align_common.h -- header file for common movnt_align test utilities
*/
#ifndef MOVNT_ALIGN_COMMON_H
#define MOVNT_ALIGN_COMMON_H 1
#include "unittest.h"
#include "file.h"
#define N_BYTES (Ut_pagesize * 2)
extern char *Src;
extern char *Dst;
extern char *Scratch;
extern unsigned Flags[10];
typedef void *(*mem_fn)(void *, const void *, size_t);
typedef void *pmem_memcpy_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags);
void check_memmove(size_t doff, size_t soff, size_t len, pmem_memmove_fn fn,
unsigned flags);
void check_memcpy(size_t doff, size_t soff, size_t len, pmem_memcpy_fn fn,
unsigned flags);
void check_memset(size_t off, size_t len, pmem_memset_fn fn, unsigned flags);
#endif
| 989 | 26.5 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memmove/pmem_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memmove.c -- unit test for doing a memmove
*
* usage:
 * pmem_memmove file b:length [d:offset] [s:offset] [o:{0|1}]
 *
 * o:1 reuses a single buffer as both source and destination (overlap case)
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memmove_common.h"
typedef void *pmem_memmove_fn(void *pmemdest, const void *src, size_t len,
unsigned flags);
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_persist(pmemdest, src, len);
}
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_nodrain(pmemdest, src, len);
}
static void
do_persist_ddax(const void *ptr, size_t size)
{
util_persist_auto(1, ptr, size);
}
static void
do_persist(const void *ptr, size_t size)
{
util_persist_auto(0, ptr, size);
}
/*
 * swap_mappings - given two mmapped regions, swap them.
 *
 * Try swapping src and dest by unmapping src, then mapping a new dest with
 * the original src address as a hint. If successful, unmap the original
 * dest and map a new src with the original dest address as a hint.
 * In the event of an error, the caller must unmap all passed-in mappings.
*/
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
char *d = *dest;
char *s = *src;
char *ts;
char *td;
MUNMAP(*src, size);
/* mmap destination using src addr as hint */
td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
MUNMAP(*dest, size);
*dest = td;
/* mmap src using original destination addr as a hint */
ts = MMAP(d, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
*src = ts;
}
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
size_t dest_off, size_t src_off, size_t bytes, persist_fn p)
{
do_memmove(dst, src, file_name, dest_off, src_off,
bytes, pmem_memmove_persist_wrapper, 0, p);
do_memmove(dst, src, file_name, dest_off, src_off,
bytes, pmem_memmove_nodrain_wrapper, 0, p);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memmove(dst, src, file_name, dest_off, src_off,
bytes, pmem_memmove, Flags[i], p);
}
}
int
main(int argc, char *argv[])
{
int fd;
char *dst;
char *src;
size_t dst_off = 0;
size_t src_off = 0;
size_t bytes = 0;
int who = 0;
size_t mapped_len;
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_memmove %s %s %s %s %savx %savx512f",
argc > 2 ? argv[2] : "null",
argc > 3 ? argv[3] : "null",
argc > 4 ? argv[4] : "null",
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
enum file_type type = util_fd_get_type(fd);
if (type < 0)
UT_FATAL("cannot check type of file %s", argv[1]);
persist_fn p;
p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
if (argc < 3)
USAGE();
for (int arg = 2; arg < argc; arg++) {
if (strchr("dsbo",
argv[arg][0]) == NULL || argv[arg][1] != ':')
UT_FATAL("op must be d: or s: or b: or o:");
size_t val = STRTOUL(&argv[arg][2], NULL, 0);
switch (argv[arg][0]) {
case 'd':
if (val <= 0)
UT_FATAL("bad offset (%lu) with d: option",
val);
dst_off = val;
break;
case 's':
if (val <= 0)
UT_FATAL("bad offset (%lu) with s: option",
val);
src_off = val;
break;
case 'b':
if (val <= 0)
UT_FATAL("bad length (%lu) with b: option",
val);
bytes = val;
break;
case 'o':
if (val != 1 && val != 0)
UT_FATAL("bad val (%lu) with o: option",
val);
who = (int)val;
break;
}
}
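	/*
	 * Example invocations (illustrative):
	 *	pmem_memmove file b:4096		separate src/dst mappings
	 *	pmem_memmove file b:4096 d:64 s:128	with offsets
	 *	pmem_memmove file b:4096 o:1		overlap in one buffer
	 */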
if (who == 0) {
/* src > dest */
dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
if (dst == NULL)
UT_FATAL("!could not mmap dest file %s", argv[1]);
src = MMAP(dst + mapped_len, mapped_len,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
/*
		 * It's very unlikely that src would not be > dest.
		 * pmem_map_file chooses the first unused address >= 1TB
		 * that is large enough to hold the given range and is
		 * 1GB aligned. Log the error if the mapped addresses
		 * cannot be swapped, but allow the test to continue.
*/
if (src <= dst) {
swap_mappings(&dst, &src, mapped_len, fd);
if (src <= dst)
UT_FATAL("cannot map files in memory order");
}
do_memmove_variants(dst, src, argv[1],
dst_off, src_off, bytes, p);
/* dest > src */
swap_mappings(&dst, &src, mapped_len, fd);
if (dst <= src)
UT_FATAL("cannot map files in memory order");
do_memmove_variants(dst, src, argv[1],
dst_off, src_off, bytes, p);
int ret = pmem_unmap(dst, mapped_len);
UT_ASSERTeq(ret, 0);
MUNMAP(src, mapped_len);
} else {
/* use the same buffer for source and destination */
dst = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
if (dst == NULL)
UT_FATAL("!Could not mmap %s: \n", argv[1]);
memset(dst, 0, bytes);
p(dst, bytes);
do_memmove_variants(dst, dst, argv[1],
dst_off, src_off, bytes, p);
int ret = pmem_unmap(dst, mapped_len);
UT_ASSERTeq(ret, 0);
}
CLOSE(fd);
DONE(NULL);
}
| 5,226 | 22.334821 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_heap_size/obj_ctl_heap_size.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_heap_size.c -- tests for the ctl entry points: heap.size.*
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_heap_size"
#define CUSTOM_GRANULARITY ((1 << 20) * 10)
#define OBJ_SIZE 1024
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_heap_size");
if (argc != 3)
UT_FATAL("usage: %s poolset [w|x]", argv[0]);
const char *path = argv[1];
char t = argv[2][0];
PMEMobjpool *pop;
if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
int ret = 0;
size_t disable_granularity = 0;
ret = pmemobj_ctl_set(pop, "heap.size.granularity",
&disable_granularity);
UT_ASSERTeq(ret, 0);
/* allocate until OOM */
while (pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL) == 0)
;
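	/*
	 * From here on every allocation fails with ENOMEM: with the
	 * granularity set to 0 above the pool no longer grows on demand,
	 * so space must be added explicitly (heap.size.extend) or automatic
	 * growth re-enabled (heap.size.granularity).
	 */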
if (t == 'x') {
ssize_t extend_size = CUSTOM_GRANULARITY;
ret = pmemobj_ctl_exec(pop, "heap.size.extend", &extend_size);
UT_ASSERTeq(ret, 0);
} else if (t == 'w') {
ssize_t new_granularity = CUSTOM_GRANULARITY;
ret = pmemobj_ctl_set(pop, "heap.size.granularity",
&new_granularity);
UT_ASSERTeq(ret, 0);
ssize_t curr_granularity;
ret = pmemobj_ctl_get(pop, "heap.size.granularity",
&curr_granularity);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(new_granularity, curr_granularity);
} else {
UT_ASSERT(0);
}
/* should succeed */
ret = pmemobj_alloc(pop, NULL, OBJ_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
pmemobj_close(pop);
DONE(NULL);
}
| 1,500 | 21.402985 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_basic_integration/obj_basic_integration.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_basic_integration.c -- Basic integration tests
*
*/
#include <stddef.h>
#include "unittest.h"
#include "obj.h"
#define TEST_STR "abcdefgh"
#define TEST_STR_LEN 8
#define TEST_VALUE 5
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(basic);
POBJ_LAYOUT_ROOT(basic, struct dummy_root);
POBJ_LAYOUT_TOID(basic, struct dummy_node);
POBJ_LAYOUT_TOID(basic, struct dummy_node_c);
POBJ_LAYOUT_END(basic);
struct dummy_node {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
struct dummy_node_c {
int value;
char teststr[TEST_STR_LEN];
POBJ_LIST_ENTRY(struct dummy_node) plist;
POBJ_LIST_ENTRY(struct dummy_node) plist_m;
};
struct dummy_root {
int value;
PMEMmutex lock;
TOID(struct dummy_node) node;
POBJ_LIST_HEAD(dummy_list, struct dummy_node) dummies;
POBJ_LIST_HEAD(moved_list, struct dummy_node) moved;
};
static int
dummy_node_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
struct dummy_node *n = (struct dummy_node *)ptr;
int *test_val = (int *)arg;
n->value = *test_val;
pmemobj_persist(pop, &n->value, sizeof(n->value));
return 0;
}
static void
test_alloc_api(PMEMobjpool *pop)
{
TOID(struct dummy_node) node_zeroed;
TOID(struct dummy_node_c) node_constructed;
POBJ_ZNEW(pop, &node_zeroed, struct dummy_node);
UT_ASSERT_rt(OID_INSTANCEOF(node_zeroed.oid, struct dummy_node));
int *test_val = (int *)MALLOC(sizeof(*test_val));
*test_val = TEST_VALUE;
POBJ_NEW(pop, &node_constructed, struct dummy_node_c,
dummy_node_constructor, test_val);
FREE(test_val);
TOID(struct dummy_node) iter;
POBJ_FOREACH_TYPE(pop, iter) {
UT_ASSERTeq(D_RO(iter)->value, 0);
}
TOID(struct dummy_node_c) iter_c;
POBJ_FOREACH_TYPE(pop, iter_c) {
UT_ASSERTeq(D_RO(iter_c)->value, TEST_VALUE);
}
PMEMoid oid_iter;
int nodes_count = 0;
POBJ_FOREACH(pop, oid_iter) {
nodes_count++;
}
UT_ASSERTne(nodes_count, 0);
POBJ_FREE(&node_zeroed);
POBJ_FREE(&node_constructed);
nodes_count = 0;
POBJ_FOREACH(pop, oid_iter) {
nodes_count++;
}
UT_ASSERTeq(nodes_count, 0);
int val = 10;
POBJ_ALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c),
dummy_node_constructor, &val);
POBJ_REALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c) + 1000);
UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
TOID_TYPE_NUM(struct dummy_node_c));
POBJ_ZREALLOC(pop, &node_constructed, struct dummy_node_c,
sizeof(struct dummy_node_c) + 2000);
UT_ASSERTeq(pmemobj_type_num(node_constructed.oid),
TOID_TYPE_NUM(struct dummy_node_c));
POBJ_FREE(&node_constructed);
POBJ_ZALLOC(pop, &node_zeroed, struct dummy_node,
sizeof(struct dummy_node));
POBJ_FREE(&node_zeroed);
PMEMoid oid = OID_NULL;
POBJ_FREE(&oid);
int err = 0;
err = pmemobj_alloc(pop, NULL, SIZE_MAX, 0, NULL, NULL);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_zalloc(pop, NULL, SIZE_MAX, 0);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_alloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0, NULL,
NULL);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
err = pmemobj_zalloc(pop, NULL, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
UT_ASSERTeq(err, -1);
UT_ASSERTeq(errno, ENOMEM);
}
static void
test_realloc_api(PMEMobjpool *pop)
{
PMEMoid oid = OID_NULL;
int ret;
ret = pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("alloc: %u, size: %zu", 128,
pmemobj_alloc_usable_size(oid));
/* grow */
ret = pmemobj_realloc(pop, &oid, 655360, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 128, 655360,
pmemobj_alloc_usable_size(oid));
/* shrink */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 655360, 1,
pmemobj_alloc_usable_size(oid));
/* free */
ret = pmemobj_realloc(pop, &oid, 0, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(OID_IS_NULL(oid));
UT_OUT("free");
/* alloc */
ret = pmemobj_realloc(pop, &oid, 777, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 0, 777,
pmemobj_alloc_usable_size(oid));
/* shrink */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 777, 1,
pmemobj_alloc_usable_size(oid));
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
UT_ASSERTeq(pmemobj_alloc_usable_size(oid), 0);
UT_OUT("free");
/* alloc */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 0, 1,
pmemobj_alloc_usable_size(oid));
/* do nothing */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
UT_OUT("realloc: %u => %u, size: %zu", 1, 1,
pmemobj_alloc_usable_size(oid));
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
UT_OUT("free");
/* do nothing */
ret = pmemobj_realloc(pop, &oid, 0, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(OID_IS_NULL(oid));
/* alloc */
ret = pmemobj_realloc(pop, &oid, 1, 0);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(oid));
/* grow beyond reasonable size */
ret = pmemobj_realloc(pop, &oid, SIZE_MAX, 0);
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(errno, ENOMEM);
ret = pmemobj_realloc(pop, &oid, PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(errno, ENOMEM);
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
}
static void
test_list_api(PMEMobjpool *pop)
{
TOID(struct dummy_root) root;
root = POBJ_ROOT(pop, struct dummy_root);
int nodes_count = 0;
UT_ASSERTeq(pmemobj_type_num(root.oid), POBJ_ROOT_TYPE_NUM);
UT_COMPILE_ERROR_ON(TOID_TYPE_NUM_OF(root) != POBJ_ROOT_TYPE_NUM);
TOID(struct dummy_node) first;
TOID(struct dummy_node) iter;
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 0);
int test_val = TEST_VALUE;
PMEMoid ret;
/* should fail */
ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
SIZE_MAX, dummy_node_constructor,
&test_val);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(OID_IS_NULL(ret));
errno = 0;
ret = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
PMEMOBJ_MAX_ALLOC_SIZE + 1, dummy_node_constructor,
&test_val);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(OID_IS_NULL(ret));
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->dummies, plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
test_val++;
POBJ_LIST_INSERT_NEW_TAIL(pop, &D_RW(root)->dummies, plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
TOID(struct dummy_node) inserted =
POBJ_LIST_FIRST(&D_RW(root)->dummies);
UT_ASSERTeq(pmemobj_type_num(inserted.oid),
TOID_TYPE_NUM(struct dummy_node));
TOID(struct dummy_node) node;
POBJ_ZNEW(pop, &node, struct dummy_node);
POBJ_LIST_INSERT_HEAD(pop, &D_RW(root)->dummies, node, plist);
nodes_count = 0;
POBJ_LIST_FOREACH(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH: dummy_node %d", D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 3);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
iter = first;
do {
UT_OUT("POBJ_LIST_NEXT: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_NEXT(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 3);
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->dummies,
&D_RW(root)->moved, node, plist, plist_m);
UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->moved,
&D_RW(root)->dummies, node, plist_m, plist);
POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->dummies,
&D_RW(root)->moved, node, plist, plist_m);
UT_ASSERTeq(POBJ_LIST_EMPTY(&D_RW(root)->moved), 0);
POBJ_LIST_MOVE_ELEMENT_TAIL(pop, &D_RW(root)->moved,
&D_RW(root)->dummies, node, plist_m, plist);
POBJ_LIST_REMOVE(pop, &D_RW(root)->dummies, node, plist);
POBJ_LIST_INSERT_TAIL(pop, &D_RW(root)->dummies, node, plist);
POBJ_LIST_REMOVE_FREE(pop, &D_RW(root)->dummies, node, plist);
nodes_count = 0;
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 2);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_FIRST(&D_RO(root)->dummies);
iter = first;
do {
UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_PREV(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 2);
test_val++;
POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(root)->dummies,
POBJ_LIST_FIRST(&D_RO(root)->dummies), plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
test_val++;
POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(root)->dummies,
POBJ_LIST_LAST(&D_RO(root)->dummies, plist), plist,
sizeof(struct dummy_node), dummy_node_constructor,
&test_val);
nodes_count = 0;
POBJ_LIST_FOREACH_REVERSE(iter, &D_RO(root)->dummies, plist) {
UT_OUT("POBJ_LIST_FOREACH_REVERSE: dummy_node %d",
D_RO(iter)->value);
nodes_count++;
}
UT_ASSERTeq(nodes_count, 4);
/* now do the same, but w/o using FOREACH macro */
nodes_count = 0;
first = POBJ_LIST_LAST(&D_RO(root)->dummies, plist);
iter = first;
do {
UT_OUT("POBJ_LIST_PREV: dummy_node %d", D_RO(iter)->value);
nodes_count++;
iter = POBJ_LIST_PREV(iter, plist);
} while (!TOID_EQUALS(iter, first));
UT_ASSERTeq(nodes_count, 4);
}
static void
test_tx_api(PMEMobjpool *pop)
{
TOID(struct dummy_root) root;
TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct dummy_root)));
int *vstate = NULL; /* volatile state */
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
vstate = (int *)MALLOC(sizeof(*vstate));
*vstate = TEST_VALUE;
TX_ADD(root);
D_RW(root)->value = *vstate;
TOID_ASSIGN(D_RW(root)->node, OID_NULL);
} TX_FINALLY {
FREE(vstate);
vstate = NULL;
} TX_END
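	/*
	 * TX_FINALLY runs on both commit and abort, which makes it the
	 * natural place to release volatile state tied to the transaction,
	 * as done with vstate above.
	 */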
UT_ASSERTeq(vstate, NULL);
UT_ASSERTeq(D_RW(root)->value, TEST_VALUE);
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->node = TX_ALLOC(struct dummy_node, SIZE_MAX);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
UT_ASSERTeq(errno, ENOMEM);
} TX_END
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
D_RW(root)->node = TX_ZALLOC(struct dummy_node, SIZE_MAX);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
UT_ASSERTeq(errno, ENOMEM);
} TX_END
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
D_RW(root)->node = TX_XALLOC(struct dummy_node, SIZE_MAX,
POBJ_XALLOC_ZERO);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
UT_ASSERTeq(errno, ENOMEM);
} TX_END
errno = 0;
TX_BEGIN_LOCK(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
D_RW(root)->node = TX_ALLOC(struct dummy_node,
PMEMOBJ_MAX_ALLOC_SIZE + 1);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
UT_ASSERTeq(errno, ENOMEM);
} TX_END
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
D_RW(root)->node = TX_ZALLOC(struct dummy_node,
PMEMOBJ_MAX_ALLOC_SIZE + 1);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
UT_ASSERTeq(errno, ENOMEM);
} TX_END
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->node = TX_ZNEW(struct dummy_node);
D_RW(root)->node = TX_REALLOC(D_RO(root)->node, SIZE_MAX);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERTeq(errno, ENOMEM);
} TX_END
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->node = TX_ZNEW(struct dummy_node);
D_RW(root)->node = TX_REALLOC(D_RO(root)->node,
PMEMOBJ_MAX_ALLOC_SIZE + 1);
UT_ASSERT(0); /* should not get to this point */
} TX_ONABORT {
UT_ASSERTeq(errno, ENOMEM);
} TX_END
UT_ASSERT(TOID_IS_NULL(D_RO(root)->node));
errno = 0;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->node = TX_ZNEW(struct dummy_node);
TX_MEMSET(D_RW(D_RW(root)->node)->teststr, 'a', TEST_STR_LEN);
TX_MEMCPY(D_RW(D_RW(root)->node)->teststr, TEST_STR,
TEST_STR_LEN);
TX_SET(D_RW(root)->node, value, TEST_VALUE);
} TX_END
UT_ASSERTeq(D_RW(D_RW(root)->node)->value, TEST_VALUE);
UT_ASSERT(strncmp(D_RW(D_RW(root)->node)->teststr, TEST_STR,
TEST_STR_LEN) == 0);
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
UT_ASSERT(!TOID_IS_NULL(D_RW(root)->node));
TX_FREE(D_RW(root)->node);
D_RW(root)->node = TOID_NULL(struct dummy_node);
TOID_ASSIGN(D_RW(root)->node, OID_NULL);
} TX_END
errno = 0;
TX_BEGIN(pop) {
TX_BEGIN(NULL) {
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(errno == EFAULT);
} TX_END
errno = 0;
TX_BEGIN(pop) {
TX_BEGIN((PMEMobjpool *)(uintptr_t)7) {
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(errno == EINVAL);
} TX_END
UT_OUT("%s", pmemobj_errormsg());
TX_BEGIN(pop) {
pmemobj_tx_abort(ECANCELED);
} TX_END
UT_OUT("%s", pmemobj_errormsg());
}
static void
test_action_api(PMEMobjpool *pop)
{
struct pobj_action act[2];
uint64_t dest_value = 0;
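	/*
	 * pmemobj_reserve/pmemobj_set_value only stage changes in the
	 * volatile actions; nothing becomes visible or durable until
	 * pmemobj_publish() (or pmemobj_tx_publish()) is called, and
	 * pmemobj_cancel() drops the staged changes entirely.
	 */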
PMEMoid oid = pmemobj_reserve(pop, &act[0], 1, 1);
pmemobj_set_value(pop, &act[1], &dest_value, 1);
pmemobj_publish(pop, act, 2);
UT_ASSERTeq(dest_value, 1);
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
oid = pmemobj_reserve(pop, &act[0], 1, 1);
TX_BEGIN(pop) {
pmemobj_tx_publish(act, 1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
dest_value = 0;
oid = pmemobj_reserve(pop, &act[0], 1, 1);
pmemobj_set_value(pop, &act[1], &dest_value, 1);
pmemobj_cancel(pop, act, 2);
UT_ASSERTeq(dest_value, 0);
TOID(struct dummy_node) n =
POBJ_RESERVE_NEW(pop, struct dummy_node, &act[0]);
TOID(struct dummy_node_c) c =
POBJ_RESERVE_ALLOC(pop, struct dummy_node_c,
sizeof(struct dummy_node_c), &act[1]);
pmemobj_publish(pop, act, 2);
/* valgrind would warn in case they were not allocated */
D_RW(n)->value = 1;
D_RW(c)->value = 1;
pmemobj_persist(pop, D_RW(n), sizeof(struct dummy_node));
pmemobj_persist(pop, D_RW(c), sizeof(struct dummy_node_c));
}
static void
test_offsetof(void)
{
TOID(struct dummy_root) r;
TOID(struct dummy_node) n;
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, value) !=
offsetof(struct dummy_root, value));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, lock) !=
offsetof(struct dummy_root, lock));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, node) !=
offsetof(struct dummy_root, node));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, dummies) !=
offsetof(struct dummy_root, dummies));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(r, moved) !=
offsetof(struct dummy_root, moved));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, value) !=
offsetof(struct dummy_node, value));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, teststr) !=
offsetof(struct dummy_node, teststr));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist) !=
offsetof(struct dummy_node, plist));
UT_COMPILE_ERROR_ON(TOID_OFFSETOF(n, plist_m) !=
offsetof(struct dummy_node, plist_m));
}
static void
test_layout(void)
{
/* get number of declared types when there are no types declared */
POBJ_LAYOUT_BEGIN(mylayout);
POBJ_LAYOUT_END(mylayout);
size_t number_of_declared_types = POBJ_LAYOUT_TYPES_NUM(mylayout);
UT_ASSERTeq(number_of_declared_types, 0);
}
static void
test_root_size(PMEMobjpool *pop)
{
UT_ASSERTeq(pmemobj_root_size(pop), 0);
size_t alloc_size = sizeof(struct dummy_root);
pmemobj_root(pop, alloc_size);
UT_ASSERTeq(pmemobj_root_size(pop), sizeof(struct dummy_root));
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_basic_integration");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(basic) != 2);
if (argc < 2 || argc > 3)
UT_FATAL("usage: %s file-name [inject_fault]", argv[0]);
const char *path = argv[1];
const char *opt = argv[2];
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
test_root_size(pop);
test_alloc_api(pop);
test_realloc_api(pop);
test_list_api(pop);
test_tx_api(pop);
test_action_api(pop);
test_offsetof();
test_layout();
pmemobj_close(pop);
/* fault injection */
if (argc == 3 && strcmp(opt, "inject_fault") == 0) {
if (pmemobj_fault_injection_enabled()) {
pmemobj_inject_fault_at(PMEM_MALLOC, 1,
"heap_check_remote");
pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic));
UT_ASSERTeq(pop, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
}
if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
/* second open should fail, checks file locking */
if ((pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) != NULL)
UT_FATAL("!pmemobj_open: %s", path);
pmemobj_close(pop);
int result = pmemobj_check(path, POBJ_LAYOUT_NAME(basic));
if (result < 0)
UT_OUT("!%s: pmemobj_check", path);
else if (result == 0)
UT_OUT("%s: pmemobj_check: not consistent", path);
DONE(NULL);
}
| 17,784 | 25.154412 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmemcheck/obj_pmemcheck.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#include "unittest.h"
#include "valgrind_internal.h"
struct foo {
PMEMmutex bar;
};
static void
test_mutex_pmem_mapping_register(PMEMobjpool *pop)
{
PMEMoid foo;
int ret = pmemobj_alloc(pop, &foo, sizeof(struct foo), 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!OID_IS_NULL(foo));
struct foo *foop = pmemobj_direct(foo);
ret = pmemobj_mutex_lock(pop, &foop->bar);
/* foo->bar has been removed from pmem mappings collection */
VALGRIND_PRINT_PMEM_MAPPINGS;
UT_ASSERTeq(ret, 0);
ret = pmemobj_mutex_unlock(pop, &foop->bar);
UT_ASSERTeq(ret, 0);
pmemobj_free(&foo);
/* the entire foo object has been re-registered as pmem mapping */
VALGRIND_PRINT_PMEM_MAPPINGS;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pmemcheck");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], "pmemcheck", PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
test_mutex_pmem_mapping_register(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 1,127 | 21.56 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmreorder_simple/pmreorder_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmreorder_simple.c -- a simple unit test for store reordering
*
* usage: pmreorder_simple g|b|c|m file
* g - write data in a consistent manner
* b - write data in a possibly inconsistent manner
* c - check data consistency
* m - write data to the pool in a consistent way,
* but at the beginning logs some inconsistent values
*
* See README file for more details.
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
/*
* The struct three_field is inconsistent if flag is set and the fields have
* different values.
*/
struct three_field {
int first_field;
int second_field;
int third_field;
int flag;
};
/*
* write_consistent -- (internal) write data in a consistent manner
*/
static void
write_consistent(struct three_field *structp)
{
structp->first_field = 1;
structp->second_field = 1;
structp->third_field = 1;
pmem_persist(&structp->first_field, sizeof(int) * 3);
structp->flag = 1;
pmem_persist(&structp->flag, sizeof(structp->flag));
}
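/*
 * A generic form of the ordering pattern used above -- a minimal sketch,
 * not called by the test: persist the payload first, then set and persist
 * the flag, so that no store reordering can expose a set flag next to a
 * partially written payload.
 */
static inline void
example_publish_with_flag(int *payload, size_t nfields, int *flag)
{
	for (size_t i = 0; i < nfields; ++i)
		payload[i] = 1;
	pmem_persist(payload, sizeof(int) * nfields);
	*flag = 1;
	pmem_persist(flag, sizeof(*flag));
}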
/*
* write_inconsistent -- (internal) write data in an inconsistent manner.
*/
static void
write_inconsistent(struct three_field *structp)
{
structp->flag = 1;
structp->first_field = 1;
structp->second_field = 1;
structp->third_field = 1;
pmem_persist(structp, sizeof(*structp));
}
/*
 * check_consistency -- (internal) check struct three_field consistency;
 * returns nonzero (inconsistent) only when flag is set and the fields differ
*/
static int
check_consistency(struct three_field *structp)
{
int consistent = 0;
if (structp->flag)
consistent = (structp->first_field != structp->second_field) ||
(structp->first_field != structp->third_field);
return consistent;
}
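/*
 * example_consistency_states -- (illustrative sketch, not part of the
 * original test) shows which field combinations check_consistency flags;
 * with flag set, equal fields are consistent, differing fields are not
 */
static inline void
example_consistency_states(void)
{
	struct three_field ok = { 1, 1, 1, 1 };		/* consistent */
	struct three_field bad = { 1, 0, 1, 1 };	/* inconsistent */
	UT_ASSERTeq(check_consistency(&ok), 0);
	UT_ASSERTeq(check_consistency(&bad), 1);
}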
int
main(int argc, char *argv[])
{
START(argc, argv, "pmreorder_simple");
util_init();
if ((argc != 3) || (strchr("gbcm", argv[1][0]) == NULL) ||
argv[1][1] != '\0')
UT_FATAL("usage: %s g|b|c|m file", argv[0]);
int fd = OPEN(argv[2], O_RDWR);
size_t size;
/* mmap and register in valgrind pmemcheck */
void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
UT_ASSERTne(map, NULL);
struct three_field *structp = map;
char opt = argv[1][0];
/* clear the struct to get a consistent start state for writing */
if (strchr("gb", opt))
pmem_memset_persist(structp, 0, sizeof(*structp));
else if (strchr("m", opt)) {
/* set test values to log an inconsistent start state */
pmem_memset_persist(&structp->flag, 1, sizeof(int));
pmem_memset_persist(&structp->first_field, 0, sizeof(int) * 2);
pmem_memset_persist(&structp->third_field, 1, sizeof(int));
/* clear the struct to get back a consistent start state */
pmem_memset_persist(structp, 0, sizeof(*structp));
}
/* verify that DEFAULT_REORDER restores default engine */
VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.BEGIN");
switch (opt) {
case 'g':
write_consistent(structp);
break;
case 'b':
write_inconsistent(structp);
break;
case 'm':
write_consistent(structp);
break;
case 'c':
return check_consistency(structp);
default:
UT_FATAL("Unrecognized option %c", opt);
}
VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.END");
/* check if undefined marker will not cause an issue */
VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.BEGIN");
VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.END");
CLOSE(fd);
DONE(NULL);
}
| 3,335 | 24.082707 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/remote_obj_basic/remote_obj_basic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* remote_obj_basic.c -- unit test for remote tests support
*
* usage: remote_obj_basic <create|open> <poolset-file>
*/
#include "unittest.h"
#define LAYOUT_NAME "remote_obj_basic"
int
main(int argc, char *argv[])
{
PMEMobjpool *pop;
START(argc, argv, "remote_obj_basic");
if (argc != 3)
UT_FATAL("usage: %s <create|open> <poolset-file>", argv[0]);
const char *mode = argv[1];
const char *file = argv[2];
if (strcmp(mode, "create") == 0) {
if ((pop = pmemobj_create(file, LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", file);
else
UT_OUT("The pool set %s has been created", file);
} else if (strcmp(mode, "open") == 0) {
if ((pop = pmemobj_open(file, LAYOUT_NAME)) == NULL)
UT_FATAL("!pmemobj_open: %s", file);
else
UT_OUT("The pool set %s has been opened", file);
} else {
UT_FATAL("wrong mode: %s\n", argv[1]);
}
pmemobj_close(pop);
DONE(NULL);
}
| 1,019 | 20.25 | 62 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_debug/obj_ctl_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
 * obj_ctl_debug.c -- tests for the ctl debug namespace entry points
*/
#include "unittest.h"
#include "../../libpmemobj/obj.h"
#define LAYOUT "obj_ctl_debug"
#define BUFFER_SIZE 128
#define ALLOC_PATTERN 0xAC
static void
test_alloc_pattern(PMEMobjpool *pop)
{
int ret;
int pattern;
PMEMoid oid;
/* check default pattern */
ret = pmemobj_ctl_get(pop, "debug.heap.alloc_pattern", &pattern);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(pattern, PALLOC_CTL_DEBUG_NO_PATTERN);
/* check set pattern */
pattern = ALLOC_PATTERN;
ret = pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(pop->heap.alloc_pattern, pattern);
/* check alloc with pattern */
ret = pmemobj_alloc(pop, &oid, BUFFER_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
char *buff = pmemobj_direct(oid);
int i;
for (i = 0; i < BUFFER_SIZE; i++)
/* should trigger memcheck error: read uninitialized values */
UT_ASSERTeq(*(buff + i), (char)pattern);
pmemobj_free(&oid);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_debug");
if (argc < 2)
UT_FATAL("usage: %s filename", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
test_alloc_pattern(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 1,452 | 20.367647 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list_macro/obj_list_macro.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_macro.c -- unit tests for list module
*/
#include <stddef.h>
#include "libpmemobj.h"
#include "unittest.h"
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
struct item {
int id;
POBJ_LIST_ENTRY(struct item) next;
};
struct list {
POBJ_LIST_HEAD(listhead, struct item) head;
};
/* global lists */
static TOID(struct list) List;
static TOID(struct list) List_sec;
#define LAYOUT_NAME "list_macros"
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list_macro <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list_macro <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list_macro <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list_macro <file> i:<where>:<num>[:<id>]")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list_macro <file> n:<where>:<num>[:<id>]")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list_macro <file> f:<list>:<num>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list_macro <file> r:<list>:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list_macro <file> m:<num>:<where>:<num>")
/*
 * get_item_list -- get nth item from list (negative n counts from the tail,
 * e.g. n == -1 returns the last element)
*/
static TOID(struct item)
get_item_list(TOID(struct list) list, int n)
{
TOID(struct item) item;
if (n >= 0) {
POBJ_LIST_FOREACH(item, &D_RO(list)->head, next) {
if (n == 0)
return item;
n--;
}
} else {
POBJ_LIST_FOREACH_REVERSE(item, &D_RO(list)->head, next) {
n++;
if (n == 0)
return item;
}
}
return TOID_NULL(struct item);
}
/*
* do_print -- print list elements in normal order
*/
static void
do_print(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "P:%d", &L) != 1)
FATAL_USAGE_PRINT();
TOID(struct item) item;
if (L == 1) {
UT_OUT("list:");
POBJ_LIST_FOREACH(item, &D_RW(List)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 2) {
UT_OUT("list sec:");
POBJ_LIST_FOREACH(item, &D_RW(List_sec)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT();
}
}
/*
* do_print_reverse -- print list elements in reverse order
*/
static void
do_print_reverse(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "R:%d", &L) != 1)
FATAL_USAGE_PRINT_REVERSE();
TOID(struct item) item;
if (L == 1) {
UT_OUT("list reverse:");
POBJ_LIST_FOREACH_REVERSE(item, &D_RW(List)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 2) {
UT_OUT("list sec reverse:");
POBJ_LIST_FOREACH_REVERSE(item, &D_RW(List_sec)->head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT_REVERSE();
}
}
/*
* item_constructor -- constructor which sets the item's id to
* new value
*/
static int
item_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
int id = *(int *)arg;
struct item *item = (struct item *)ptr;
item->id = id;
UT_OUT("constructor(id = %d)", id);
return 0;
}
/*
* do_insert_new -- insert new element to list
*/
static void
do_insert_new(PMEMobjpool *pop, const char *arg)
{
int n; /* which element on List */
int before;
int id;
int ret = sscanf(arg, "n:%d:%d:%d", &before, &n, &id);
if (ret != 3 && ret != 2)
FATAL_USAGE_INSERT_NEW();
int ptr = (ret == 3) ? id : 0;
TOID(struct item) item;
if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(List)->head, next,
sizeof(struct item), item_constructor, &ptr);
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
UT_FATAL("POBJ_LIST_INSERT_NEW_HEAD");
} else {
item = get_item_list(List, n);
UT_ASSERT(!TOID_IS_NULL(item));
if (!before) {
POBJ_LIST_INSERT_NEW_AFTER(pop, &D_RW(List)->head,
item, next, sizeof(struct item),
item_constructor, &ptr);
if (TOID_IS_NULL(POBJ_LIST_NEXT(item, next)))
UT_FATAL("POBJ_LIST_INSERT_NEW_AFTER");
} else {
POBJ_LIST_INSERT_NEW_BEFORE(pop, &D_RW(List)->head,
item, next, sizeof(struct item),
item_constructor, &ptr);
if (TOID_IS_NULL(POBJ_LIST_PREV(item, next)))
UT_FATAL("POBJ_LIST_INSERT_NEW_BEFORE");
}
}
}
/*
* do_insert -- insert element to list
*/
static void
do_insert(PMEMobjpool *pop, const char *arg)
{
int n; /* which element on List */
int before;
int id;
int ret = sscanf(arg, "i:%d:%d:%d", &before, &n, &id);
if (ret != 3 && ret != 2)
FATAL_USAGE_INSERT();
int ptr = (ret == 3) ? id : 0;
TOID(struct item) item;
POBJ_NEW(pop, &item, struct item, item_constructor, &ptr);
UT_ASSERT(!TOID_IS_NULL(item));
errno = 0;
if (POBJ_LIST_EMPTY(&D_RW(List)->head)) {
ret = POBJ_LIST_INSERT_HEAD(pop, &D_RW(List)->head,
item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_HEAD");
}
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
UT_FATAL("POBJ_LIST_INSERT_HEAD");
} else {
TOID(struct item) elm = get_item_list(List, n);
UT_ASSERT(!TOID_IS_NULL(elm));
if (!before) {
ret = POBJ_LIST_INSERT_AFTER(pop, &D_RW(List)->head,
elm, item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_AFTER");
}
if (!TOID_EQUALS(item, POBJ_LIST_NEXT(elm, next)))
UT_FATAL("POBJ_LIST_INSERT_AFTER");
} else {
ret = POBJ_LIST_INSERT_BEFORE(pop, &D_RW(List)->head,
elm, item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_INSERT_BEFORE");
}
if (!TOID_EQUALS(item, POBJ_LIST_PREV(elm, next)))
UT_FATAL("POBJ_LIST_INSERT_BEFORE");
}
}
}
/*
* do_remove_free -- remove and free element from list
*/
static void
do_remove_free(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
int n; /* which element */
if (sscanf(arg, "f:%d:%d", &L, &n) != 2)
FATAL_USAGE_REMOVE_FREE();
TOID(struct item) item;
TOID(struct list) tmp_list;
if (L == 1)
tmp_list = List;
else if (L == 2)
tmp_list = List_sec;
else
FATAL_USAGE_REMOVE_FREE();
if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
return;
item = get_item_list(tmp_list, n);
UT_ASSERT(!TOID_IS_NULL(item));
errno = 0;
int ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(tmp_list)->head,
item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_REMOVE_FREE");
}
}
/*
* do_remove -- remove element from list
*/
static void
do_remove(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
int n; /* which element */
if (sscanf(arg, "r:%d:%d", &L, &n) != 2)
FATAL_USAGE_REMOVE();
TOID(struct item) item;
TOID(struct list) tmp_list;
if (L == 1)
tmp_list = List;
else if (L == 2)
tmp_list = List_sec;
else
		FATAL_USAGE_REMOVE();
if (POBJ_LIST_EMPTY(&D_RW(tmp_list)->head))
return;
item = get_item_list(tmp_list, n);
UT_ASSERT(!TOID_IS_NULL(item));
errno = 0;
int ret = POBJ_LIST_REMOVE(pop, &D_RW(tmp_list)->head, item, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_REMOVE");
}
POBJ_FREE(&item);
}
/*
* do_move -- move element from one list to another
*/
static void
do_move(PMEMobjpool *pop, const char *arg)
{
int n;
int d;
int before;
if (sscanf(arg, "m:%d:%d:%d", &n, &before, &d) != 3)
FATAL_USAGE_MOVE();
int ret;
errno = 0;
if (POBJ_LIST_EMPTY(&D_RW(List)->head))
return;
if (POBJ_LIST_EMPTY(&D_RW(List_sec)->head)) {
ret = POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_HEAD");
}
} else {
if (before) {
ret = POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,
&D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List_sec, d),
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_BEFORE");
}
} else {
ret = POBJ_LIST_MOVE_ELEMENT_AFTER(pop,
&D_RW(List)->head,
&D_RW(List_sec)->head,
get_item_list(List_sec, d),
get_item_list(List, n),
next, next);
if (ret) {
UT_ASSERTeq(ret, -1);
UT_ASSERTne(errno, 0);
UT_FATAL("POBJ_LIST_MOVE_ELEMENT_AFTER");
}
}
}
}
/*
* do_cleanup -- de-initialization function
*/
static void
do_cleanup(PMEMobjpool *pop, TOID(struct list) list)
{
int ret;
errno = 0;
while (!POBJ_LIST_EMPTY(&D_RW(list)->head)) {
TOID(struct item) tmp = POBJ_LIST_FIRST(&D_RW(list)->head);
ret = POBJ_LIST_REMOVE_FREE(pop, &D_RW(list)->head, tmp, next);
UT_ASSERTeq(errno, 0);
UT_ASSERTeq(ret, 0);
}
POBJ_FREE(&list);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_list_macro");
if (argc < 2)
FATAL_USAGE();
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
POBJ_ZNEW(pop, &List, struct list);
POBJ_ZNEW(pop, &List_sec, struct list);
int i;
for (i = 2; i < argc; i++) {
switch (argv[i][0]) {
case 'P':
do_print(pop, argv[i]);
break;
case 'R':
do_print_reverse(pop, argv[i]);
break;
case 'n':
do_insert_new(pop, argv[i]);
break;
case 'i':
do_insert(pop, argv[i]);
break;
case 'f':
do_remove_free(pop, argv[i]);
break;
case 'r':
do_remove(pop, argv[i]);
break;
case 'm':
do_move(pop, argv[i]);
break;
default:
FATAL_USAGE();
}
}
do_cleanup(pop, List);
do_cleanup(pop, List_sec);
pmemobj_close(pop);
DONE(NULL);
}
| 9,625 | 21.756501 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_critnib_mt/obj_critnib_mt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_critnib_mt.c -- multithreaded unit test for critnib
*/
#include <errno.h>
#include "critnib.h"
#include "rand.h"
#include "os_thread.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define NITER_FAST 200000000
#define NITER_MID 20000000
#define NITER_SLOW 2000000
#define MAXTHREADS 4096
static int nthreads; /* number of threads */
static int nrthreads; /* in mixed tests, read threads */
static int nwthreads; /* ... and write threads */
static uint64_t
rnd_thid_r64(rng_t *seedp, uint16_t thid)
{
/*
* Stick arg (thread index) onto bits 16..31, to make it impossible for
* two worker threads to write the same value, while keeping both ends
* pseudo-random.
*/
uint64_t r = rnd64_r(seedp);
r &= ~0xffff0000ULL;
r |= ((uint64_t)thid) << 16;
return r;
}
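/*
 * example_thid_bits -- (illustrative sketch, not part of the original test)
 * worked example of the bit layout above: bits 16..31 of the random value
 * are replaced by the thread index, the rest stays random
 */
static inline void
example_thid_bits(void)
{
	uint64_t r = 0xAAAAAAAAFFFF1234ULL;
	r &= ~0xffff0000ULL;		/* clear bits 16..31 */
	r |= ((uint64_t)0x0003) << 16;	/* stick thread index 3 in */
	UT_ASSERTeq(r, 0xAAAAAAAA00031234ULL);
}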
static uint64_t
helgrind_count(uint64_t x)
{
/* Convert total number of ops to per-thread. */
x /= (unsigned)nthreads;
/*
* Reduce iteration count when running on foogrind, by a factor of 64.
* Multiple instances of foogrind cause exponential slowdown, so handle
* that as well (not that it's very useful for us...).
*/
return x >> (6 * On_valgrind);
}
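/*
 * Worked example (illustrative): with NITER_FAST == 200000000 and 8 threads,
 * each thread runs 25000000 iterations natively, or 25000000 >> 6 (~390k)
 * under a single valgrind tool.
 */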
/* 1024 random numbers, shared between threads. */
static uint64_t the1024[1024];
static struct critnib *c;
#define K 0xdeadbeefcafebabe
static void *
thread_read1(void *arg)
{
uint64_t niter = helgrind_count(NITER_FAST);
for (uint64_t count = 0; count < niter; count++)
UT_ASSERTeq(critnib_get(c, K), (void *)K);
return NULL;
}
static void *
thread_read1024(void *arg)
{
uint64_t niter = helgrind_count(NITER_FAST);
for (uint64_t count = 0; count < niter; count++) {
uint64_t v = the1024[count % ARRAY_SIZE(the1024)];
UT_ASSERTeq(critnib_get(c, v), (void *)v);
}
return NULL;
}
static void *
thread_write1024(void *arg)
{
rng_t rng;
randomize_r(&rng, (uintptr_t)arg);
uint64_t w1024[1024];
for (int i = 0; i < ARRAY_SIZE(w1024); i++)
w1024[i] = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);
uint64_t niter = helgrind_count(NITER_SLOW);
for (uint64_t count = 0; count < niter; count++) {
uint64_t v = w1024[count % ARRAY_SIZE(w1024)];
critnib_insert(c, v, (void *)v);
uint64_t r = (uint64_t)critnib_remove(c, v);
UT_ASSERTeq(v, r);
}
return NULL;
}
static void *
thread_read_write_remove(void *arg)
{
rng_t rng;
randomize_r(&rng, (uintptr_t)arg);
uint64_t niter = helgrind_count(NITER_SLOW);
for (uint64_t count = 0; count < niter; count++) {
uint64_t r, v = rnd_thid_r64(&rng, (uint16_t)(uintptr_t)arg);
critnib_insert(c, v, (void *)v);
r = (uint64_t)critnib_get(c, v);
UT_ASSERTeq(r, v);
r = (uint64_t)critnib_remove(c, v);
UT_ASSERTeq(r, v);
}
return NULL;
}
/*
* Reverse bits in a number: 1234 -> 4321 (swap _bit_ endianness).
*
* Doing this on successive numbers produces a van der Corput sequence,
* which covers the space nicely (relevant for <= tests).
*/
static uint64_t
revbits(uint64_t x)
{
uint64_t y = 0;
uint64_t a = 1;
uint64_t b = 0x8000000000000000;
for (; b; a <<= 1, b >>= 1) {
if (x & a)
y |= b;
}
return y;
}
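/*
 * example_revbits -- (illustrative sketch, not part of the original test)
 * a few fixed points of the bit reversal used above
 */
static inline void
example_revbits(void)
{
	UT_ASSERTeq(revbits(1), 0x8000000000000000ULL);
	UT_ASSERTeq(revbits(2), 0x4000000000000000ULL);
	UT_ASSERTeq(revbits(0x8000000000000000ULL), 1);
}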
static void *
thread_le1(void *arg)
{
uint64_t niter = helgrind_count(NITER_MID);
for (uint64_t count = 0; count < niter; count++) {
uint64_t y = revbits(count);
if (y < K)
UT_ASSERTeq(critnib_find_le(c, y), NULL);
else
UT_ASSERTeq(critnib_find_le(c, y), (void *)K);
}
return NULL;
}
static void *
thread_le1024(void *arg)
{
uint64_t niter = helgrind_count(NITER_MID);
for (uint64_t count = 0; count < niter; count++) {
uint64_t y = revbits(count);
critnib_find_le(c, y);
}
return NULL;
}
typedef void *(*thread_func_t)(void *);
/*
* Before starting the threads, we add "fixed_preload" of static values
* (K and 1), or "random_preload" of random numbers. Can't have both.
*/
static void
test(int fixed_preload, int random_preload, thread_func_t rthread,
thread_func_t wthread)
{
c = critnib_new();
if (fixed_preload >= 1)
critnib_insert(c, K, (void *)K);
if (fixed_preload >= 2)
critnib_insert(c, 1, (void *)1);
for (int i = 0; i < random_preload; i++)
critnib_insert(c, the1024[i], (void *)the1024[i]);
os_thread_t th[MAXTHREADS], wr[MAXTHREADS];
int ntr = wthread ? nrthreads : nthreads;
int ntw = wthread ? nwthreads : 0;
for (int i = 0; i < ntr; i++)
THREAD_CREATE(&th[i], 0, rthread, (void *)(uint64_t)i);
for (int i = 0; i < ntw; i++)
THREAD_CREATE(&wr[i], 0, wthread, (void *)(uint64_t)i);
/* The threads work here... */
for (int i = 0; i < ntr; i++) {
void *retval;
THREAD_JOIN(&th[i], &retval);
}
for (int i = 0; i < ntw; i++) {
void *retval;
THREAD_JOIN(&wr[i], &retval);
}
critnib_delete(c);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_critnib_mt");
util_init();
randomize(1); /* use a fixed reproducible seed */
for (int i = 0; i < ARRAY_SIZE(the1024); i++)
the1024[i] = rnd64();
nthreads = sysconf(_SC_NPROCESSORS_ONLN);
if (nthreads > MAXTHREADS)
nthreads = MAXTHREADS;
if (!nthreads)
nthreads = 8;
nwthreads = nthreads / 2;
if (!nwthreads)
nwthreads = 1;
nrthreads = nthreads - nwthreads;
if (!nrthreads)
nrthreads = 1;
test(1, 0, thread_read1, thread_write1024);
test(0, 1024, thread_read1024, thread_write1024);
test(0, 0, thread_read_write_remove, NULL);
test(1, 0, thread_le1, NULL);
test(0, 1024, thread_le1024, NULL);
DONE(NULL);
}
| 5,467 | 20.527559 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_arenas/obj_ctl_arenas.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_ctl_arenas.c -- tests for the ctl entry points
* usage:
* obj_ctl_arenas <file> n - test for heap.narenas.total
*
* obj_ctl_arenas <file> s - test for heap.arena.[idx].size
* and heap.thread.arena_id (RW)
*
* obj_ctl_arenas <file> c - test for heap.arena.create,
* heap.arena.[idx].automatic and heap.narenas.automatic
* obj_ctl_arenas <file> a - mt test for heap.arena.create
* and heap.thread.arena_id
*
* obj_ctl_arenas <file> f - test for POBJ_ARENA_ID flag,
*
* obj_ctl_arenas <file> q - test for POBJ_ARENA_ID with
* non-exists arena id
*
* obj_ctl_arenas <file> m - test for heap.narenas.max (RW)
*/
#include <sched.h>
#include "sys_util.h"
#include "unittest.h"
#include "util.h"
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define LAYOUT "obj_ctl_arenas"
#define CTL_QUERY_LEN 256
#define NTHREAD 2
#define NTHREAD_ARENA 32
#define NOBJECT_THREAD 64
#define ALLOC_CLASS_ARENA 2
#define NTHREADX 16
#define NARENAS 16
#define DEFAULT_ARENAS_MAX (1 << 10)
static os_mutex_t lock;
static os_cond_t cond;
static PMEMobjpool *pop;
static int nth;
static struct pobj_alloc_class_desc alloc_class[] = {
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 128,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 1024,
.units_per_block = 1000,
.alignment = 0
},
{
.header_type = POBJ_HEADER_NONE,
.unit_size = 111,
.units_per_block = CHUNKSIZE / 111,
.alignment = 0
},
};
struct arena_alloc {
unsigned arena;
PMEMoid oid;
};
static struct arena_alloc ref;
static void
check_arena_size(unsigned arena_id, unsigned class_id)
{
int ret;
size_t arena_size;
char arena_idx_size[CTL_QUERY_LEN];
SNPRINTF(arena_idx_size, CTL_QUERY_LEN,
"heap.arena.%u.size", arena_id);
ret = pmemobj_ctl_get(pop, arena_idx_size, &arena_size);
UT_ASSERTeq(ret, 0);
size_t test = ALIGN_UP(alloc_class[class_id].unit_size *
alloc_class[class_id].units_per_block, CHUNKSIZE);
UT_ASSERTeq(test, arena_size);
}
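/*
 * example_arena_size_math -- (illustrative sketch, not part of the original
 * test) the expected-size arithmetic from check_arena_size, spelled out for
 * alloc_class[0]: 128 * 1000 = 128000 bytes, rounded up to one 256 KB chunk
 */
static inline void
example_arena_size_math(void)
{
	size_t raw = alloc_class[0].unit_size *
		alloc_class[0].units_per_block;
	UT_ASSERTeq(raw, 128000);
	UT_ASSERTeq(ALIGN_UP(raw, CHUNKSIZE), CHUNKSIZE);
}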
static void
create_alloc_class(void)
{
int ret;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
&alloc_class[0]);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
&alloc_class[1]);
UT_ASSERTeq(ret, 0);
}
static void *
worker_arenas_size(void *arg)
{
int ret = -1;
int idx = (int)(intptr_t)arg;
int off_idx = idx + 128;
unsigned arena_id;
unsigned arena_id_new;
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id_new);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id_new >= 1);
ret = pmemobj_ctl_set(pop, "heap.thread.arena_id",
&arena_id_new);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, NULL, alloc_class[idx].unit_size, 0,
POBJ_CLASS_ID(off_idx), NULL, NULL);
UT_ASSERTeq(ret, 0);
/* we need to test 2 arenas so 2 threads are needed here */
util_mutex_lock(&lock);
nth++;
if (nth == NTHREAD)
os_cond_broadcast(&cond);
else
while (nth < NTHREAD)
os_cond_wait(&cond, &lock);
util_mutex_unlock(&lock);
ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arena_id_new, arena_id);
check_arena_size(arena_id, (unsigned)idx);
return NULL;
}
static void *
worker_arenas_flag(void *arg)
{
int ret;
unsigned arenas[NARENAS];
for (unsigned i = 0; i < NARENAS; ++i) {
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arenas[i]);
UT_ASSERTeq(ret, 0);
}
/*
* Tests POBJ_ARENA_ID with pmemobj_xalloc.
	 * All objects are freed after the threads are joined.
*/
for (unsigned i = 0; i < 2; i++) {
ret = pmemobj_xalloc(pop,
NULL, alloc_class[i].unit_size, 0,
POBJ_CLASS_ID(i + 128) | \
POBJ_ARENA_ID(arenas[i]),
NULL, NULL);
UT_ASSERTeq(ret, 0);
check_arena_size(arenas[i], i);
}
/* test POBJ_ARENA_ID with pmemobj_xreserve */
struct pobj_action act;
PMEMoid oid = pmemobj_xreserve(pop, &act,
alloc_class[0].unit_size, 1,
POBJ_CLASS_ID(128) |
POBJ_ARENA_ID(arenas[2]));
pmemobj_publish(pop, &act, 1);
pmemobj_free(&oid);
UT_ASSERT(OID_IS_NULL(oid));
/* test POBJ_ARENA_ID with pmemobj_tx_xalloc */
TX_BEGIN(pop) {
pmemobj_tx_xalloc(alloc_class[1].unit_size, 0,
POBJ_CLASS_ID(129) | POBJ_ARENA_ID(arenas[3]));
} TX_END
check_arena_size(arenas[3], 1);
return NULL;
}
static void *
worker_arena_threads(void *arg)
{
int ret = -1;
struct arena_alloc *ref = (struct arena_alloc *)arg;
unsigned arena_id;
ret = pmemobj_ctl_get(pop, "heap.thread.arena_id", &arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id != 0);
ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
UT_ASSERTeq(ret, 0);
PMEMoid oid[NOBJECT_THREAD];
unsigned d;
for (int i = 0; i < NOBJECT_THREAD; i++) {
ret = pmemobj_xalloc(pop, &oid[i],
alloc_class[ALLOC_CLASS_ARENA].unit_size,
0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128),
NULL, NULL);
UT_ASSERTeq(ret, 0);
		d = (unsigned)labs((long)ref->oid.off - (long)oid[i].off);
		/* objects are in the same block as the first one */
		UT_ASSERT(d <= alloc_class[ALLOC_CLASS_ARENA].unit_size *
			(alloc_class[ALLOC_CLASS_ARENA].units_per_block - 1));
}
for (int i = 0; i < NOBJECT_THREAD; i++)
pmemobj_free(&oid[i]);
return NULL;
}
static void
worker_arena_ref_obj(struct arena_alloc *ref)
{
int ret = -1;
ret = pmemobj_ctl_set(pop, "heap.thread.arena_id", &ref->arena);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &ref->oid,
alloc_class[ALLOC_CLASS_ARENA].unit_size,
0, POBJ_CLASS_ID(ALLOC_CLASS_ARENA + 128), NULL, NULL);
UT_ASSERTeq(ret, 0);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_arenas");
if (argc != 3)
UT_FATAL("usage: %s poolset [n|s|c|f|q|m|a]", argv[0]);
const char *path = argv[1];
char t = argv[2][0];
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
int ret = 0;
if (t == 'n') {
unsigned narenas = 0;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(narenas, 0);
} else if (t == 's') {
os_thread_t threads[NTHREAD];
util_mutex_init(&lock);
util_cond_init(&cond);
create_alloc_class();
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker_arenas_size,
(void *)(intptr_t)i);
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
} else if (t == 'c') {
char arena_idx_auto[CTL_QUERY_LEN];
unsigned narenas_b = 0;
unsigned narenas_a = 0;
unsigned narenas_n = 4;
unsigned arena_id;
unsigned all_auto;
int automatic;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_b);
UT_ASSERTeq(ret, 0);
/* all arenas created at the start should be set to auto */
for (unsigned i = 1; i <= narenas_b; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic", &all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b, all_auto);
/* all arenas created by user should not be auto */
for (unsigned i = 1; i <= narenas_n; i++) {
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arena_id, narenas_b + i);
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", arena_id);
			ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
			UT_ASSERTeq(ret, 0);
			UT_ASSERTeq(automatic, 0);
/*
* after creation, number of auto
* arenas should be the same
*/
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i - 1, all_auto);
/* change the state of created arena to auto */
int activate = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&activate);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, arena_idx_auto, &automatic);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(automatic, 1);
/* number of auto arenas should increase */
ret = pmemobj_ctl_get(pop, "heap.narenas.automatic",
&all_auto);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + i, all_auto);
}
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &narenas_a);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(narenas_b + narenas_n, narenas_a);
/* at least one automatic arena must exist */
for (unsigned i = 1; i <= narenas_a; i++) {
SNPRINTF(arena_idx_auto, CTL_QUERY_LEN,
"heap.arena.%u.automatic", i);
automatic = 0;
if (i < narenas_a) {
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
} else {
/*
* last auto arena -
* cannot change the state to 0...
*/
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, -1);
/* ...but can change (overwrite) to 1 */
automatic = 1;
ret = pmemobj_ctl_set(pop, arena_idx_auto,
&automatic);
UT_ASSERTeq(ret, 0);
}
}
} else if (t == 'a') {
int ret;
unsigned arena_id_new;
char alloc_class_idx_desc[CTL_QUERY_LEN];
ret = pmemobj_ctl_exec(pop, "heap.arena.create",
&arena_id_new);
UT_ASSERTeq(ret, 0);
UT_ASSERT(arena_id_new >= 1);
SNPRINTF(alloc_class_idx_desc, CTL_QUERY_LEN,
"heap.alloc_class.%d.desc",
ALLOC_CLASS_ARENA + 128);
ret = pmemobj_ctl_set(pop, alloc_class_idx_desc,
&alloc_class[ALLOC_CLASS_ARENA]);
UT_ASSERTeq(ret, 0);
ref.arena = arena_id_new;
worker_arena_ref_obj(&ref);
os_thread_t threads[NTHREAD_ARENA];
for (int i = 0; i < NTHREAD_ARENA; i++) {
THREAD_CREATE(&threads[i], NULL, worker_arena_threads,
&ref);
}
for (int i = 0; i < NTHREAD_ARENA; i++)
THREAD_JOIN(&threads[i], NULL);
} else if (t == 'f') {
os_thread_t threads[NTHREADX];
create_alloc_class();
for (int i = 0; i < NTHREADX; i++)
THREAD_CREATE(&threads[i], NULL,
worker_arenas_flag, NULL);
for (int i = 0; i < NTHREADX; i++)
THREAD_JOIN(&threads[i], NULL);
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2)
pmemobj_free(&oid);
} else if (t == 'q') {
unsigned total;
ret = pmemobj_ctl_get(pop, "heap.narenas.total", &total);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, NULL, alloc_class[0].unit_size, 0,
POBJ_ARENA_ID(total), NULL, NULL);
UT_ASSERTne(ret, 0);
} else if (t == 'm') {
unsigned max;
unsigned new_max;
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should not decrease */
new_max = DEFAULT_ARENAS_MAX - 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTne(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX, max);
/* size should increase */
new_max = DEFAULT_ARENAS_MAX + 1;
ret = pmemobj_ctl_set(pop, "heap.narenas.max", &new_max);
UT_ASSERTeq(ret, 0);
ret = pmemobj_ctl_get(pop, "heap.narenas.max", &max);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(DEFAULT_ARENAS_MAX + 1, max);
} else {
UT_ASSERT(0);
}
pmemobj_close(pop);
DONE(NULL);
}
| 11,314 | 23.651416 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_poolset_unmap/win_poolset_unmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
 * win_poolset_unmap.c -- test for the Windows mmap destructor.
 *
 * It checks whether all mappings are properly unmapped and memory is
 * properly unreserved when an auto-growing pool is used.
*/
#include "unittest.h"
#include "os.h"
#include "libpmemobj.h"
#define KILOBYTE (1 << 10)
#define MEGABYTE (1 << 20)
#define LAYOUT_NAME "poolset_unmap"
int
main(int argc, char *argv[])
{
START(argc, argv, "win_poolset_unmap");
if (argc != 2)
UT_FATAL("usage: %s path", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
MEMORY_BASIC_INFORMATION basic_info;
SIZE_T bytes_returned;
SIZE_T offset = 0;
bytes_returned = VirtualQuery(pop, &basic_info,
sizeof(basic_info));
/*
* When opening pool, we try to remove all permissions on header.
* If this action fails VirtualQuery will return one region with
* size 8MB. If it succeeds, RegionSize will be equal to 4KB due
* to different header and rest of the mapping permissions.
*/
if (basic_info.RegionSize == 4 * KILOBYTE) {
/* header */
UT_ASSERTeq(bytes_returned, sizeof(basic_info));
UT_ASSERTeq(basic_info.State, MEM_COMMIT);
offset += basic_info.RegionSize;
/* first part */
bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
sizeof(basic_info));
UT_ASSERTeq(bytes_returned, sizeof(basic_info));
UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE - 4 * KILOBYTE);
UT_ASSERTeq(basic_info.State, MEM_COMMIT);
} else {
/* first part with header */
UT_ASSERTeq(bytes_returned, sizeof(basic_info));
UT_ASSERTeq(basic_info.RegionSize, 8 * MEGABYTE);
UT_ASSERTeq(basic_info.State, MEM_COMMIT);
}
offset += basic_info.RegionSize;
/* reservation after first part */
bytes_returned = VirtualQuery((char *)pop + offset, &basic_info,
sizeof(basic_info));
UT_ASSERTeq(bytes_returned, sizeof(basic_info));
UT_ASSERTeq(basic_info.RegionSize, (50 - 8) * MEGABYTE);
UT_ASSERTeq(basic_info.State, MEM_RESERVE);
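	/*
	 * Layout recap (illustrative, per this test's poolset): a 50 MB
	 * auto-growing reservation whose first 8 MB part is committed,
	 * leaving 42 MB in the MEM_RESERVE state checked above.
	 */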
DONE(NULL);
}
| 2,117 | 25.810127 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_compat/pmem2_compat.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem2_compat.c -- compatibility test for libpmem vs libpmem2
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
UT_COMPILE_ERROR_ON(PMEM_F_MEM_NODRAIN != PMEM2_F_MEM_NODRAIN);
UT_COMPILE_ERROR_ON(PMEM_F_MEM_NONTEMPORAL != PMEM2_F_MEM_NONTEMPORAL);
UT_COMPILE_ERROR_ON(PMEM_F_MEM_TEMPORAL != PMEM2_F_MEM_TEMPORAL);
UT_COMPILE_ERROR_ON(PMEM_F_MEM_WC != PMEM2_F_MEM_WC);
UT_COMPILE_ERROR_ON(PMEM_F_MEM_WB != PMEM2_F_MEM_WB);
UT_COMPILE_ERROR_ON(PMEM_F_MEM_NOFLUSH != PMEM2_F_MEM_NOFLUSH);
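	/* all checks above are compile-time; building this file is the test */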
return 0;
}
| 606 | 26.590909 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_strdup/obj_tx_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_strdup.c -- unit test for pmemobj_tx_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#define LAYOUT_NAME "tx_strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
enum type_number {
TYPE_NO_TX,
TYPE_WCS_NO_TX,
TYPE_COMMIT,
TYPE_WCS_COMMIT,
TYPE_ABORT,
TYPE_WCS_ABORT,
TYPE_FREE_COMMIT,
TYPE_WCS_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_WCS_FREE_ABORT,
TYPE_COMMIT_NESTED1,
TYPE_WCS_COMMIT_NESTED1,
TYPE_COMMIT_NESTED2,
TYPE_WCS_COMMIT_NESTED2,
TYPE_ABORT_NESTED1,
TYPE_WCS_ABORT_NESTED1,
TYPE_ABORT_NESTED2,
TYPE_WCS_ABORT_NESTED2,
TYPE_ABORT_AFTER_NESTED1,
TYPE_WCS_ABORT_AFTER_NESTED1,
TYPE_ABORT_AFTER_NESTED2,
TYPE_WCS_ABORT_AFTER_NESTED2,
TYPE_NOFLUSH,
TYPE_WCS_NOFLUSH,
};
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define MAX_FUNC 2
typedef void (*fn_tx_strdup)(TOID(char) *str, const char *s,
unsigned type_num);
typedef void (*fn_tx_wcsdup)(TOID(wchar_t) *wcs, const wchar_t *s,
unsigned type_num);
static unsigned counter;
/*
* tx_strdup -- duplicate a string using pmemobj_tx_strdup
*/
static void
tx_strdup(TOID(char) *str, const char *s, unsigned type_num)
{
TOID_ASSIGN(*str, pmemobj_tx_strdup(s, type_num));
}
/*
 * tx_wcsdup -- duplicate a wide character string using pmemobj_tx_wcsdup
*/
static void
tx_wcsdup(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
TOID_ASSIGN(*wcs, pmemobj_tx_wcsdup(s, type_num));
}
/*
* tx_strdup_macro -- duplicate a string using macro
*/
static void
tx_strdup_macro(TOID(char) *str, const char *s, unsigned type_num)
{
TOID_ASSIGN(*str, TX_STRDUP(s, type_num));
}
/*
* tx_wcsdup_macro -- duplicate a wide character string using macro
*/
static void
tx_wcsdup_macro(TOID(wchar_t) *wcs, const wchar_t *s, unsigned type_num)
{
TOID_ASSIGN(*wcs, TX_WCSDUP(s, type_num));
}
static fn_tx_strdup do_tx_strdup[MAX_FUNC] = {tx_strdup, tx_strdup_macro};
static fn_tx_wcsdup do_tx_wcsdup[MAX_FUNC] = {tx_wcsdup, tx_wcsdup_macro};
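/*
 * The duplication tests below run twice, indexed by the global "counter"
 * set in main(): once through the raw pmemobj_tx_*dup calls and once
 * through the TX_*DUP macro wrappers.
 */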
/*
* do_tx_strdup_commit -- duplicate a string and commit the transaction
*/
static void
do_tx_strdup_commit(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_COMMIT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_COMMIT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT));
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs)), 0);
}
/*
* do_tx_strdup_abort -- duplicate a string and abort the transaction
*/
static void
do_tx_strdup_abort(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_ABORT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_ABORT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_tx_strdup_null -- duplicate a NULL string to trigger tx abort
*/
static void
do_tx_strdup_null(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, NULL, TYPE_ABORT);
do_tx_wcsdup[counter](&wcs, NULL, TYPE_WCS_ABORT);
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
TX_BEGIN(pop) {
pmemobj_tx_xstrdup(NULL, TYPE_ABORT, POBJ_XALLOC_NO_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_strdup(NULL, TYPE_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_xstrdup(NULL, TYPE_ABORT, 0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_strdup_free_commit -- duplicate a string, free and commit the
* transaction
*/
static void
do_tx_strdup_free_commit(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_COMMIT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_COMMIT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
int ret = pmemobj_tx_free(str.oid);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_free(wcs.oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_COMMIT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_tx_strdup_free_abort -- duplicate a string, free and abort the
* transaction
*/
static void
do_tx_strdup_free_abort(PMEMobjpool *pop)
{
TOID(char) str;
TOID(wchar_t) wcs;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str, TEST_STR_1, TYPE_FREE_ABORT);
do_tx_wcsdup[counter](&wcs, TEST_WCS_1, TYPE_WCS_FREE_ABORT);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
int ret = pmemobj_tx_free(str.oid);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_free(wcs.oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
TOID_ASSIGN(wcs, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_FREE_ABORT));
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
 * do_tx_strdup_commit_nested -- duplicate two strings using a nested
 * transaction and commit the transaction
*/
static void
do_tx_strdup_commit_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_COMMIT_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_COMMIT_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_COMMIT_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_COMMIT_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
UT_ASSERTeq(strcmp(TEST_STR_1, D_RO(str1)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_1, D_RO(wcs1)), 0);
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_COMMIT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
UT_ASSERTeq(strcmp(TEST_STR_2, D_RO(str2)), 0);
UT_ASSERTeq(wcscmp(TEST_WCS_2, D_RO(wcs2)), 0);
}
/*
 * do_tx_strdup_abort_nested -- duplicate two strings using a nested
 * transaction and abort the transaction
*/
static void
do_tx_strdup_abort_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1, TYPE_ABORT_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_ABORT_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_ABORT_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_ABORT_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED1));
UT_ASSERT(TOID_IS_NULL(str1));
UT_ASSERT(TOID_IS_NULL(wcs1));
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop, TYPE_WCS_ABORT_NESTED2));
UT_ASSERT(TOID_IS_NULL(str2));
UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
 * do_tx_strdup_abort_after_nested -- duplicate two strings using a nested
 * transaction and abort after the nested transaction
*/
static void
do_tx_strdup_abort_after_nested(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2;
TOID(wchar_t) wcs1;
TOID(wchar_t) wcs2;
TX_BEGIN(pop) {
do_tx_strdup[counter](&str1, TEST_STR_1,
TYPE_ABORT_AFTER_NESTED1);
do_tx_wcsdup[counter](&wcs1, TEST_WCS_1,
TYPE_WCS_ABORT_AFTER_NESTED1);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
TX_BEGIN(pop) {
do_tx_strdup[counter](&str2, TEST_STR_2,
TYPE_ABORT_AFTER_NESTED2);
do_tx_wcsdup[counter](&wcs2, TEST_WCS_2,
TYPE_WCS_ABORT_AFTER_NESTED2);
UT_ASSERT(!TOID_IS_NULL(str2));
UT_ASSERT(!TOID_IS_NULL(wcs2));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(str1, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1));
TOID_ASSIGN(wcs1, POBJ_FIRST_TYPE_NUM(pop,
TYPE_WCS_ABORT_AFTER_NESTED1));
UT_ASSERT(TOID_IS_NULL(str1));
UT_ASSERT(TOID_IS_NULL(wcs1));
TOID_ASSIGN(str2, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2));
TOID_ASSIGN(wcs2, POBJ_FIRST_TYPE_NUM(pop,
TYPE_WCS_ABORT_AFTER_NESTED2));
UT_ASSERT(TOID_IS_NULL(str2));
UT_ASSERT(TOID_IS_NULL(wcs2));
}
/*
* do_tx_strdup_noflush -- allocates zeroed object
*/
static void
do_tx_strdup_noflush(PMEMobjpool *pop)
{
TX_BEGIN(pop) {
errno = 0;
pmemobj_tx_xstrdup(TEST_STR_1, TYPE_NOFLUSH,
POBJ_XALLOC_NO_FLUSH);
pmemobj_tx_xwcsdup(TEST_WCS_1, TYPE_WCS_NOFLUSH,
POBJ_XALLOC_NO_FLUSH);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_strdup");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
for (counter = 0; counter < MAX_FUNC; counter++) {
do_tx_strdup_commit(pop);
do_tx_strdup_abort(pop);
do_tx_strdup_null(pop);
do_tx_strdup_free_commit(pop);
do_tx_strdup_free_abort(pop);
do_tx_strdup_commit_nested(pop);
do_tx_strdup_abort_nested(pop);
do_tx_strdup_abort_after_nested(pop);
}
do_tx_strdup_noflush(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 11,087 | 24.315068 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_realloc/obj_tx_realloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_tx_realloc.c -- unit test for pmemobj_tx_realloc and pmemobj_tx_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#define LAYOUT_NAME "tx_realloc"
#define TEST_VALUE_1 1
#define OBJ_SIZE 1024
enum type_number {
TYPE_NO_TX,
TYPE_COMMIT,
TYPE_ABORT,
TYPE_TYPE,
TYPE_COMMIT_ZERO,
TYPE_COMMIT_ZERO_MACRO,
TYPE_ABORT_ZERO,
TYPE_ABORT_ZERO_MACRO,
TYPE_COMMIT_ALLOC,
TYPE_ABORT_ALLOC,
TYPE_ABORT_HUGE,
TYPE_ABORT_ZERO_HUGE,
TYPE_ABORT_ZERO_HUGE_MACRO,
TYPE_FREE,
};
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object, 0);
struct object_macro {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
TOID_DECLARE(struct object_macro, TYPE_COMMIT_ZERO_MACRO);
/*
* do_tx_alloc -- do tx allocation with specified type number
*/
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num, size_t value)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, OID_NULL);
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(
sizeof(struct object), type_num));
if (!TOID_IS_NULL(obj)) {
D_RW(obj)->value = value;
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
return obj.oid;
}
/*
* do_tx_realloc_commit -- reallocate an object and commit the transaction
*/
static void
do_tx_realloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT, TEST_VALUE_1));
size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
new_size, TYPE_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_realloc_abort -- reallocate an object and abort the transaction
*/
static void
do_tx_realloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT, TEST_VALUE_1));
size_t new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
new_size, TYPE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_realloc_huge -- reallocate an object to a huge size to trigger tx abort
*/
static void
do_tx_realloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_HUGE, TEST_VALUE_1));
size_t new_size = PMEMOBJ_MAX_ALLOC_SIZE + 1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
new_size, TYPE_ABORT_HUGE));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_HUGE));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_zrealloc_commit_macro -- reallocate an object, zero it and commit
* the transaction using macro
*/
static void
do_tx_zrealloc_commit_macro(PMEMobjpool *pop)
{
TOID(struct object_macro) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO_MACRO,
TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
obj = TX_ZREALLOC(obj, new_size);
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO_MACRO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_zrealloc_commit -- reallocate an object, zero it and commit
* the transaction
*/
static void
do_tx_zrealloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ZERO, TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
new_size, TYPE_COMMIT_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort_macro -- reallocate an object, zero it and abort the
* transaction using macro
*/
static void
do_tx_zrealloc_abort_macro(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_MACRO, TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
obj = TX_ZREALLOC(obj, new_size);
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_MACRO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_abort -- reallocate an object, zero it and abort the
 * transaction
*/
static void
do_tx_zrealloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO, TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
new_size, TYPE_ABORT_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
void *new_ptr = (void *)((uintptr_t)D_RW(obj) + old_size);
UT_ASSERT(util_is_zeroed(new_ptr, new_size - old_size));
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge_macro -- reallocate an object to a huge size to trigger
* tx abort and zero it using macro
*/
static void
do_tx_zrealloc_huge_macro(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE_MACRO,
TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
obj = TX_ZREALLOC(obj, PMEMOBJ_MAX_ALLOC_SIZE + 1);
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE_MACRO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_zrealloc_huge -- reallocate an object to a huge size to trigger tx abort
*/
static void
do_tx_zrealloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ZERO_HUGE, TEST_VALUE_1));
size_t old_size = pmemobj_alloc_usable_size(obj.oid);
size_t new_size = 2 * old_size;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zrealloc(obj.oid,
PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT_ZERO_HUGE));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ZERO_HUGE));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) < new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_realloc_alloc_commit -- reallocate an allocated object
* and commit the transaction
*/
static void
do_tx_realloc_alloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
size_t new_size = 0;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_COMMIT_ALLOC,
TEST_VALUE_1));
UT_ASSERT(!TOID_IS_NULL(obj));
new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
new_size, TYPE_COMMIT_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
TOID_ASSIGN(obj, POBJ_NEXT_TYPE_NUM(obj.oid));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_realloc_alloc_abort -- reallocate an allocated object
 * and abort the transaction
*/
static void
do_tx_realloc_alloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
size_t new_size = 0;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_ABORT_ALLOC,
TEST_VALUE_1));
UT_ASSERT(!TOID_IS_NULL(obj));
new_size = 2 * pmemobj_alloc_usable_size(obj.oid);
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
new_size, TYPE_ABORT_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(pmemobj_alloc_usable_size(obj.oid) >= new_size);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_ALLOC));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_root_realloc -- retrieve root inside of transaction
*/
static void
do_tx_root_realloc(PMEMobjpool *pop)
{
TX_BEGIN(pop) {
PMEMoid root = pmemobj_root(pop, sizeof(struct object));
UT_ASSERT(!OID_IS_NULL(root));
UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
sizeof(struct object)));
UT_ASSERTeq(sizeof(struct object), pmemobj_root_size(pop));
root = pmemobj_root(pop, 2 * sizeof(struct object));
UT_ASSERT(!OID_IS_NULL(root));
UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
2 * sizeof(struct object)));
UT_ASSERTeq(2 * sizeof(struct object), pmemobj_root_size(pop));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
 * do_tx_realloc_free -- reallocate an allocated object to size zero,
 * which frees it, and commit the transaction
*/
static void
do_tx_realloc_free(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_FREE, TEST_VALUE_1));
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_realloc(obj.oid,
0, TYPE_COMMIT));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE));
UT_ASSERT(TOID_IS_NULL(obj));
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_realloc");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_root_realloc(pop);
do_tx_realloc_commit(pop);
do_tx_realloc_abort(pop);
do_tx_realloc_huge(pop);
do_tx_zrealloc_commit(pop);
do_tx_zrealloc_commit_macro(pop);
do_tx_zrealloc_abort(pop);
do_tx_zrealloc_abort_macro(pop);
do_tx_zrealloc_huge(pop);
do_tx_zrealloc_huge_macro(pop);
do_tx_realloc_alloc_commit(pop);
do_tx_realloc_alloc_abort(pop);
do_tx_realloc_free(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 12,874 | 25.767152 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_lock/obj_tx_lock.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_tx_lock.c -- unit test for pmemobj_tx_lock()
*/
#include "unittest.h"
#include "libpmemobj.h"
#include "obj.h"
#define LAYOUT_NAME "obj_tx_lock"
#define NUM_LOCKS 2
struct transaction_data {
PMEMmutex mutexes[NUM_LOCKS];
PMEMrwlock rwlocks[NUM_LOCKS];
};
static PMEMobjpool *Pop;
#define DO_LOCK(mtx, rwlock)\
pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[0]);\
pmemobj_tx_lock(TX_PARAM_MUTEX, &(mtx)[1]);\
pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[0]);\
pmemobj_tx_lock(TX_PARAM_RWLOCK, &(rwlock)[1])
#define IS_UNLOCKED(pop, mtx, rwlock)\
ret = 0;\
ret += pmemobj_mutex_trylock((pop), &(mtx)[0]);\
ret += pmemobj_mutex_trylock((pop), &(mtx)[1]);\
ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
ret += pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
UT_ASSERTeq(ret, 0);\
pmemobj_mutex_unlock((pop), &(mtx)[0]);\
pmemobj_mutex_unlock((pop), &(mtx)[1]);\
pmemobj_rwlock_unlock((pop), &(rwlock)[0]);\
pmemobj_rwlock_unlock((pop), &(rwlock)[1])
#define IS_LOCKED(pop, mtx, rwlock)\
ret = pmemobj_mutex_trylock((pop), &(mtx)[0]);\
UT_ASSERT(ret != 0);\
ret = pmemobj_mutex_trylock((pop), &(mtx)[1]);\
UT_ASSERT(ret != 0);\
ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[0]);\
UT_ASSERT(ret != 0);\
ret = pmemobj_rwlock_trywrlock((pop), &(rwlock)[1]);\
UT_ASSERT(ret != 0)
/*
* do_tx_add_locks -- (internal) transaction where locks are added after
* transaction begins
*/
static void *
do_tx_add_locks(struct transaction_data *data)
{
int ret;
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
TX_BEGIN(Pop) {
DO_LOCK(data->mutexes, data->rwlocks);
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
} TX_ONABORT { /* not called */
UT_ASSERT(0);
} TX_END
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
return NULL;
}
/*
* do_tx_add_locks_nested -- (internal) transaction where locks
* are added after nested transaction begins
*/
static void *
do_tx_add_locks_nested(struct transaction_data *data)
{
int ret;
TX_BEGIN(Pop) {
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
TX_BEGIN(Pop) {
DO_LOCK(data->mutexes, data->rwlocks);
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
} TX_END
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
return NULL;
}
/*
* do_tx_add_locks_nested_all -- (internal) transaction where all locks
* are added in both transactions after transaction begins
*/
static void *
do_tx_add_locks_nested_all(struct transaction_data *data)
{
int ret;
TX_BEGIN(Pop) {
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
DO_LOCK(data->mutexes, data->rwlocks);
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
TX_BEGIN(Pop) {
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
DO_LOCK(data->mutexes, data->rwlocks);
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
} TX_END
IS_LOCKED(Pop, data->mutexes, data->rwlocks);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
return NULL;
}
/*
* do_tx_add_taken_lock -- (internal) verify that failed tx_lock doesn't add
* the lock to transaction
*/
static void *
do_tx_add_taken_lock(struct transaction_data *data)
{
/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
(void) data;
#else
UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
TX_BEGIN(Pop) {
UT_ASSERTne(pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]),
0);
} TX_END
UT_ASSERTne(pmemobj_rwlock_trywrlock(Pop, &data->rwlocks[0]), 0);
UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
#endif
return NULL;
}
/*
* do_tx_lock_fail -- call pmemobj_tx_lock with POBJ_TX_NO_ABORT flag
* and taken lock
*/
static void *
do_tx_lock_fail(struct transaction_data *data)
{
/* wrlocks on Windows don't detect self-deadlocks */
#ifdef _WIN32
(void) data;
#else
UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
int ret = 0;
/* return errno and abort transaction */
TX_BEGIN(Pop) {
pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
} TX_ONABORT {
UT_ASSERTne(errno, 0);
UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/* POBJ_XLOCK_NO_ABORT: return the error code without aborting */
UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
TX_BEGIN(Pop) {
ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0],
POBJ_XLOCK_NO_ABORT);
} TX_ONCOMMIT {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* POBJ_TX_FAILURE_RETURN: pmemobj_tx_lock returns the error without aborting */
UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
TX_BEGIN(Pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_lock(TX_PARAM_RWLOCK, &data->rwlocks[0]);
} TX_ONCOMMIT {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
/* POBJ_TX_FAILURE_RETURN: pmemobj_tx_xlock returns the error without aborting */
UT_ASSERTeq(pmemobj_rwlock_wrlock(Pop, &data->rwlocks[0]), 0);
TX_BEGIN(Pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_xlock(TX_PARAM_RWLOCK, &data->rwlocks[0], 0);
} TX_ONCOMMIT {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(pmemobj_rwlock_unlock(Pop, &data->rwlocks[0]), 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
#endif
return NULL;
}
static void
do_fault_injection(struct transaction_data *data)
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "add_to_tx_and_lock");
int ret;
IS_UNLOCKED(Pop, data->mutexes, data->rwlocks);
TX_BEGIN(Pop) {
int err = pmemobj_tx_lock(TX_PARAM_MUTEX, &data->mutexes[0]);
if (err)
pmemobj_tx_abort(err);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
UT_ASSERTeq(errno, ENOMEM);
} TX_END
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_lock");
if (argc < 3)
UT_FATAL("usage: %s <file> [l|n|a|t|f|w]", argv[0]);
if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data));
struct transaction_data *test_obj =
(struct transaction_data *)pmemobj_direct(root);
/* go through all arguments one by one */
for (int arg = 2; arg < argc; arg++) {
/* each argument must be a single character from the supported set */
if (strchr("lnatfw", argv[arg][0]) == NULL ||
argv[arg][1] != '\0')
UT_FATAL("op must be l or n or a or t or f or w");
switch (argv[arg][0]) {
case 'l':
do_tx_add_locks(test_obj);
break;
case 'n':
do_tx_add_locks_nested(test_obj);
break;
case 'a':
do_tx_add_locks_nested_all(test_obj);
break;
case 't':
do_tx_add_taken_lock(test_obj);
break;
case 'f':
do_fault_injection(test_obj);
break;
case 'w':
do_tx_lock_fail(test_obj);
break;
}
}
pmemobj_close(Pop);
DONE(NULL);
}
| 7,003 | 24.75 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memops/obj_memops.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_memops.c -- basic memory operations tests
*
*/
#include <stddef.h>
#include "obj.h"
#include "memops.h"
#include "ulog.h"
#include "unittest.h"
#define TEST_ENTRIES 256
#define TEST_VALUES TEST_ENTRIES
enum fail_types {
FAIL_NONE,
FAIL_CHECKSUM,
FAIL_MODIFY_NEXT,
FAIL_MODIFY_VALUE,
};
struct test_object {
struct ULOG(TEST_ENTRIES) redo;
struct ULOG(TEST_ENTRIES) undo;
uint64_t values[TEST_VALUES];
};
static void
clear_test_values(struct test_object *object)
{
memset(object->values, 0, sizeof(uint64_t) * TEST_VALUES);
}
static int
redo_log_constructor(void *ctx, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = ctx;
const struct pmem_ops *p_ops = &pop->p_ops;
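/* log capacity is the usable size minus the header, rounded down to a cacheline */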
size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
CACHELINE_SIZE);
ulog_construct(OBJ_PTR_TO_OFF(ctx, ptr), capacity,
*(uint64_t *)arg, 1, 0, p_ops);
return 0;
}
static int
pmalloc_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
size_t s = SIZEOF_ALIGNED_ULOG(TEST_ENTRIES);
return pmalloc_construct(base, redo, s, redo_log_constructor, &gen_num,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
static void
test_free_entry(void *base, uint64_t *next)
{
/* noop for fake ulog entries */
}
static void
test_set_entries(PMEMobjpool *pop,
struct operation_context *ctx, struct test_object *object,
size_t nentries, enum fail_types fail, enum operation_log_type type)
{
operation_start(ctx);
UT_ASSERT(nentries <= ARRAY_SIZE(object->values));
for (size_t i = 0; i < nentries; ++i) {
operation_add_typed_entry(ctx,
&object->values[i], i + 1,
ULOG_OPERATION_SET, type);
}
operation_reserve(ctx, nentries * 16);
if (fail != FAIL_NONE) {
operation_cancel(ctx);
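/* simulate an interrupted operation by corrupting the log in one of three ways */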
switch (fail) {
case FAIL_CHECKSUM:
object->redo.checksum += 1;
break;
case FAIL_MODIFY_NEXT:
pmalloc_redo_extend(pop,
&object->redo.next, 0);
break;
case FAIL_MODIFY_VALUE:
object->redo.data[16] += 8;
break;
default:
UT_ASSERT(0);
}
ulog_recover((struct ulog *)&object->redo,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
for (size_t i = 0; i < nentries; ++i)
UT_ASSERTeq(object->values[i], 0);
} else {
operation_process(ctx);
operation_finish(ctx, 0);
for (size_t i = 0; i < nentries; ++i)
UT_ASSERTeq(object->values[i], i + 1);
}
}
static void
test_merge_op(struct operation_context *ctx, struct test_object *object)
{
operation_start(ctx);
operation_add_typed_entry(ctx,
&object->values[0], 0b10,
ULOG_OPERATION_OR, LOG_PERSISTENT);
operation_add_typed_entry(ctx,
&object->values[0], 0b01,
ULOG_OPERATION_OR, LOG_PERSISTENT);
operation_add_typed_entry(ctx,
&object->values[0], 0b00,
ULOG_OPERATION_AND, LOG_PERSISTENT);
operation_add_typed_entry(ctx,
&object->values[0], 0b01,
ULOG_OPERATION_OR, LOG_PERSISTENT);
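/* expected result: ((0 | 0b10 | 0b01) & 0b00) | 0b01 == 0b01 */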
operation_process(ctx);
operation_finish(ctx, 0);
UT_ASSERTeq(object->values[0], 0b01);
}
static void
test_same_twice(struct operation_context *ctx, struct test_object *object)
{
operation_start(ctx);
operation_add_typed_entry(ctx,
&object->values[0], 5,
ULOG_OPERATION_SET, LOG_PERSISTENT);
operation_add_typed_entry(ctx,
&object->values[0], 10,
ULOG_OPERATION_SET, LOG_PERSISTENT);
operation_process(ctx);
UT_ASSERTeq(object->values[0], 10);
operation_cancel(ctx);
}
static void
test_redo(PMEMobjpool *pop, struct test_object *object)
{
struct operation_context *ctx = operation_new(
(struct ulog *)&object->redo, TEST_ENTRIES,
pmalloc_redo_extend, (ulog_free_fn)pfree,
&pop->p_ops, LOG_TYPE_REDO);
/*
* Keep this test first.
* It tests a situation where the number of objects being added
* is equal to the capacity of the log.
*/
test_set_entries(pop, ctx, object, TEST_ENTRIES - 1,
FAIL_NONE, LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_TRANSIENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 10, FAIL_NONE, LOG_PERSISTENT);
clear_test_values(object);
test_merge_op(ctx, object);
clear_test_values(object);
test_set_entries(pop, ctx, object, 100, FAIL_NONE, LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 100, FAIL_CHECKSUM, LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 10, FAIL_CHECKSUM, LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_VALUE,
LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_VALUE,
LOG_PERSISTENT);
clear_test_values(object);
test_same_twice(ctx, object);
clear_test_values(object);
operation_delete(ctx);
/*
* Verify that rebuilding redo_next works. This requires that
* object->redo.next is != 0 - to achieve that, this test must
* be preceded by a test that fails to finish the ulog's operation.
*/
ctx = operation_new(
(struct ulog *)&object->redo, TEST_ENTRIES,
NULL, test_free_entry, &pop->p_ops, LOG_TYPE_REDO);
test_set_entries(pop, ctx, object, 100, 0, LOG_PERSISTENT);
clear_test_values(object);
/* FAIL_MODIFY_NEXT tests can only happen after redo_next test */
test_set_entries(pop, ctx, object, 100, FAIL_MODIFY_NEXT,
LOG_PERSISTENT);
clear_test_values(object);
test_set_entries(pop, ctx, object, 10, FAIL_MODIFY_NEXT,
LOG_PERSISTENT);
clear_test_values(object);
operation_delete(ctx);
}
static void
test_undo_small_single_copy(struct operation_context *ctx,
struct test_object *object)
{
operation_start(ctx);
object->values[0] = 1;
object->values[1] = 2;
operation_add_buffer(ctx,
&object->values, &object->values, sizeof(*object->values) * 2,
ULOG_OPERATION_BUF_CPY);
object->values[0] = 2;
object->values[1] = 1;
operation_process(ctx);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
operation_start(ctx);
UT_ASSERTeq(object->values[0], 1);
UT_ASSERTeq(object->values[1], 2);
object->values[0] = 2;
object->values[1] = 1;
operation_process(ctx);
UT_ASSERTeq(object->values[0], 2);
UT_ASSERTeq(object->values[1], 1);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static void
test_undo_small_single_set(struct operation_context *ctx,
struct test_object *object)
{
operation_start(ctx);
object->values[0] = 1;
object->values[1] = 2;
int c = 0;
operation_add_buffer(ctx,
&object->values, &c, sizeof(*object->values) * 2,
ULOG_OPERATION_BUF_SET);
operation_process(ctx);
UT_ASSERTeq(object->values[0], 0);
UT_ASSERTeq(object->values[1], 0);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static void
test_undo_small_multiple_set(struct operation_context *ctx,
struct test_object *object)
{
operation_start(ctx);
object->values[0] = 1;
object->values[1] = 2;
int c = 0;
operation_add_buffer(ctx,
&object->values[0], &c, sizeof(*object->values),
ULOG_OPERATION_BUF_SET);
operation_add_buffer(ctx,
&object->values[1], &c, sizeof(*object->values),
ULOG_OPERATION_BUF_SET);
operation_process(ctx);
UT_ASSERTeq(object->values[0], 0);
UT_ASSERTeq(object->values[1], 0);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static void
test_undo_large_single_copy(struct operation_context *ctx,
struct test_object *object)
{
operation_start(ctx);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 1;
operation_add_buffer(ctx,
&object->values, &object->values, sizeof(object->values),
ULOG_OPERATION_BUF_CPY);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 2;
operation_process(ctx);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
UT_ASSERTeq(object->values[i], i + 1);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static void
test_undo_checksum_mismatch(PMEMobjpool *pop, struct operation_context *ctx,
struct test_object *object, struct ulog *log)
{
operation_start(ctx);
for (uint64_t i = 0; i < 20; ++i)
object->values[i] = i + 1;
operation_add_buffer(ctx,
&object->values, &object->values, sizeof(*object->values) * 20,
ULOG_OPERATION_BUF_CPY);
for (uint64_t i = 0; i < 20; ++i)
object->values[i] = i + 2;
pmemobj_persist(pop, &object->values, sizeof(*object->values) * 20);
log->data[100] += 1; /* corrupt the log somewhere */
pmemobj_persist(pop, &log->data[100], sizeof(log->data[100]));
operation_process(ctx);
/* the log shouldn't get applied */
for (uint64_t i = 0; i < 20; ++i)
UT_ASSERTeq(object->values[i], i + 2);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static void
test_undo_large_copy(PMEMobjpool *pop, struct operation_context *ctx,
struct test_object *object)
{
operation_start(ctx);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 1;
operation_add_buffer(ctx,
&object->values, &object->values, sizeof(object->values),
ULOG_OPERATION_BUF_CPY);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 2;
operation_process(ctx);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
UT_ASSERTeq(object->values[i], i + 1);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 3;
operation_start(ctx);
operation_add_buffer(ctx,
&object->values, &object->values, sizeof(*object->values) * 26,
ULOG_OPERATION_BUF_CPY);
for (uint64_t i = 0; i < TEST_VALUES; ++i)
object->values[i] = i + 4;
pmemobj_persist(pop, &object->values, sizeof(object->values));
operation_process(ctx);
for (uint64_t i = 0; i < 26; ++i)
UT_ASSERTeq(object->values[i], i + 3);
for (uint64_t i = 26; i < TEST_VALUES; ++i)
UT_ASSERTeq(object->values[i], i + 4);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM);
}
static int
test_undo_foreach(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops)
{
size_t *nentries = arg;
++(*nentries);
return 0;
}
/*
* drain_empty -- drain for pmem_ops
*/
static void
drain_empty(void *ctx)
{
/* do nothing */
}
/*
* persist_empty -- persist for pmem_ops
*/
static int
persist_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
* flush_empty -- flush for pmem_ops
*/
static int
flush_empty(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
* memcpy_libc -- memcpy for pmem_ops
*/
static void *
memcpy_libc(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
return memcpy(dest, src, len);
}
/*
* memset_libc -- memset for pmem_ops
*/
static void *
memset_libc(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
return memset(ptr, c, sz);
}
/*
* test_undo_log_reuse -- test for correct reuse of log space
*/
static void
test_undo_log_reuse()
{
#define ULOG_SIZE 1024
struct pmem_ops ops = {
.persist = persist_empty,
.flush = flush_empty,
.drain = drain_empty,
.memcpy = memcpy_libc,
.memmove = NULL,
.memset = memset_libc,
.base = NULL,
};
struct ULOG(ULOG_SIZE) *first = util_aligned_malloc(CACHELINE_SIZE,
SIZEOF_ULOG(ULOG_SIZE));
struct ULOG(ULOG_SIZE) *second = util_aligned_malloc(CACHELINE_SIZE,
SIZEOF_ULOG(ULOG_SIZE));
ulog_construct((uint64_t)(first), ULOG_SIZE, 0, 0, 0, &ops);
ulog_construct((uint64_t)(second), ULOG_SIZE, 0, 0, 0, &ops);
first->next = (uint64_t)(second);
struct operation_context *ctx = operation_new(
(struct ulog *)first, ULOG_SIZE,
NULL, test_free_entry,
&ops, LOG_TYPE_UNDO);
size_t nentries = 0;
ulog_foreach_entry((struct ulog *)first,
test_undo_foreach, &nentries, &ops, NULL);
UT_ASSERTeq(nentries, 0);
/* first, let's populate the log with some valid entries */
size_t entry_size = (ULOG_SIZE / 2) - sizeof(struct ulog_entry_buf);
size_t total_entries = ((ULOG_SIZE * 2) / entry_size);
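/* each entry takes roughly half a log, so the entries overflow into the chained second log */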
char *data = MALLOC(entry_size);
memset(data, 0xc, entry_size); /* fill it with something */
for (size_t i = 0; i < total_entries; ++i) {
operation_add_buffer(ctx, (void *)0x123, data,
entry_size,
ULOG_OPERATION_BUF_CPY);
nentries = 0;
ulog_foreach_entry((struct ulog *)first,
test_undo_foreach, &nentries, &ops, NULL);
UT_ASSERTeq(nentries, i + 1);
}
operation_init(ctx); /* initialize a new operation */
/* let's overwrite old entries and see if they are no longer visible */
for (size_t i = 0; i < total_entries; ++i) {
operation_add_buffer(ctx, (void *)0x123, data,
entry_size,
ULOG_OPERATION_BUF_CPY);
nentries = 0;
ulog_foreach_entry((struct ulog *)first,
test_undo_foreach, &nentries, &ops, NULL);
UT_ASSERTeq(nentries, i + 1);
}
FREE(data);
operation_delete(ctx);
util_aligned_free(first);
util_aligned_free(second);
#undef ULOG_SIZE
}
/*
* test_redo_cleanup_same_size -- test that a redo log of the same size
* is cleaned up and reused correctly
*/
static void
test_redo_cleanup_same_size(PMEMobjpool *pop, struct test_object *object)
{
#define ULOG_SIZE 1024
struct operation_context *ctx = operation_new(
(struct ulog *)&object->redo, TEST_ENTRIES,
pmalloc_redo_extend, (ulog_free_fn)pfree,
&pop->p_ops, LOG_TYPE_REDO);
int ret = pmalloc(pop, &object->redo.next, ULOG_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
/* undo logs are clobbered at the end, which shrinks their size */
size_t capacity = ulog_capacity((struct ulog *)&object->undo,
TEST_ENTRIES, &pop->p_ops);
/* builtin log + one next */
UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
operation_start(ctx); /* initialize a new operation */
struct pobj_action act;
pmemobj_reserve(pop, &act, ULOG_SIZE, 0);
palloc_publish(&pop->heap, &act, 1, ctx);
operation_delete(ctx);
#undef ULOG_SIZE
}
static void
test_undo(PMEMobjpool *pop, struct test_object *object)
{
struct operation_context *ctx = operation_new(
(struct ulog *)&object->undo, TEST_ENTRIES,
pmalloc_redo_extend, (ulog_free_fn)pfree,
&pop->p_ops, LOG_TYPE_UNDO);
test_undo_small_single_copy(ctx, object);
test_undo_small_single_set(ctx, object);
test_undo_small_multiple_set(ctx, object);
test_undo_large_single_copy(ctx, object);
test_undo_large_copy(pop, ctx, object);
test_undo_checksum_mismatch(pop, ctx, object,
(struct ulog *)&object->undo);
/* undo logs are clobbered at the end, which shrinks their size */
size_t capacity = ulog_capacity((struct ulog *)&object->undo,
TEST_ENTRIES, &pop->p_ops);
/* builtin log + one next */
UT_ASSERTeq(capacity, TEST_ENTRIES * 2 + CACHELINE_SIZE);
operation_delete(ctx);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_memops");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, "obj_memops",
PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
/*
* The ulog API requires cacheline alignment. A cacheline-aligned
* allocation class is created here to properly test the ulog API.
* An aligned object can then be allocated using pmemobj_xalloc.
*/
struct pobj_alloc_class_desc new_ac = {
.unit_size = sizeof(struct test_object),
.alignment = CACHELINE_SIZE,
.units_per_block = 1,
.header_type = POBJ_HEADER_NONE,
};
if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &new_ac) == -1)
UT_FATAL("Failed to set allocation class");
PMEMoid pobject;
if (pmemobj_xalloc(pop, &pobject, sizeof(struct test_object), 0,
POBJ_CLASS_ID(new_ac.class_id), NULL, NULL) == -1)
UT_FATAL("Failed to allocate object");
struct test_object *object = pmemobj_direct(pobject);
UT_ASSERTne(object, NULL);
ulog_construct(OBJ_PTR_TO_OFF(pop, &object->undo),
TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &object->redo),
TEST_ENTRIES, 0, 0, 0, &pop->p_ops);
test_redo(pop, object);
test_undo(pop, object);
test_redo_cleanup_same_size(pop, object);
test_undo_log_reuse();
pmemobj_close(pop);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 15,904 | 23.319572 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_strdup/obj_strdup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_strdup.c -- unit test for pmemobj_strdup
*/
#include <sys/param.h>
#include <string.h>
#include <wchar.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "strdup"
TOID_DECLARE(char, 0);
TOID_DECLARE(wchar_t, 1);
enum type_number {
TYPE_SIMPLE,
TYPE_NULL,
TYPE_SIMPLE_ALLOC,
TYPE_SIMPLE_ALLOC_1,
TYPE_SIMPLE_ALLOC_2,
TYPE_NULL_ALLOC,
TYPE_NULL_ALLOC_1,
};
#define TEST_STR_1 "Test string 1"
#define TEST_STR_2 "Test string 2"
#define TEST_WCS_1 L"Test string 3"
#define TEST_WCS_2 L"Test string 4"
#define TEST_STR_EMPTY ""
#define TEST_WCS_EMPTY L""
/*
* do_strdup -- duplicate a string into an unallocated TOID using pmemobj_strdup
*/
static void
do_strdup(PMEMobjpool *pop)
{
TOID(char) str = TOID_NULL(char);
TOID(wchar_t) wcs = TOID_NULL(wchar_t);
pmemobj_strdup(pop, &str.oid, TEST_STR_1, TYPE_SIMPLE);
pmemobj_wcsdup(pop, &wcs.oid, TEST_WCS_1, TYPE_SIMPLE);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERT(!TOID_IS_NULL(wcs));
UT_ASSERTeq(strcmp(D_RO(str), TEST_STR_1), 0);
UT_ASSERTeq(wcscmp(D_RO(wcs), TEST_WCS_1), 0);
}
/*
* do_strdup_null -- duplicate a NULL string into an unallocated TOID
*/
static void
do_strdup_null(PMEMobjpool *pop)
{
TOID(char) str = TOID_NULL(char);
TOID(wchar_t) wcs = TOID_NULL(wchar_t);
pmemobj_strdup(pop, &str.oid, NULL, TYPE_NULL);
pmemobj_wcsdup(pop, &wcs.oid, NULL, TYPE_NULL);
UT_ASSERT(TOID_IS_NULL(str));
UT_ASSERT(TOID_IS_NULL(wcs));
}
/*
* do_alloc -- allocate toid and duplicate a string
*/
static TOID(char)
do_alloc(PMEMobjpool *pop, const char *s, unsigned type_num)
{
TOID(char) str;
POBJ_ZNEW(pop, &str, char);
pmemobj_strdup(pop, &str.oid, s, type_num);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERTeq(strcmp(D_RO(str), s), 0);
return str;
}
/*
* do_wcs_alloc -- allocate toid and duplicate a wide character string
*/
static TOID(wchar_t)
do_wcs_alloc(PMEMobjpool *pop, const wchar_t *s, unsigned type_num)
{
TOID(wchar_t) str;
POBJ_ZNEW(pop, &str, wchar_t);
pmemobj_wcsdup(pop, &str.oid, s, type_num);
UT_ASSERT(!TOID_IS_NULL(str));
UT_ASSERTeq(wcscmp(D_RO(str), s), 0);
return str;
}
/*
* do_strdup_alloc -- duplicate a string into an allocated TOID
*/
static void
do_strdup_alloc(PMEMobjpool *pop)
{
TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_2);
TOID(wchar_t) wcs2 = do_wcs_alloc(pop, TEST_WCS_2, TYPE_SIMPLE_ALLOC_2);
pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_SIMPLE_ALLOC);
pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_SIMPLE_ALLOC);
UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
UT_ASSERTeq(wcscmp(D_RO(wcs1), D_RO(wcs2)), 0);
}
/*
* do_strdup_null_alloc -- duplicate a NULL string into an allocated TOID
*/
static void
do_strdup_null_alloc(PMEMobjpool *pop)
{
TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_NULL_ALLOC_1);
TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_NULL_ALLOC_1);
TOID(char) str2 = TOID_NULL(char);
TOID(wchar_t) wcs2 = TOID_NULL(wchar_t);
pmemobj_strdup(pop, &str1.oid, D_RO(str2), TYPE_NULL_ALLOC);
pmemobj_wcsdup(pop, &wcs1.oid, D_RO(wcs2), TYPE_NULL_ALLOC);
UT_ASSERT(!TOID_IS_NULL(str1));
UT_ASSERT(!TOID_IS_NULL(wcs1));
}
/*
* do_strdup_uint64_range -- duplicate string with
* type number equal to range of unsigned long long int
*/
static void
do_strdup_uint64_range(PMEMobjpool *pop)
{
TOID(char) str1;
TOID(char) str2 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
TOID(char) str3;
TOID(char) str4 = do_alloc(pop, TEST_STR_2, TYPE_SIMPLE_ALLOC_1);
pmemobj_strdup(pop, &str1.oid, D_RO(str2), UINT64_MAX);
pmemobj_strdup(pop, &str3.oid, D_RO(str4), UINT64_MAX - 1);
UT_ASSERTeq(strcmp(D_RO(str1), D_RO(str2)), 0);
UT_ASSERTeq(strcmp(D_RO(str3), D_RO(str4)), 0);
}
/*
* do_strdup_alloc_empty_string -- duplicate string to internal container
* associated with type number equal to range of unsigned long long int
* and unsigned long long int - 1
*/
static void
do_strdup_alloc_empty_string(PMEMobjpool *pop)
{
TOID(char) str1 = do_alloc(pop, TEST_STR_1, TYPE_SIMPLE_ALLOC_1);
TOID(wchar_t) wcs1 = do_wcs_alloc(pop, TEST_WCS_1, TYPE_SIMPLE_ALLOC_1);
pmemobj_strdup(pop, &str1.oid, TEST_STR_EMPTY, TYPE_SIMPLE_ALLOC);
pmemobj_wcsdup(pop, &wcs1.oid, TEST_WCS_EMPTY, TYPE_SIMPLE_ALLOC);
UT_ASSERTeq(strcmp(D_RO(str1), TEST_STR_EMPTY), 0);
UT_ASSERTeq(wcscmp(D_RO(wcs1), TEST_WCS_EMPTY), 0);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_strdup");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_strdup(pop);
do_strdup_null(pop);
do_strdup_alloc(pop);
do_strdup_null_alloc(pop);
do_strdup_uint64_range(pop);
do_strdup_alloc_empty_string(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 5,017 | 26.571429 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_is_pmem/pmem_is_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem.c -- unit test for pmem_is_pmem()
*
* usage: pmem_is_pmem file [env]
*/
#include "unittest.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
/*
* worker -- the work each thread performs
*/
static void *
worker(void *arg)
{
int *ret = (int *)arg;
*ret = pmem_is_pmem(Addr, Size);
return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem_is_pmem");
if (argc < 2 || argc > 3)
UT_FATAL("usage: %s file [env]", argv[0]);
if (argc == 3)
UT_ASSERTeq(os_setenv("PMEM_IS_PMEM_FORCE", argv[2], 1), 0);
Addr = pmem_map_file(argv[1], 0, 0, 0, &Size, NULL);
UT_ASSERTne(Addr, NULL);
os_thread_t threads[NTHREAD];
int ret[NTHREAD];
/* kick off NTHREAD threads */
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
/* wait for all the threads to complete */
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
/* verify that all the threads return the same value */
for (int i = 1; i < NTHREAD; i++)
UT_ASSERTeq(ret[0], ret[i]);
UT_OUT("threads.is_pmem(Addr, Size): %d", ret[0]);
UT_ASSERTeq(os_unsetenv("PMEM_IS_PMEM_FORCE"), 0);
UT_OUT("is_pmem(Addr, Size): %d", pmem_is_pmem(Addr, Size));
/* zero-sized region is not pmem */
UT_OUT("is_pmem(Addr, 0): %d", pmem_is_pmem(Addr, 0));
UT_OUT("is_pmem(Addr + Size / 2, 0): %d",
pmem_is_pmem((char *)Addr + Size / 2, 0));
UT_OUT("is_pmem(Addr + Size, 0): %d",
pmem_is_pmem((char *)Addr + Size, 0));
DONE(NULL);
}
| 3,216 | 30.23301 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc_int/rpmem_obc_int.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc_int.c -- integration test for rpmem_obc and rpmemd_obc modules
*/
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmemd_obc.h"
#include "rpmemd_log.h"
#include "os.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define PERSIST_METHOD RPMEM_PM_GPSPM
#define RESP_ATTR_INIT {\
.port = PORT,\
.rkey = RKEY,\
.raddr = RADDR,\
.persist_method = PERSIST_METHOD,\
.nlanes = NLANES_RESP,\
}
#define REQ_ATTR_INIT {\
.pool_size = POOL_SIZE,\
.nlanes = NLANES,\
.provider = PROVIDER,\
.pool_desc = POOL_DESC,\
}
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server);
/*
* client_create -- perform create request
*/
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
int ret;
struct rpmem_obc *rpc;
struct rpmem_target_info *info;
struct rpmem_req_attr req = REQ_ATTR_INIT;
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
struct rpmem_resp_attr res;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
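/* nonblocking check that the connection is still alive */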
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ex_res.port, res.port);
UT_ASSERTeq(ex_res.rkey, res.rkey);
UT_ASSERTeq(ex_res.raddr, res.raddr);
UT_ASSERTeq(ex_res.persist_method, res.persist_method);
UT_ASSERTeq(ex_res.nlanes, res.nlanes);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTeq(ret, 0);
rpmem_obc_fini(rpc);
return 1;
}
/*
* client_open -- perform open request
*/
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
int ret;
struct rpmem_obc *rpc;
struct rpmem_target_info *info;
struct rpmem_req_attr req = REQ_ATTR_INIT;
struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
struct rpmem_pool_attr pool_attr;
struct rpmem_resp_attr ex_res = RESP_ATTR_INIT;
struct rpmem_resp_attr res;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ex_res.port, res.port);
UT_ASSERTeq(ex_res.rkey, res.rkey);
UT_ASSERTeq(ex_res.raddr, res.raddr);
UT_ASSERTeq(ex_res.persist_method, res.persist_method);
UT_ASSERTeq(ex_res.nlanes, res.nlanes);
UT_ASSERTeq(memcmp(&ex_pool_attr, &pool_attr,
sizeof(ex_pool_attr)), 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTeq(ret, 0);
rpmem_obc_fini(rpc);
return 1;
}
/*
* client_set_attr -- perform set attributes request
*/
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
int ret;
struct rpmem_obc *rpc;
struct rpmem_target_info *info;
const struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_set_attr(rpc, &pool_attr);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTeq(ret, 0);
rpmem_obc_fini(rpc);
return 1;
}
/*
* req_arg -- request callbacks argument
*/
struct req_arg {
struct rpmem_resp_attr resp;
struct rpmem_pool_attr pool_attr;
int closing;
};
/*
* req_create -- process create request
*/
static int
req_create(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr)
{
struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_INIT;
UT_ASSERTne(arg, NULL);
UT_ASSERTeq(ex_req.provider, req->provider);
UT_ASSERTeq(ex_req.pool_size, req->pool_size);
UT_ASSERTeq(ex_req.nlanes, req->nlanes);
UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
struct req_arg *args = arg;
return rpmemd_obc_create_resp(obc, 0, &args->resp);
}
/*
* req_open -- process open request
*/
static int
req_open(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req)
{
struct rpmem_req_attr ex_req = REQ_ATTR_INIT;
UT_ASSERTne(arg, NULL);
UT_ASSERTeq(ex_req.provider, req->provider);
UT_ASSERTeq(ex_req.pool_size, req->pool_size);
UT_ASSERTeq(ex_req.nlanes, req->nlanes);
UT_ASSERTeq(strcmp(ex_req.pool_desc, req->pool_desc), 0);
struct req_arg *args = arg;
return rpmemd_obc_open_resp(obc, 0,
&args->resp, &args->pool_attr);
}
/*
* req_set_attr -- process set attributes request
*/
static int
req_set_attr(struct rpmemd_obc *obc, void *arg,
const struct rpmem_pool_attr *pool_attr)
{
struct rpmem_pool_attr ex_pool_attr = POOL_ATTR_ALT;
UT_ASSERTne(arg, NULL);
UT_ASSERTeq(memcmp(&ex_pool_attr, pool_attr, sizeof(ex_pool_attr)), 0);
return rpmemd_obc_set_attr_resp(obc, 0);
}
/*
* req_close -- process close request
*/
static int
req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
UT_ASSERTne(arg, NULL);
struct req_arg *args = arg;
args->closing = 1;
return rpmemd_obc_close_resp(obc, 0);
}
/*
* REQ -- server request callbacks
*/
static struct rpmemd_obc_requests REQ = {
.create = req_create,
.open = req_open,
.close = req_close,
.set_attr = req_set_attr,
};
/*
* server -- run server and process clients requests
*/
int
server(const struct test_case *tc, int argc, char *argv[])
{
int ret;
struct req_arg arg = {
.resp = RESP_ATTR_INIT,
.pool_attr = POOL_ATTR_INIT,
.closing = 0,
};
struct rpmemd_obc *obc;
obc = rpmemd_obc_init(0, 1);
UT_ASSERTne(obc, NULL);
ret = rpmemd_obc_status(obc, 0);
UT_ASSERTeq(ret, 0);
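/* keep processing requests until the client sends a close request */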
while (1) {
ret = rpmemd_obc_process(obc, &REQ, &arg);
if (arg.closing) {
break;
} else {
UT_ASSERTeq(ret, 0);
}
}
ret = rpmemd_obc_process(obc, &REQ, &arg);
UT_ASSERTeq(ret, 1);
rpmemd_obc_fini(obc);
return 0;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(server),
TEST_CASE(client_create),
TEST_CASE(client_open),
TEST_CASE(client_set_attr),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_obc");
common_init("rpmem_fip",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
rpmemd_log_level = rpmemd_log_level_from_str(
os_getenv("RPMEMD_LOG_LEVEL"));
rpmem_util_cmds_init();
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
rpmem_util_cmds_fini();
common_fini();
rpmemd_log_close();
DONE(NULL);
}
| 8,537 | 20.780612 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/mmap_fixed/mmap_fixed.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* mmap_fixed.c -- test memory mapping with MAP_FIXED for various lengths
*
* This test is intended to be used for testing Windows implementation
* of memory mapping routines - mmap(), munmap(), msync() and mprotect().
* Those functions should provide the same functionality as their Linux
* counterparts, at least with respect to the features that are used
* in PMDK libraries.
*
* Known issues and differences between Linux and Windows implementation
* are described in src/common/mmap_windows.c.
*/
#include "unittest.h"
#include <sys/mman.h>
#define ALIGN(size) ((size) & ~(Ut_mmap_align - 1))
/*
* test_mmap_fixed -- test fixed mappings
*/
static void
test_mmap_fixed(const char *name1, const char *name2, size_t len1, size_t len2)
{
size_t len1_aligned = ALIGN(len1);
size_t len2_aligned = ALIGN(len2);
UT_OUT("len: %zu (%zu) + %zu (%zu) = %zu", len1, len1_aligned,
len2, len2_aligned, len1_aligned + len2_aligned);
int fd1 = OPEN(name1, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
int fd2 = OPEN(name2, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
POSIX_FALLOCATE(fd1, 0, (os_off_t)len1);
POSIX_FALLOCATE(fd2, 0, (os_off_t)len2);
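/* map the first file over the whole combined range to reserve the address space */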
char *ptr1 = mmap(NULL, len1_aligned + len2_aligned,
PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_OUT("ptr1: %p, ptr2: %p", ptr1, ptr1 + len1_aligned);
char *ptr2 = mmap(ptr1 + len1_aligned, len2_aligned,
PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, fd2, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTeq(ptr2, ptr1 + len1_aligned);
UT_ASSERTne(munmap(ptr1, len1_aligned), -1);
UT_ASSERTne(munmap(ptr2, len2_aligned), -1);
CLOSE(fd1);
CLOSE(fd2);
UNLINK(name1);
UNLINK(name2);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "mmap_fixed");
if (argc < 4)
UT_FATAL("usage: %s dirname len1 len2 ...", argv[0]);
size_t *lengths = MALLOC(sizeof(size_t) * (size_t)argc - 2);
UT_ASSERTne(lengths, NULL);
size_t appendix_length = 20; /* room for the appended file name */
char *name1 = MALLOC(strlen(argv[1]) + appendix_length);
char *name2 = MALLOC(strlen(argv[1]) + appendix_length);
sprintf(name1, "%s\\testfile1", argv[1]);
sprintf(name2, "%s\\testfile2", argv[1]);
for (int i = 0; i < argc - 2; i++)
lengths[i] = ATOULL(argv[i + 2]);
for (int i = 0; i < argc - 2; i++)
for (int j = 0; j < argc - 2; j++)
test_mmap_fixed(name1, name2, lengths[i], lengths[j]);
FREE(name1);
FREE(name2);
FREE(lengths);
DONE(NULL);
}
| 2,522 | 26.129032 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_movnt/pmem2_movnt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_movnt.c -- test for MOVNT threshold
*
* usage: pmem2_movnt
*/
#include "unittest.h"
#include "ut_pmem2.h"
int
main(int argc, char *argv[])
{
int fd;
char *dst;
char *src;
struct pmem2_config *cfg;
struct pmem2_source *psrc;
struct pmem2_map *map;
if (argc != 2)
UT_FATAL("usage: %s file", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_movnt %s %savx %savx512f",
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&psrc, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, psrc, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
src = MEMALIGN(64, 8192);
dst = MEMALIGN(64, 8192);
memset(src, 0x88, 8192);
memset(dst, 0, 8192);
pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
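/* sizes from 1 B to 4 KiB cross the movnt threshold; the byte just past each range must stay untouched */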
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
memcpy_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
UT_ASSERTeq(memcmp(src, dst, size), 0);
UT_ASSERTeq(dst[size], 0);
}
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
memmove_fn(dst, src, size, PMEM2_F_MEM_NODRAIN);
UT_ASSERTeq(memcmp(src, dst, size), 0);
UT_ASSERTeq(dst[size], 0);
}
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
memset_fn(dst, 0x77, size, PMEM2_F_MEM_NODRAIN);
UT_ASSERTeq(dst[0], 0x77);
UT_ASSERTeq(dst[size - 1], 0x77);
UT_ASSERTeq(dst[size], 0);
}
ALIGNED_FREE(dst);
ALIGNED_FREE(src);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
DONE(NULL);
}
| 1,945 | 21.113636 | 59 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/pmem2_memmove.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem2_memmove.c -- test for doing a memmove
*
* usage:
* pmem2_memmove file b:length [d:{offset}] [s:{offset}] [o:{0|1}]
*
*/
#include "unittest.h"
#include "ut_pmem2.h"
#include "file.h"
#include "memmove_common.h"
static void
do_memmove_variants(char *dst, char *src, const char *file_name,
size_t dest_off, size_t src_off, size_t bytes, persist_fn p,
memmove_fn fn)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memmove(dst, src, file_name, dest_off, src_off,
bytes, fn, Flags[i], p);
}
}
int
main(int argc, char *argv[])
{
int fd;
char *dst;
char *src;
char *src_orig;
size_t dst_off = 0;
size_t src_off = 0;
size_t bytes = 0;
int who = 0;
size_t mapped_len;
struct pmem2_config *cfg;
struct pmem2_source *psrc;
struct pmem2_map *map;
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_memmove %s %s %s %s %savx %savx512f",
argc > 2 ? argv[2] : "null",
argc > 3 ? argv[3] : "null",
argc > 4 ? argv[4] : "null",
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
if (argc < 3)
USAGE();
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&psrc, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, psrc, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
pmem2_persist_fn persist = pmem2_get_persist_fn(map);
mapped_len = pmem2_map_get_size(map);
dst = pmem2_map_get_address(map);
if (dst == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
for (int arg = 2; arg < argc; arg++) {
if (strchr("dsbo",
argv[arg][0]) == NULL || argv[arg][1] != ':')
UT_FATAL("op must be d: or s: or b: or o:");
size_t val = STRTOUL(&argv[arg][2], NULL, 0);
switch (argv[arg][0]) {
case 'd':
if (val <= 0)
UT_FATAL("bad offset (%lu) with d: option",
val);
dst_off = val;
break;
case 's':
if (val <= 0)
UT_FATAL("bad offset (%lu) with s: option",
val);
src_off = val;
break;
case 'b':
if (val <= 0)
UT_FATAL("bad length (%lu) with b: option",
val);
bytes = val;
break;
case 'o':
if (val != 1 && val != 0)
UT_FATAL("bad val (%lu) with o: option",
val);
who = (int)val;
break;
}
}
if (who == 0) {
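/* dest < src */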
src_orig = src = dst + mapped_len / 2;
UT_ASSERT(src > dst);
do_memmove_variants(dst, src, argv[1], dst_off, src_off,
bytes, persist, memmove_fn);
/* dest > src */
src = dst;
dst = src_orig;
if (dst <= src)
UT_FATAL("cannot map files in memory order");
do_memmove_variants(dst, src, argv[1], dst_off, src_off,
bytes, persist, memmove_fn);
} else {
/* use the same buffer for source and destination */
memset(dst, 0, bytes);
persist(dst, bytes);
do_memmove_variants(dst, dst, argv[1], dst_off, src_off,
bytes, persist, memmove_fn);
}
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
DONE(NULL);
}
| 3,184 | 20.52027 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/memmove_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memmove_common.h -- header file for common memmove_common test utilities
*/
#ifndef MEMMOVE_COMMON_H
#define MEMMOVE_COMMON_H 1
#include "unittest.h"
#include "file.h"
extern unsigned Flags[10];
#define USAGE() do { UT_FATAL("usage: %s file b:length [d:{offset}] "\
"[s:{offset}] [o:{0|1}]", argv[0]); } while (0)
typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void (*persist_fn)(const void *ptr, size_t len);
void do_memmove(char *dst, char *src, const char *file_name,
size_t dest_off, size_t src_off, size_t bytes,
memmove_fn fn, unsigned flags, persist_fn p);
void verify_contents(const char *file_name, int test, const char *buf1,
const char *buf2, size_t len);
#endif
| 832 | 25.870968 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memmove/memmove_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memmove_common.c -- common part for tests doing a persistent memmove
*/
#include "unittest.h"
#include "memmove_common.h"
/*
* verify_contents -- verify that buffers match, if they don't - print contents
* of both and abort the test
*/
void
verify_contents(const char *file_name, int test,
const char *buf1, const char *buf2,
size_t len)
{
if (memcmp(buf1, buf2, len) == 0)
return;
for (size_t i = 0; i < len; ++i)
UT_ERR("%04zu 0x%02x 0x%02x %s", i, (uint8_t)buf1[i],
(uint8_t)buf2[i],
buf1[i] != buf2[i] ? "!!!" : "");
UT_FATAL("%s %d: %zu bytes do not match with memcmp",
file_name, test, len);
}
/*
* do_memmove: Worker function for memmove.
*
* Always work within the boundary of bytes. Fill in 1/2 of the src
* memory with the pattern we want to write. This allows us to check
* that we did not overwrite anything we were not supposed to in the
* dest. Use the non-pmem version of the memset/memcpy functions
* so as not to introduce any possible side effects.
*/
void
do_memmove(char *dst, char *src, const char *file_name,
size_t dest_off, size_t src_off, size_t bytes,
memmove_fn fn, unsigned flags, persist_fn persist)
{
void *ret;
char *srcshadow = MALLOC(dest_off + src_off + bytes);
char *dstshadow = srcshadow;
if (src != dst)
dstshadow = MALLOC(dest_off + src_off + bytes);
char old;
memset(src, 0x11, bytes);
memset(dst, 0x22, bytes);
memset(src, 0x33, bytes / 4);
memset(src + bytes / 4, 0x44, bytes / 4);
persist(src, bytes);
persist(dst, bytes);
memcpy(srcshadow, src, bytes);
memcpy(dstshadow, dst, bytes);
/* TEST 1, dest == src */
old = *(char *)(dst + dest_off);
ret = fn(dst + dest_off, dst + dest_off, bytes / 2, flags);
UT_ASSERTeq(ret, dst + dest_off);
UT_ASSERTeq(*(char *)(dst + dest_off), old);
/* do the same using regular memmove and verify that buffers match */
memmove(dstshadow + dest_off, dstshadow + dest_off, bytes / 2);
verify_contents(file_name, 0, dstshadow, dst, bytes);
verify_contents(file_name, 1, srcshadow, src, bytes);
/* TEST 2, len == 0 */
old = *(char *)(dst + dest_off);
ret = fn(dst + dest_off, src + src_off, 0, flags);
UT_ASSERTeq(ret, dst + dest_off);
UT_ASSERTeq(*(char *)(dst + dest_off), old);
/* do the same using regular memmove and verify that buffers match */
memmove(dstshadow + dest_off, srcshadow + src_off, 0);
verify_contents(file_name, 2, dstshadow, dst, bytes);
verify_contents(file_name, 3, srcshadow, src, bytes);
/* TEST 3, len == bytes / 2 */
ret = fn(dst + dest_off, src + src_off, bytes / 2, flags);
UT_ASSERTeq(ret, dst + dest_off);
if (flags & PMEM_F_MEM_NOFLUSH)
/* for pmemcheck */
persist(dst + dest_off, bytes / 2);
/* do the same using regular memmove and verify that buffers match */
memmove(dstshadow + dest_off, srcshadow + src_off, bytes / 2);
verify_contents(file_name, 4, dstshadow, dst, bytes);
verify_contents(file_name, 5, srcshadow, src, bytes);
FREE(srcshadow);
if (dstshadow != srcshadow)
FREE(dstshadow);
}
unsigned Flags[] = {
0,
PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_NONTEMPORAL,
PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_WC,
PMEM_F_MEM_WB,
PMEM_F_MEM_NOFLUSH,
/* all possible flags */
PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 3,503 | 28.694915 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_zones/obj_zones.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_zones.c -- allocates from a very large pool (exceeding 1 zone)
*
*/
#include <stddef.h>
#include <page_size.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_zones"
#define ALLOC_SIZE ((8191 * (256 * 1024)) - 16) /* must evenly divide a zone */
/*
* test_create -- allocate all possible objects and log the number. It should
* exceed what would be possible on a single zone.
* Additionally, free one object so that we can later check that it can be
* allocated after the next open.
*/
static void
test_create(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid oid;
int n = 0;
while (1) {
if (pmemobj_alloc(pop, &oid, ALLOC_SIZE, 0, NULL, NULL) != 0)
break;
n++;
}
UT_OUT("allocated: %d", n);
pmemobj_free(&oid);
pmemobj_close(pop);
}
/*
* test_open -- in the open test we should be able to allocate exactly
* one object.
*/
static void
test_open(const char *path)
{
PMEMobjpool *pop;
if ((pop = pmemobj_open(path, LAYOUT_NAME)) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
int ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
ret = pmemobj_alloc(pop, NULL, ALLOC_SIZE, 0, NULL, NULL);
UT_ASSERTne(ret, 0);
pmemobj_close(pop);
}
/*
* test_malloc_free -- test if alloc until OOM/free/alloc until OOM sequence
* produces the same number of allocations for the second alloc loop.
*/
static void
test_malloc_free(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
size_t alloc_size = PMEM_PAGESIZE * 32;
size_t max_allocs = 1000000;
PMEMoid *oid = MALLOC(sizeof(PMEMoid) * max_allocs);
size_t n = 0;
while (1) {
if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
break;
n++;
UT_ASSERTne(n, max_allocs);
}
size_t first_run_allocated = n;
for (size_t i = 0; i < n; ++i) {
pmemobj_free(&oid[i]);
}
n = 0;
while (1) {
if (pmemobj_alloc(pop, &oid[n], alloc_size, 0, NULL, NULL) != 0)
break;
n++;
}
UT_ASSERTeq(first_run_allocated, n);
pmemobj_close(pop);
FREE(oid);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_zones");
if (argc != 3)
UT_FATAL("usage: %s file-name [open|create]", argv[0]);
const char *path = argv[1];
char op = argv[2][0];
if (op == 'c')
test_create(path);
else if (op == 'o')
test_open(path);
else if (op == 'f')
test_malloc_free(path);
else
UT_FATAL("invalid operation");
DONE(NULL);
}
| 2,706 | 20.148438 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_locks_abort/obj_tx_locks_abort.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_tx_locks_abort.c -- unit test for transaction locks
*/
#include "unittest.h"
#define LAYOUT_NAME "locks"
TOID_DECLARE_ROOT(struct root_obj);
TOID_DECLARE(struct obj, 1);
struct root_obj {
PMEMmutex lock;
TOID(struct obj) head;
};
struct obj {
int data;
PMEMmutex lock;
TOID(struct obj) next;
};
/*
* do_nested_tx-- (internal) nested transaction
*/
static void
do_nested_tx(PMEMobjpool *pop, TOID(struct obj) o, int value)
{
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
TX_ADD(o);
D_RW(o)->data = value;
if (!TOID_IS_NULL(D_RO(o)->next)) {
/*
* Add the object to undo log, while the mutex
* it contains is not locked.
*/
TX_ADD(D_RO(o)->next);
do_nested_tx(pop, D_RO(o)->next, value);
}
} TX_END;
}
/*
* do_aborted_nested_tx -- (internal) aborted nested transaction
*/
static void
do_aborted_nested_tx(PMEMobjpool *pop, TOID(struct obj) oid, int value)
{
TOID(struct obj) o = oid;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(o)->lock, TX_PARAM_NONE) {
TX_ADD(o);
D_RW(o)->data = value;
if (!TOID_IS_NULL(D_RO(o)->next)) {
/*
* Add the object to undo log, while the mutex
* it contains is not locked.
*/
TX_ADD(D_RO(o)->next);
do_nested_tx(pop, D_RO(o)->next, value);
}
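/* abort the outer transaction -- TX_FINALLY below verifies that all locks get released */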
pmemobj_tx_abort(EINVAL);
} TX_FINALLY {
o = oid;
while (!TOID_IS_NULL(o)) {
if (pmemobj_mutex_trylock(pop, &D_RW(o)->lock)) {
UT_OUT("trylock failed");
} else {
UT_OUT("trylock succeeded");
pmemobj_mutex_unlock(pop, &D_RW(o)->lock);
}
o = D_RO(o)->next;
}
} TX_END;
}
/*
* do_check -- (internal) print 'data' value of each object on the list
*/
static void
do_check(TOID(struct obj) o)
{
while (!TOID_IS_NULL(o)) {
UT_OUT("data = %d", D_RO(o)->data);
o = D_RO(o)->next;
}
}
int
main(int argc, char *argv[])
{
PMEMobjpool *pop;
START(argc, argv, "obj_tx_locks_abort");
if (argc > 3)
UT_FATAL("usage: %s <file>", argv[0]);
pop = pmemobj_create(argv[1], LAYOUT_NAME,
PMEMOBJ_MIN_POOL * 4, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create");
TOID(struct root_obj) root = POBJ_ROOT(pop, struct root_obj);
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(root)->lock) {
TX_ADD(root);
D_RW(root)->head = TX_ZNEW(struct obj);
TOID(struct obj) o;
o = D_RW(root)->head;
D_RW(o)->data = 100;
pmemobj_mutex_zero(pop, &D_RW(o)->lock);
for (int i = 0; i < 3; i++) {
D_RW(o)->next = TX_ZNEW(struct obj);
o = D_RO(o)->next;
D_RW(o)->data = 101 + i;
pmemobj_mutex_zero(pop, &D_RW(o)->lock);
}
TOID_ASSIGN(D_RW(o)->next, OID_NULL);
} TX_END;
UT_OUT("initial state");
do_check(D_RO(root)->head);
UT_OUT("nested tx");
do_nested_tx(pop, D_RW(root)->head, 200);
do_check(D_RO(root)->head);
UT_OUT("aborted nested tx");
do_aborted_nested_tx(pop, D_RW(root)->head, 300);
do_check(D_RO(root)->head);
pmemobj_close(pop);
DONE(NULL);
}
| 2,994 | 20.392857 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_persist_valgrind/pmem2_persist_valgrind.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_persist_valgrind.c -- pmem2_persist_valgrind tests
*/
#include "out.h"
#include "unittest.h"
#include "ut_pmem2_utils.h"
#define DATA "XXXXXXXX"
#define STRIDE_SIZE 4096
/*
* test_ctx -- essential parameters used by test
*/
struct test_ctx {
int fd;
struct pmem2_map *map;
};
/*
* test_init -- prepare resources required for testing
*/
static int
test_init(const struct test_case *tc, int argc, char *argv[],
struct test_ctx *ctx)
{
if (argc < 1)
UT_FATAL("usage: %s <file>", tc->name);
char *file = argv[0];
ctx->fd = OPEN(file, O_RDWR);
struct pmem2_source *src;
int ret = pmem2_source_from_fd(&src, ctx->fd);
UT_PMEM2_EXPECT_RETURN(ret, 0);
struct pmem2_config *cfg;
/* fill pmem2_config with the minimum required settings */
ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
ret = pmem2_config_set_required_store_granularity(
cfg, PMEM2_GRANULARITY_PAGE);
UT_PMEM2_EXPECT_RETURN(ret, 0);
/* execute pmem2_map and validate the result */
ret = pmem2_map(cfg, src, &ctx->map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(ctx->map, NULL);
size_t size;
UT_ASSERTeq(pmem2_source_size(src, &size), 0);
UT_ASSERTeq(pmem2_map_get_size(ctx->map), size);
pmem2_config_delete(&cfg);
/* the function returns the number of consumed arguments */
return 1;
}
/*
* test_fini -- cleanup the test resources
*/
static void
test_fini(struct test_ctx *ctx)
{
pmem2_unmap(&ctx->map);
CLOSE(ctx->fd);
}
/*
* data_write -- write the data in mapped memory
*/
static void
data_write(void *addr, size_t size, size_t stride)
{
for (size_t offset = 0; offset + sizeof(DATA) <= size;
offset += stride) {
memcpy((void *)((uintptr_t)addr + offset), DATA, sizeof(DATA));
}
}
/*
* data_persist -- persist data in a range of mapped memory with defined stride
*/
static void
data_persist(struct pmem2_map *map, size_t len, size_t stride)
{
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn p_func = pmem2_get_persist_fn(map);
for (size_t offset = 0; offset + len <= map_size;
offset += stride) {
p_func(addr + offset, len);
}
}
/*
* test_persist_continuous_range -- persist continuous data in a range of
* the persistent memory
*/
static int
test_persist_continuous_range(const struct test_case *tc, int argc,
char *argv[])
{
struct test_ctx ctx = {0};
int ret = test_init(tc, argc, argv, &ctx);
char *addr = pmem2_map_get_address(ctx.map);
size_t map_size = pmem2_map_get_size(ctx.map);
data_write(addr, map_size, sizeof(DATA) /* stride */);
data_persist(ctx.map, map_size, map_size /* stride */);
test_fini(&ctx);
return ret;
}
/*
* test_persist_discontinuous_range -- persist discontinuous data in a range of
* the persistent memory
*/
static int
test_persist_discontinuous_range(const struct test_case *tc, int argc,
char *argv[])
{
struct test_ctx ctx = {0};
int ret = test_init(tc, argc, argv, &ctx);
char *addr = pmem2_map_get_address(ctx.map);
size_t map_size = pmem2_map_get_size(ctx.map);
data_write(addr, map_size, STRIDE_SIZE);
data_persist(ctx.map, sizeof(DATA), STRIDE_SIZE);
test_fini(&ctx);
return ret;
}
/*
* test_persist_discontinuous_range_partially -- persist part of discontinuous
* data in a range of persistent memory
*/
static int
test_persist_discontinuous_range_partially(const struct test_case *tc, int argc,
char *argv[])
{
struct test_ctx ctx = {0};
int ret = test_init(tc, argc, argv, &ctx);
char *addr = pmem2_map_get_address(ctx.map);
size_t map_size = pmem2_map_get_size(ctx.map);
data_write(addr, map_size, STRIDE_SIZE);
/* persist only a half of the writes */
data_persist(ctx.map, sizeof(DATA), 2 * STRIDE_SIZE);
test_fini(&ctx);
return ret;
}
/*
* test_persist_nonpmem_data -- persist data in a range of the memory mapped
* by mmap()
*/
static int
test_persist_nonpmem_data(const struct test_case *tc, int argc, char *argv[])
{
struct test_ctx ctx = {0};
/* pmem2_map is needed to get persist function */
int ret = test_init(tc, argc, argv, &ctx);
size_t size = pmem2_map_get_size(ctx.map);
int flags = MAP_SHARED;
int proto = PROT_READ | PROT_WRITE;
char *addr;
addr = mmap(NULL, size, proto, flags, ctx.fd, 0);
UT_ASSERTne(addr, MAP_FAILED);
data_write(addr, size, sizeof(DATA) /* stride */);
pmem2_persist_fn p_func = pmem2_get_persist_fn(ctx.map);
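/* apply the pmem2 persist fn to an unrelated, plain mmap() region */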
p_func(addr, size);
munmap(addr, size);
test_fini(&ctx);
return ret;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_persist_continuous_range),
TEST_CASE(test_persist_discontinuous_range),
TEST_CASE(test_persist_discontinuous_range_partially),
TEST_CASE(test_persist_nonpmem_data),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_persist_valgrind");
out_init("pmem2_persist_valgrind", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0,
0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
| 5,072 | 22.37788 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alloc_class/obj_ctl_alloc_class.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* obj_ctl_alloc_class.c -- tests for the ctl entry points: heap.alloc_class
*/
#include <sys/resource.h>
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class"
static void
basic(const char *path)
{
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 20,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
int ret;
PMEMoid oid;
size_t usable_size;
struct pobj_alloc_class_desc alloc_class_128;
alloc_class_128.header_type = POBJ_HEADER_NONE;
alloc_class_128.unit_size = 128;
alloc_class_128.units_per_block = 1000;
alloc_class_128.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
&alloc_class_128);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_129;
alloc_class_129.header_type = POBJ_HEADER_COMPACT;
alloc_class_129.unit_size = 1024;
alloc_class_129.units_per_block = 1000;
alloc_class_129.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.129.desc",
&alloc_class_129);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_128_r;
ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc",
&alloc_class_128_r);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(alloc_class_128.header_type, alloc_class_128_r.header_type);
UT_ASSERTeq(alloc_class_128.unit_size, alloc_class_128_r.unit_size);
UT_ASSERT(alloc_class_128.units_per_block <=
alloc_class_128_r.units_per_block);
/*
* One unit from alloc class 128 - 128 bytes unit size, minimal headers.
*/
ret = pmemobj_xalloc(pop, &oid, 128, 0, POBJ_CLASS_ID(128), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_free(&oid);
/*
* Reserve as above.
*/
struct pobj_action act;
oid = pmemobj_xreserve(pop, &act, 128, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid));
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_cancel(pop, &act, 1);
/*
* One unit from alloc class 128 - 128 bytes unit size, minimal headers,
* but request size 1 byte.
*/
ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(128), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 128);
pmemobj_free(&oid);
/*
* Two units from alloc class 129 -
* 1024 bytes unit size, compact headers.
*/
ret = pmemobj_xalloc(pop, &oid, 1024 + 1,
0, POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (1024 * 2) - 16); /* 2 units minus hdr */
pmemobj_free(&oid);
/*
* 64 units from alloc class 129
* - 1024 bytes unit size, compact headers.
*/
ret = pmemobj_xalloc(pop, &oid, (1024 * 64) - 16,
0, POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (1024 * 64) - 16);
pmemobj_free(&oid);
/*
* 65 units from alloc class 129 -
* 1024 bytes unit size, compact headers.
* Should fail, as it would require two bitmap modifications.
*/
ret = pmemobj_xalloc(pop, &oid, 1024 * 64 + 1, 0,
POBJ_CLASS_ID(129), NULL, NULL);
UT_ASSERTeq(ret, -1);
/*
* Nonexistent alloc class.
*/
ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(130), NULL, NULL);
UT_ASSERTeq(ret, -1);
struct pobj_alloc_class_desc alloc_class_new;
alloc_class_new.header_type = POBJ_HEADER_NONE;
alloc_class_new.unit_size = 777;
alloc_class_new.units_per_block = 200;
alloc_class_new.class_id = 0;
alloc_class_new.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new);
UT_ASSERTeq(ret, 0);
struct pobj_alloc_class_desc alloc_class_fail;
alloc_class_fail.header_type = POBJ_HEADER_NONE;
alloc_class_fail.unit_size = 777;
alloc_class_fail.units_per_block = 200;
alloc_class_fail.class_id = 0;
alloc_class_fail.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_fail);
UT_ASSERTeq(ret, -1);
ret = pmemobj_ctl_set(pop, "heap.alloc_class.200.desc",
&alloc_class_fail);
UT_ASSERTeq(ret, -1);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, 777);
struct pobj_alloc_class_desc alloc_class_new_huge;
alloc_class_new_huge.header_type = POBJ_HEADER_NONE;
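/* 2 << 23 == 16 MiB */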
alloc_class_new_huge.unit_size = (2 << 23);
alloc_class_new_huge.units_per_block = 1;
alloc_class_new_huge.class_id = 0;
alloc_class_new_huge.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_huge);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new_huge.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
usable_size = pmemobj_alloc_usable_size(oid);
UT_ASSERTeq(usable_size, (2 << 23));
struct pobj_alloc_class_desc alloc_class_new_max;
alloc_class_new_max.header_type = POBJ_HEADER_COMPACT;
alloc_class_new_max.unit_size = PMEMOBJ_MAX_ALLOC_SIZE;
alloc_class_new_max.units_per_block = 1024;
alloc_class_new_max.class_id = 0;
alloc_class_new_max.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_max);
UT_ASSERTeq(ret, 0);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(alloc_class_new_max.class_id), NULL, NULL);
UT_ASSERTne(ret, 0);
struct pobj_alloc_class_desc alloc_class_new_loop;
alloc_class_new_loop.header_type = POBJ_HEADER_COMPACT;
alloc_class_new_loop.unit_size = 16384;
alloc_class_new_loop.units_per_block = 63;
alloc_class_new_loop.class_id = 0;
alloc_class_new_loop.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_new_loop);
UT_ASSERTeq(ret, 0);
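/*
* The largest allocation that fits in one run of this class:
* 63 units of 16384 bytes minus the compact header; one byte
* more must fail.
*/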
size_t s = (63 * 16384) - 16;
ret = pmemobj_xalloc(pop, &oid, s + 1, 0,
POBJ_CLASS_ID(alloc_class_new_loop.class_id), NULL, NULL);
UT_ASSERTne(ret, 0);
struct pobj_alloc_class_desc alloc_class_tiny;
alloc_class_tiny.header_type = POBJ_HEADER_NONE;
alloc_class_tiny.unit_size = 7;
alloc_class_tiny.units_per_block = 1;
alloc_class_tiny.class_id = 0;
alloc_class_tiny.alignment = 0;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_tiny);
UT_ASSERTeq(ret, 0);
UT_ASSERT(alloc_class_tiny.units_per_block > 1);
for (int i = 0; i < 1000; ++i) {
ret = pmemobj_xalloc(pop, &oid, 7, 0,
POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
}
pmemobj_close(pop);
}
static void
many(const char *path)
{
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
unsigned nunits = UINT16_MAX + 1;
struct pobj_alloc_class_desc alloc_class_tiny;
alloc_class_tiny.header_type = POBJ_HEADER_NONE;
alloc_class_tiny.unit_size = 8;
alloc_class_tiny.units_per_block = nunits;
alloc_class_tiny.class_id = 0;
alloc_class_tiny.alignment = 0;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc",
&alloc_class_tiny);
UT_ASSERTeq(ret, 0);
PMEMoid oid;
uint64_t *counterp = NULL;
for (size_t i = 0; i < nunits; ++i) {
pmemobj_xalloc(pop, &oid, 8, 0,
POBJ_CLASS_ID(alloc_class_tiny.class_id), NULL, NULL);
counterp = pmemobj_direct(oid);
(*counterp)++;
/*
* This works only because this is a fresh pool in a new file
* and so the counter must be initially zero.
* This might have to be fixed if that ever changes.
*/
UT_ASSERTeq(*counterp, 1);
}
pmemobj_close(pop);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_alloc_class");
if (argc != 3)
UT_FATAL("usage: %s file-name b|m", argv[0]);
const char *path = argv[1];
if (argv[2][0] == 'b')
basic(path);
else if (argv[2][0] == 'm')
many(path);
DONE(NULL);
}
| 7,857 | 26.865248 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/traces_pmem/traces_pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* traces_pmem.c -- unit test traces for libraries pmem
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "traces_pmem");
UT_ASSERT(!pmem_check_version(PMEM_MAJOR_VERSION,
PMEM_MINOR_VERSION));
UT_ASSERT(!pmemblk_check_version(PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION));
UT_ASSERT(!pmemlog_check_version(PMEMLOG_MAJOR_VERSION,
PMEMLOG_MINOR_VERSION));
UT_ASSERT(!pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
PMEMOBJ_MINOR_VERSION));
DONE(NULL);
}
| 596 | 21.961538 | 56 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_debug/obj_debug.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_debug.c -- unit test for debug features
*
* usage: obj_debug file operation [op_index]:...
*
* operations are 'f' or 'l' or 'a' or 'p' or 'n' or 's'
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <sys/param.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "layout_obj_debug"
TOID_DECLARE_ROOT(struct root);
TOID_DECLARE(struct tobj, 0);
TOID_DECLARE(struct int3_s, 1);
struct root {
POBJ_LIST_HEAD(listhead, struct tobj) lhead, lhead2;
uint32_t val;
};
struct tobj {
POBJ_LIST_ENTRY(struct tobj) next;
};
struct int3_s {
uint32_t i1;
uint32_t i2;
uint32_t i3;
};
typedef void (*func)(PMEMobjpool *pop, void *sync, void *cond);
static void
test_FOREACH(const char *path)
{
PMEMobjpool *pop = NULL;
PMEMoid varoid, nvaroid;
TOID(struct root) root;
TOID(struct tobj) var, nvar;
#define COMMANDS_FOREACH()\
do {\
POBJ_FOREACH(pop, varoid) {}\
POBJ_FOREACH_SAFE(pop, varoid, nvaroid) {}\
POBJ_FOREACH_TYPE(pop, var) {}\
POBJ_FOREACH_SAFE_TYPE(pop, var, nvar) {}\
POBJ_LIST_FOREACH(var, &D_RW(root)->lhead, next) {}\
POBJ_LIST_FOREACH_REVERSE(var, &D_RW(root)->lhead, next) {}\
} while (0)
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,
sizeof(struct tobj), NULL, NULL);
COMMANDS_FOREACH();
TX_BEGIN(pop) {
COMMANDS_FOREACH();
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
COMMANDS_FOREACH();
pmemobj_close(pop);
}
static void
test_lists(const char *path)
{
PMEMobjpool *pop = NULL;
TOID(struct root) root;
TOID(struct tobj) elm;
#define COMMANDS_LISTS()\
do {\
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(root)->lhead, next,\
sizeof(struct tobj), NULL, NULL);\
POBJ_NEW(pop, &elm, struct tobj, NULL, NULL);\
POBJ_LIST_INSERT_AFTER(pop, &D_RW(root)->lhead,\
POBJ_LIST_FIRST(&D_RW(root)->lhead), elm, next);\
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(root)->lhead,\
&D_RW(root)->lhead2, elm, next, next);\
POBJ_LIST_REMOVE(pop, &D_RW(root)->lhead2, elm, next);\
POBJ_FREE(&elm);\
} while (0)
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct root)));
COMMANDS_LISTS();
TX_BEGIN(pop) {
COMMANDS_LISTS();
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
COMMANDS_LISTS();
pmemobj_close(pop);
}
static int
int3_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
struct int3_s *args = (struct int3_s *)arg;
struct int3_s *val = (struct int3_s *)ptr;
val->i1 = args->i1;
val->i2 = args->i2;
val->i3 = args->i3;
pmemobj_persist(pop, val, sizeof(*val));
return 0;
}
static void
test_alloc_construct(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
TX_BEGIN(pop) {
struct int3_s args = { 1, 2, 3 };
PMEMoid allocation;
pmemobj_alloc(pop, &allocation, sizeof(allocation), 1,
int3_constructor, &args);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_close(pop);
}
static void
test_double_free(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid oid, oid2;
int err = pmemobj_zalloc(pop, &oid, 100, 0);
UT_ASSERTeq(err, 0);
UT_ASSERT(!OID_IS_NULL(oid));
oid2 = oid;
pmemobj_free(&oid);
pmemobj_free(&oid2);
}
static int
test_constr(PMEMobjpool *pop, void *ptr, void *arg)
{
PMEMoid oid;
pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
return 0;
}
static void
test_alloc_in_constructor(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid oid;
pmemobj_alloc(pop, &oid, 1, 1, test_constr, NULL);
}
static void
test_mutex_lock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_mutex_lock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_mutex_unlock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_trylock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_mutex_trylock(pop, (PMEMmutex *)sync);
}
static void
test_mutex_timedlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_mutex_timedlock(pop, (PMEMmutex *)sync, NULL);
}
static void
test_mutex_zero(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_mutex_zero(pop, (PMEMmutex *)sync);
}
static void
test_rwlock_rdlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_wrlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_timedrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_timedrdlock(pop, (PMEMrwlock *)sync, NULL);
}
static void
test_rwlock_timedwrlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_timedwrlock(pop, (PMEMrwlock *)sync, NULL);
}
static void
test_rwlock_tryrdlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_tryrdlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_trywrlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_trywrlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_unlock(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_unlock(pop, (PMEMrwlock *)sync);
}
static void
test_rwlock_zero(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_rwlock_zero(pop, (PMEMrwlock *)sync);
}
static void
test_cond_wait(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_cond_wait(pop, (PMEMcond *)cond, (PMEMmutex *)sync);
}
static void
test_cond_signal(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_cond_signal(pop, (PMEMcond *)cond);
}
static void
test_cond_broadcast(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_cond_broadcast(pop, (PMEMcond *)cond);
}
static void
test_cond_timedwait(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_cond_timedwait(pop, (PMEMcond *)cond, (PMEMmutex *)sync, NULL);
}
static void
test_cond_zero(PMEMobjpool *pop, void *sync, void *cond)
{
pmemobj_cond_zero(pop, (PMEMcond *)cond);
}
static void
test_sync_pop_check(unsigned long op_index)
{
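/* deliberately bogus, non-NULL pool pointer for the sync calls below */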
PMEMobjpool *pop = (PMEMobjpool *)(uintptr_t)0x1;
func to_test[] = {
test_mutex_lock, test_mutex_unlock, test_mutex_trylock,
test_mutex_timedlock, test_mutex_zero, test_rwlock_rdlock,
test_rwlock_wrlock, test_rwlock_timedrdlock,
test_rwlock_timedwrlock, test_rwlock_tryrdlock,
test_rwlock_trywrlock, test_rwlock_unlock, test_rwlock_zero,
test_cond_wait, test_cond_signal, test_cond_broadcast,
test_cond_timedwait, test_cond_zero
};
if (op_index >= (sizeof(to_test) / sizeof(to_test[0])))
UT_FATAL("Invalid op_index provided");
PMEMmutex stack_sync;
PMEMcond stack_cond;
to_test[op_index](pop, &stack_sync, &stack_cond);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_debug");
if (argc < 3)
UT_FATAL("usage: %s file-name op:f|l|r|a|s [op_index]",
argv[0]);
const char *path = argv[1];
if (strchr("flapns", argv[2][0]) == NULL || argv[2][1] != '\0')
UT_FATAL("op must be f or l or a or p or n or s");
unsigned long op_index;
char *tailptr;
switch (argv[2][0]) {
case 'f':
test_FOREACH(path);
break;
case 'l':
test_lists(path);
break;
case 'a':
test_alloc_construct(path);
break;
case 'p':
test_double_free(path);
break;
case 'n':
test_alloc_in_constructor(path);
break;
case 's':
if (argc != 4)
UT_FATAL("Provide an op_index with option s");
op_index = strtoul(argv[3], &tailptr, 10);
if (tailptr[0] != '\0')
UT_FATAL("Wrong op_index format");
test_sync_pop_check(op_index);
break;
}
DONE(NULL);
}
| 8,098 | 20.771505 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_config/pmem2_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_config.c -- pmem2_config unittests
*/
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "config.h"
#include "out.h"
#include "source.h"
/*
* test_cfg_create_and_delete_valid - test pmem2_config allocation
*/
static int
test_cfg_create_and_delete_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg;
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(cfg, NULL);
ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_alloc_cfg_enomem - test pmem2_config allocation with error injection
*/
static int
test_alloc_cfg_enomem(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config *cfg;
if (!core_fault_injection_enabled()) {
return 0;
}
core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc");
int ret = pmem2_config_new(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_delete_null_config - test pmem2_delete on NULL config
*/
static int
test_delete_null_config(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config *cfg = NULL;
/* should not crash */
int ret = pmem2_config_delete(&cfg);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(cfg, NULL);
return 0;
}
/*
* test_config_set_granularity_valid - check valid granularity values
*/
static int
test_config_set_granularity_valid(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check default granularity */
enum pmem2_granularity g =
(enum pmem2_granularity)PMEM2_GRANULARITY_INVALID;
UT_ASSERTeq(cfg.requested_max_granularity, g);
/* change default granularity */
int ret = -1;
g = PMEM2_GRANULARITY_BYTE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
/* set granularity once more */
ret = -1;
g = PMEM2_GRANULARITY_PAGE;
ret = pmem2_config_set_required_store_granularity(&cfg, g);
UT_ASSERTeq(cfg.requested_max_granularity, g);
UT_PMEM2_EXPECT_RETURN(ret, 0);
return 0;
}
/*
* test_config_set_granularity_invalid - check invalid granularity values
*/
static int
test_config_set_granularity_invalid(const struct test_case *tc, int argc,
char *argv[])
{
/* pass invalid granularity */
int ret = 0;
enum pmem2_granularity g_inval = 999;
struct pmem2_config cfg;
pmem2_config_init(&cfg);
ret = pmem2_config_set_required_store_granularity(&cfg, g_inval);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
return 0;
}
/*
* test_set_offset_too_large - setting offset which is too large
*/
static int
test_set_offset_too_large(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to set the offset which is too large */
size_t offset = (size_t)INT64_MAX + 1;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_OFFSET_OUT_OF_RANGE);
return 0;
}
/*
* test_set_offset_success - setting a valid offset
*/
static int
test_set_offset_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the offset */
size_t offset = Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.offset, offset);
return 0;
}
/*
* test_set_length_success - setting a valid length
*/
static int
test_set_length_success(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set the length, can be any length */
size_t length = Ut_mmap_align;
int ret = pmem2_config_set_length(&cfg, length);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.length, length);
return 0;
}
/*
* test_set_offset_max - setting maximum possible offset
*/
static int
test_set_offset_max(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
/* let's try to successfully set maximum possible offset */
size_t offset = (INT64_MAX / Ut_mmap_align) * Ut_mmap_align;
int ret = pmem2_config_set_offset(&cfg, offset);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_sharing_valid - setting valid sharing
*/
static int
test_set_sharing_valid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* check sharing default value */
UT_ASSERTeq(cfg.sharing, PMEM2_SHARED);
int ret = pmem2_config_set_sharing(&cfg, PMEM2_PRIVATE);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cfg.sharing, PMEM2_PRIVATE);
return 0;
}
/*
* test_set_sharing_invalid - setting invalid sharing
*/
static int
test_set_sharing_invalid(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_config cfg;
unsigned invalid_sharing = 777;
int ret = pmem2_config_set_sharing(&cfg, invalid_sharing);
UT_ASSERTeq(ret, PMEM2_E_INVALID_SHARING_VALUE);
return 0;
}
/*
* test_validate_unaligned_addr - setting unaligned addr and validating it
*/
static int
test_validate_unaligned_addr(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_validate_unaligned_addr <file>");
/* needed for source alignment */
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fd);
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* let's set addr which is unaligned */
cfg.addr = (char *)1;
int ret = pmem2_config_validate_addr_alignment(&cfg, src);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_UNALIGNED);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_set_wrong_addr_req_type - setting wrong addr request type
*/
static int
test_set_wrong_addr_req_type(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen invalid addr request type */
enum pmem2_address_request_type request_type = 999;
int ret = pmem2_config_set_address(&cfg, NULL, request_type);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE);
return 0;
}
/*
* test_null_addr_noreplace - setting null addr when request type
* PMEM2_ADDRESS_FIXED_NOREPLACE is used
*/
static int
test_null_addr_noreplace(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_address(
&cfg, NULL, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_ADDRESS_NULL);
return 0;
}
/*
* test_clear_address - using pmem2_config_clear_address func
*/
static int
test_clear_address(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
/* "randomly" chosen value of address and addr request type */
void *addr = (void *)(1024 * 1024);
int ret = pmem2_config_set_address(
&cfg, addr, PMEM2_ADDRESS_FIXED_NOREPLACE);
UT_ASSERTeq(ret, 0);
UT_ASSERTne(cfg.addr, NULL);
UT_ASSERTne(cfg.addr_request, PMEM2_ADDRESS_ANY);
pmem2_config_clear_address(&cfg);
UT_ASSERTeq(cfg.addr, NULL);
UT_ASSERTeq(cfg.addr_request, PMEM2_ADDRESS_ANY);
return 0;
}
/*
* test_set_valid_prot_flag -- set valid protection flag
*/
static int
test_set_valid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_READ);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_WRITE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg, PMEM2_PROT_NONE);
UT_ASSERTeq(ret, 0);
ret = pmem2_config_set_protection(&cfg,
PMEM2_PROT_WRITE | PMEM2_PROT_READ | PMEM2_PROT_EXEC);
UT_ASSERTeq(ret, 0);
return 0;
}
/*
* test_set_invalid_prot_flag -- set invalid protection flag
*/
static int
test_set_invalid_prot_flag(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_config cfg;
pmem2_config_init(&cfg);
int ret = pmem2_config_set_protection(&cfg, PROT_WRITE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_PROT_FLAG);
UT_ASSERTeq(cfg.protection_flag, PMEM2_PROT_READ | PMEM2_PROT_WRITE);
return 0;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_cfg_create_and_delete_valid),
TEST_CASE(test_alloc_cfg_enomem),
TEST_CASE(test_delete_null_config),
TEST_CASE(test_config_set_granularity_valid),
TEST_CASE(test_config_set_granularity_invalid),
TEST_CASE(test_set_offset_too_large),
TEST_CASE(test_set_offset_success),
TEST_CASE(test_set_length_success),
TEST_CASE(test_set_offset_max),
TEST_CASE(test_set_sharing_valid),
TEST_CASE(test_set_sharing_invalid),
TEST_CASE(test_validate_unaligned_addr),
TEST_CASE(test_set_wrong_addr_req_type),
TEST_CASE(test_null_addr_noreplace),
TEST_CASE(test_clear_address),
TEST_CASE(test_set_valid_prot_flag),
TEST_CASE(test_set_invalid_prot_flag),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
START(argc, argv, "pmem2_config");
util_init();
out_init("pmem2_config", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
| 9,397 | 22.792405 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_trunc/pmem_map_file_trunc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem_map_file_trunc.c -- test for mapping specially crafted files,
* which used to confuse Windows libc to truncate it by 1 byte
*
* See https://github.com/pmem/pmdk/pull/3728 for full description.
*
* usage: pmem_map_file_trunc file
*/
#include "unittest.h"
#define EXPECTED_SIZE (4 * 1024)
/*
* so called "Ctrl-Z" or EOF character
* https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen
*/
#define FILL_CHAR 0x1a
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem_map_file_trunc");
if (argc < 2)
UT_FATAL("not enough args");
size_t mapped;
int ispmem;
char *p;
os_stat_t st;
p = pmem_map_file(argv[1], EXPECTED_SIZE, PMEM_FILE_CREATE, 0644,
&mapped, &ispmem);
UT_ASSERT(p);
UT_ASSERTeq(mapped, EXPECTED_SIZE);
p[EXPECTED_SIZE - 1] = FILL_CHAR;
pmem_persist(&p[EXPECTED_SIZE - 1], 1);
pmem_unmap(p, EXPECTED_SIZE);
STAT(argv[1], &st);
UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
p = pmem_map_file(argv[1], 0, 0, 0644, &mapped, &ispmem);
UT_ASSERT(p);
UT_ASSERTeq(mapped, EXPECTED_SIZE);
UT_ASSERTeq(p[EXPECTED_SIZE - 1], FILL_CHAR);
pmem_unmap(p, EXPECTED_SIZE);
STAT(argv[1], &st);
UT_ASSERTeq(st.st_size, EXPECTED_SIZE);
DONE(NULL);
}
| 1,302 | 20.716667 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_ravl/util_ravl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_ravl.c -- unit test for ravl tree
*/
#include <stdint.h>
#include <stdlib.h>
#include "ravl.h"
#include "util.h"
#include "unittest.h"
#include "fault_injection.h"
static int
cmpkey(const void *lhs, const void *rhs)
{
intptr_t l = (intptr_t)lhs;
intptr_t r = (intptr_t)rhs;
return (int)(l - r);
}
static void
test_misc(void)
{
struct ravl *r = ravl_new(cmpkey);
struct ravl_node *n = NULL;
ravl_insert(r, (void *)3);
ravl_insert(r, (void *)6);
ravl_insert(r, (void *)1);
ravl_insert(r, (void *)7);
ravl_insert(r, (void *)9);
ravl_insert(r, (void *)5);
ravl_insert(r, (void *)8);
ravl_insert(r, (void *)2);
ravl_insert(r, (void *)4);
ravl_insert(r, (void *)10);
n = ravl_find(r, (void *)11, RAVL_PREDICATE_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)10, RAVL_PREDICATE_GREATER);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)11, RAVL_PREDICATE_GREATER);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)11,
RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)1, RAVL_PREDICATE_LESS);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)0, RAVL_PREDICATE_LESS_EQUAL);
UT_ASSERTeq(n, NULL);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)10);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)8);
n = ravl_find(r, (void *)9,
RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)9);
n = ravl_find(r, (void *)9,
RAVL_PREDICATE_LESS | RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)9);
n = ravl_find(r, (void *)100, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)10);
n = ravl_find(r, (void *)0, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)1);
n = ravl_find(r, (void *)3, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)10, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)9, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)7, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)1, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)5, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)8, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)2, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
n = ravl_find(r, (void *)4, RAVL_PREDICATE_EQUAL);
UT_ASSERTne(n, NULL);
ravl_remove(r, n);
ravl_delete(r);
}
static void
test_predicate(void)
{
struct ravl *r = ravl_new(cmpkey);
struct ravl_node *n = NULL;
ravl_insert(r, (void *)10);
ravl_insert(r, (void *)5);
ravl_insert(r, (void *)7);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_GREATER);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)7);
n = ravl_find(r, (void *)6, RAVL_PREDICATE_LESS);
UT_ASSERTne(n, NULL);
UT_ASSERTeq(ravl_data(n), (void *)5);
ravl_delete(r);
}
static void
test_stress(void)
{
struct ravl *r = ravl_new(cmpkey);
for (int i = 0; i < 1000000; ++i) {
ravl_insert(r, (void *)(uintptr_t)rand());
}
ravl_delete(r);
}
struct foo {
int a;
int b;
int c;
};
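/* cmpfoo -- order elements by the sum of their three fields */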
static int
cmpfoo(const void *lhs, const void *rhs)
{
const struct foo *l = lhs;
const struct foo *r = rhs;
return ((l->a + l->b + l->c) - (r->a + r->b + r->c));
}
static void
test_emplace(void)
{
struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
struct foo a = {1, 2, 3};
struct foo b = {2, 3, 4};
struct foo z = {0, 0, 0};
ravl_emplace_copy(r, &a);
ravl_emplace_copy(r, &b);
struct ravl_node *n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
struct foo *fn = ravl_data(n);
UT_ASSERTeq(fn->a, a.a);
UT_ASSERTeq(fn->b, a.b);
UT_ASSERTeq(fn->c, a.c);
ravl_remove(r, n);
n = ravl_find(r, &z, RAVL_PREDICATE_GREATER);
fn = ravl_data(n);
UT_ASSERTeq(fn->a, b.a);
UT_ASSERTeq(fn->b, b.b);
UT_ASSERTeq(fn->c, b.c);
ravl_remove(r, n);
ravl_delete(r);
}
static void
test_fault_injection_ravl_sized(void)
{
if (!core_fault_injection_enabled())
return;
core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_sized");
struct ravl *r = ravl_new_sized(NULL, 0);
UT_ASSERTeq(r, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
test_fault_injection_ravl_node(void)
{
if (!core_fault_injection_enabled())
return;
struct foo a = {1, 2, 3};
struct ravl *r = ravl_new_sized(cmpfoo, sizeof(struct foo));
UT_ASSERTne(r, NULL);
core_inject_fault_at(PMEM_MALLOC, 1, "ravl_new_node");
int ret = ravl_emplace_copy(r, &a);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_ravl");
test_predicate();
test_misc();
test_stress();
test_emplace();
test_fault_injection_ravl_sized();
test_fault_injection_ravl_node();
DONE(NULL);
}
| 5,271 | 20.34413 | 64 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mocks_windows.h -- redefinitions of pthread functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_sync test.
* It replaces the default implementation with the mocked functions defined
* in obj_sync.c.
*
* These defines could also be passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define os_mutex_init __wrap_os_mutex_init
#define os_rwlock_init __wrap_os_rwlock_init
#define os_cond_init __wrap_os_cond_init
#endif
| 2,265 | 41.754717 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/obj_sync.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_sync.c -- unit test for PMEM-resident locks
*/
#include "obj.h"
#include "sync.h"
#include "unittest.h"
#include "sys_util.h"
#include "util.h"
#include "os.h"
#define MAX_THREAD_NUM 200
#define DATA_SIZE 128
#define LOCKED_MUTEX 1
#define NANO_PER_ONE 1000000000LL
#define TIMEOUT (NANO_PER_ONE / 1000LL)
#define WORKER_RUNS 10
#define MAX_OPENS 5
#define FATAL_USAGE() UT_FATAL("usage: obj_sync [mrct] <num_threads> <runs>\n")
/* posix thread worker typedef */
typedef void *(*worker)(void *);
/* the mock pmemobj pool */
static PMEMobjpool Mock_pop;
/* the tested object containing persistent synchronization primitives */
static struct mock_obj {
PMEMmutex mutex;
PMEMmutex mutex_locked;
PMEMcond cond;
PMEMrwlock rwlock;
int check_data;
uint8_t data[DATA_SIZE];
} *Test_obj;
PMEMobjpool *
pmemobj_pool_by_ptr(const void *arg)
{
return &Mock_pop;
}
/*
* mock_open_pool -- (internal) simulate pool opening
*/
static void
mock_open_pool(PMEMobjpool *pop)
{
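/* bump run_id by 2, mirroring what libpmemobj does on each pool open */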
util_fetch_and_add64(&pop->run_id, 2);
}
/*
* mutex_write_worker -- (internal) write data with mutex
*/
static void *
mutex_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
UT_ERR("pmemobj_mutex_lock");
return NULL;
}
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
UT_ERR("pmemobj_mutex_unlock");
}
return NULL;
}
/*
* mutex_check_worker -- (internal) check consistency with mutex
*/
static void *
mutex_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex)) {
UT_ERR("pmemobj_mutex_lock");
return NULL;
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
memset(Test_obj->data, 0, DATA_SIZE);
if (pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex))
UT_ERR("pmemobj_mutex_unlock");
}
return NULL;
}
/*
* cond_write_worker -- (internal) write data with cond variable
*/
static void *
cond_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
return NULL;
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
Test_obj->check_data = 1;
if (pmemobj_cond_signal(&Mock_pop, &Test_obj->cond))
UT_ERR("pmemobj_cond_signal");
pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
}
return NULL;
}
/*
* cond_check_worker -- (internal) check consistency with cond variable
*/
static void *
cond_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_mutex_lock(&Mock_pop, &Test_obj->mutex))
return NULL;
while (Test_obj->check_data != 1) {
if (pmemobj_cond_wait(&Mock_pop, &Test_obj->cond,
&Test_obj->mutex))
UT_ERR("pmemobj_cond_wait");
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
memset(Test_obj->data, 0, DATA_SIZE);
pmemobj_mutex_unlock(&Mock_pop, &Test_obj->mutex);
}
return NULL;
}
/*
* rwlock_write_worker -- (internal) write data with rwlock
*/
static void *
rwlock_write_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_rwlock_wrlock(&Mock_pop, &Test_obj->rwlock)) {
UT_ERR("pmemobj_rwlock_wrlock");
return NULL;
}
memset(Test_obj->data, (int)(uintptr_t)arg, DATA_SIZE);
if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
UT_ERR("pmemobj_rwlock_unlock");
}
return NULL;
}
/*
* rwlock_check_worker -- (internal) check consistency with rwlock
*/
static void *
rwlock_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
if (pmemobj_rwlock_rdlock(&Mock_pop, &Test_obj->rwlock)) {
UT_ERR("pmemobj_rwlock_rdlock");
return NULL;
}
uint8_t val = Test_obj->data[0];
for (int i = 1; i < DATA_SIZE; i++)
UT_ASSERTeq(Test_obj->data[i], val);
if (pmemobj_rwlock_unlock(&Mock_pop, &Test_obj->rwlock))
UT_ERR("pmemobj_rwlock_unlock");
}
return NULL;
}
/*
* timed_write_worker -- (internal) intentionally doing nothing
*/
static void *
timed_write_worker(void *arg)
{
return NULL;
}
/*
* timed_check_worker -- (internal) check consistency with mutex
*/
static void *
timed_check_worker(void *arg)
{
for (unsigned run = 0; run < WORKER_RUNS; run++) {
int mutex_id = (int)(uintptr_t)arg % 2;
PMEMmutex *mtx = mutex_id == LOCKED_MUTEX ?
&Test_obj->mutex_locked : &Test_obj->mutex;
struct timespec t1, t2, abs_time;
os_clock_gettime(CLOCK_REALTIME, &t1);
abs_time = t1;
abs_time.tv_nsec += TIMEOUT;
if (abs_time.tv_nsec >= NANO_PER_ONE) {
abs_time.tv_sec++;
abs_time.tv_nsec -= NANO_PER_ONE;
}
int ret = pmemobj_mutex_timedlock(&Mock_pop, mtx, &abs_time);
os_clock_gettime(CLOCK_REALTIME, &t2);
if (mutex_id == LOCKED_MUTEX) {
UT_ASSERTeq(ret, ETIMEDOUT);
uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec) *
NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
UT_ASSERT(diff >= TIMEOUT);
return NULL;
}
if (ret == 0) {
UT_ASSERTne(mutex_id, LOCKED_MUTEX);
pmemobj_mutex_unlock(&Mock_pop, mtx);
} else if (ret == ETIMEDOUT) {
uint64_t diff = (uint64_t)((t2.tv_sec - t1.tv_sec)
* NANO_PER_ONE + t2.tv_nsec - t1.tv_nsec);
UT_ASSERT(diff >= TIMEOUT);
} else {
errno = ret;
UT_ERR("!pmemobj_mutex_timedlock");
}
}
return NULL;
}
/*
* cleanup -- (internal) clean up after each run
*/
static void
cleanup(char test_type)
{
switch (test_type) {
case 'm':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
break;
case 'r':
util_rwlock_destroy(&((PMEMrwlock_internal *)
&(Test_obj->rwlock))->PMEMrwlock_lock);
break;
case 'c':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
util_cond_destroy(&((PMEMcond_internal *)
&(Test_obj->cond))->PMEMcond_cond);
break;
case 't':
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex))->PMEMmutex_lock);
util_mutex_destroy(&((PMEMmutex_internal *)
&(Test_obj->mutex_locked))->PMEMmutex_lock);
break;
default:
FATAL_USAGE();
}
}
static int
obj_sync_persist(void *ctx, const void *ptr, size_t sz, unsigned flags)
{
/* no-op */
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_sync");
util_init();
if (argc < 4)
FATAL_USAGE();
worker writer;
worker checker;
char test_type = argv[1][0];
switch (test_type) {
case 'm':
writer = mutex_write_worker;
checker = mutex_check_worker;
break;
case 'r':
writer = rwlock_write_worker;
checker = rwlock_check_worker;
break;
case 'c':
writer = cond_write_worker;
checker = cond_check_worker;
break;
case 't':
writer = timed_write_worker;
checker = timed_check_worker;
break;
default:
FATAL_USAGE();
}
unsigned long num_threads = strtoul(argv[2], NULL, 10);
if (num_threads > MAX_THREAD_NUM)
UT_FATAL("Do not use more than %d threads.\n", MAX_THREAD_NUM);
unsigned long opens = strtoul(argv[3], NULL, 10);
if (opens > MAX_OPENS)
UT_FATAL("Do not use more than %d runs.\n", MAX_OPENS);
os_thread_t *write_threads
= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
os_thread_t *check_threads
= (os_thread_t *)MALLOC(num_threads * sizeof(os_thread_t));
/* first pool open */
mock_open_pool(&Mock_pop);
Mock_pop.p_ops.persist = obj_sync_persist;
Mock_pop.p_ops.base = &Mock_pop;
Test_obj = (struct mock_obj *)MALLOC(sizeof(struct mock_obj));
/* zero-initialize the test object */
pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex);
pmemobj_mutex_zero(&Mock_pop, &Test_obj->mutex_locked);
pmemobj_cond_zero(&Mock_pop, &Test_obj->cond);
pmemobj_rwlock_zero(&Mock_pop, &Test_obj->rwlock);
Test_obj->check_data = 0;
memset(&Test_obj->data, 0, DATA_SIZE);
for (unsigned long run = 0; run < opens; run++) {
if (test_type == 't') {
pmemobj_mutex_lock(&Mock_pop,
&Test_obj->mutex_locked);
}
for (unsigned i = 0; i < num_threads; i++) {
THREAD_CREATE(&write_threads[i], NULL, writer,
(void *)(uintptr_t)i);
THREAD_CREATE(&check_threads[i], NULL, checker,
(void *)(uintptr_t)i);
}
for (unsigned i = 0; i < num_threads; i++) {
THREAD_JOIN(&write_threads[i], NULL);
THREAD_JOIN(&check_threads[i], NULL);
}
if (test_type == 't') {
pmemobj_mutex_unlock(&Mock_pop,
&Test_obj->mutex_locked);
}
/* up the run_id counter and cleanup */
mock_open_pool(&Mock_pop);
cleanup(test_type);
}
FREE(check_threads);
FREE(write_threads);
FREE(Test_obj);
DONE(NULL);
}
| 8,776 | 21.97644 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sync/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- redefinitions of lock functions (Posix implementation)
*/
#include <pthread.h>
#include "util.h"
#include "os.h"
#include "unittest.h"
FUNC_MOCK(pthread_mutex_init, int,
pthread_mutex_t *__restrict mutex,
const pthread_mutexattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_mutex_init, mutex, attr)
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
FUNC_MOCK(pthread_rwlock_init, int,
pthread_rwlock_t *__restrict rwlock,
const pthread_rwlockattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_rwlock_init, rwlock, attr)
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
FUNC_MOCK(pthread_cond_init, int,
pthread_cond_t *__restrict cond,
const pthread_condattr_t *__restrict attr)
FUNC_MOCK_RUN_RET_DEFAULT_REAL(pthread_cond_init, cond, attr)
FUNC_MOCK_RUN(1) {
return -1;
}
FUNC_MOCK_END
| 950 | 22.775 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/out_err_mt_win/out_err_mt_win.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* out_err_mt_win.c -- unit test for error messages
*/
#include <sys/types.h>
#include <stdarg.h>
#include <errno.h>
#include "unittest.h"
#include "valgrind_internal.h"
#include "util.h"
#define NUM_THREADS 16
static void
print_errors(const wchar_t *msg)
{
UT_OUT("%S", msg);
UT_OUT("PMEM: %S", pmem_errormsgW());
UT_OUT("PMEMOBJ: %S", pmemobj_errormsgW());
UT_OUT("PMEMLOG: %S", pmemlog_errormsgW());
UT_OUT("PMEMBLK: %S", pmemblk_errormsgW());
UT_OUT("PMEMPOOL: %S", pmempool_errormsgW());
}
static void
check_errors(int ver)
{
int ret;
int err_need;
int err_found;
ret = swscanf(pmem_errormsgW(),
L"libpmem major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEM_MAJOR_VERSION);
ret = swscanf(pmemobj_errormsgW(),
L"libpmemobj major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMOBJ_MAJOR_VERSION);
ret = swscanf(pmemlog_errormsgW(),
L"libpmemlog major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMLOG_MAJOR_VERSION);
ret = swscanf(pmemblk_errormsgW(),
L"libpmemblk major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMBLK_MAJOR_VERSION);
ret = swscanf(pmempool_errormsgW(),
L"libpmempool major version mismatch (need %d, found %d)",
&err_need, &err_found);
UT_ASSERTeq(ret, 2);
UT_ASSERTeq(err_need, ver);
UT_ASSERTeq(err_found, PMEMPOOL_MAJOR_VERSION);
}
static void *
do_test(void *arg)
{
int ver = *(int *)arg;
pmem_check_version(ver, 0);
pmemobj_check_version(ver, 0);
pmemlog_check_version(ver, 0);
pmemblk_check_version(ver, 0);
pmempool_check_version(ver, 0);
check_errors(ver);
return NULL;
}
static void
run_mt_test(void *(*worker)(void *))
{
os_thread_t thread[NUM_THREADS];
int ver[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; ++i) {
ver[i] = 10000 + i;
THREAD_CREATE(&thread[i], NULL, worker, &ver[i]);
}
for (int i = 0; i < NUM_THREADS; ++i) {
THREAD_JOIN(&thread[i], NULL);
}
}
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "out_err_mt_win");
if (argc != 6)
UT_FATAL("usage: %S file1 file2 file3 file4 dir",
argv[0]);
print_errors(L"start");
PMEMobjpool *pop = pmemobj_createW(argv[1], L"test",
PMEMOBJ_MIN_POOL, 0666);
PMEMlogpool *plp = pmemlog_createW(argv[2],
PMEMLOG_MIN_POOL, 0666);
PMEMblkpool *pbp = pmemblk_createW(argv[3],
128, PMEMBLK_MIN_POOL, 0666);
util_init();
pmem_check_version(10000, 0);
pmemobj_check_version(10001, 0);
pmemlog_check_version(10002, 0);
pmemblk_check_version(10003, 0);
pmempool_check_version(10006, 0);
print_errors(L"version check");
void *ptr = NULL;
/*
* We are testing library error reporting and we don't want this test
* to fail under memcheck.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
pmem_msync(ptr, 1);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
print_errors(L"pmem_msync");
int ret;
PMEMoid oid;
ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL);
UT_ASSERTeq(ret, -1);
print_errors(L"pmemobj_alloc");
pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL);
print_errors(L"pmemlog_append");
size_t nblock = pmemblk_nblock(pbp);
pmemblk_set_error(pbp, nblock + 1);
print_errors(L"pmemblk_set_error");
run_mt_test(do_test);
pmemobj_close(pop);
pmemlog_close(plp);
pmemblk_close(pbp);
PMEMpoolcheck *ppc;
struct pmempool_check_args args = {0, };
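/* pass a wrong args size on purpose so that the init call fails */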
ppc = pmempool_check_init(&args, sizeof(args) / 2);
UT_ASSERTeq(ppc, NULL);
print_errors(L"pmempool_check_init");
DONEW(NULL);
}
| 3,844 | 22.30303 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_oid_thread/obj_oid_thread.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_oid_thread.c -- unit test for the reverse direct operation
*/
#include "unittest.h"
#include "lane.h"
#include "obj.h"
#include "sys_util.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "direct"
static os_mutex_t lock;
static os_cond_t cond;
static int flag = 1;
static PMEMoid thread_oid;
/*
* test_worker -- (internal) test worker thread
*/
static void *
test_worker(void *arg)
{
util_mutex_lock(&lock);
/* before pool is closed */
void *direct = pmemobj_direct(thread_oid);
UT_ASSERT(OID_EQUALS(thread_oid, pmemobj_oid(direct)));
flag = 0;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
util_mutex_lock(&lock);
while (flag == 0)
os_cond_wait(&cond, &lock);
/* after pool is closed */
UT_ASSERT(OID_IS_NULL(pmemobj_oid(direct)));
util_mutex_unlock(&lock);
return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_oid_thread");
if (argc != 3)
UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);
util_mutex_init(&lock);
util_cond_init(&cond);
unsigned npools = ATOU(argv[2]);
const char *dir = argv[1];
int r;
PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
size_t length = strlen(dir) + MAX_PATH_LEN;
char *path = MALLOC(length);
for (unsigned i = 0; i < npools; ++i) {
int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d",
dir, i);
if (ret < 0 || ret >= length)
UT_FATAL("snprintf: %d", ret);
pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pops[i] == NULL)
UT_FATAL("!pmemobj_create");
}
/* Address outside the pmemobj pool */
void *allocated_memory = MALLOC(sizeof(int));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(allocated_memory)));
PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid));
UT_ASSERT(OID_IS_NULL(pmemobj_oid(NULL)));
oids[0] = OID_NULL;
for (unsigned i = 0; i < npools; ++i) {
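/* hand-craft an OID pointing at the first byte of each pool's heap */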
uint64_t off = pops[i]->heap_offset;
oids[i] = (PMEMoid) {pops[i]->uuid_lo, off};
UT_ASSERT(OID_EQUALS(oids[i],
pmemobj_oid(pmemobj_direct(oids[i]))));
r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(OID_EQUALS(tmpoids[i],
pmemobj_oid(pmemobj_direct(tmpoids[i]))));
}
r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL);
UT_ASSERTeq(r, 0);
UT_ASSERT(!OID_IS_NULL(pmemobj_oid(pmemobj_direct(thread_oid))));
util_mutex_lock(&lock);
os_thread_t t;
THREAD_CREATE(&t, NULL, test_worker, NULL);
/* wait for the thread to perform the first direct */
while (flag != 0)
os_cond_wait(&cond, &lock);
for (unsigned i = 0; i < npools; ++i) {
pmemobj_free(&tmpoids[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(tmpoids[i]))));
pmemobj_close(pops[i]);
UT_ASSERT(OID_IS_NULL(pmemobj_oid(
pmemobj_direct(oids[i]))));
}
/* signal the waiting thread */
flag = 1;
os_cond_signal(&cond);
util_mutex_unlock(&lock);
THREAD_JOIN(&t, NULL);
FREE(path);
FREE(tmpoids);
FREE(oids);
FREE(pops);
FREE(allocated_memory);
util_mutex_destroy(&lock);
util_cond_destroy(&cond);
DONE(NULL);
}
| 3,186 | 21.602837 | 66 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_FIP_COMMON_H
#define RPMEM_FIP_COMMON_H 1
#include <string.h>
#include <netinet/in.h>
#include <rdma/fabric.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
#define RPMEM_FIVERSION FI_VERSION(1, 4)
#define RPMEM_FIP_CQ_WAIT_MS 100
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
/*
* rpmem_fip_node -- client or server node type
*/
enum rpmem_fip_node {
RPMEM_FIP_NODE_CLIENT,
RPMEM_FIP_NODE_SERVER,
MAX_RPMEM_FIP_NODE,
};
/*
* rpmem_fip_probe -- list of providers
*/
struct rpmem_fip_probe {
unsigned providers;
size_t max_wq_size[MAX_RPMEM_PROV];
};
/*
* rpmem_fip_probe -- returns true if specified provider is available
*/
static inline int
rpmem_fip_probe(struct rpmem_fip_probe probe, enum rpmem_provider provider)
{
return (probe.providers & (1U << provider)) != 0;
}
/*
* rpmem_fip_probe_any -- returns true if any provider is available
*/
static inline int
rpmem_fip_probe_any(struct rpmem_fip_probe probe)
{
return probe.providers != 0;
}
int rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe);
struct fi_info *rpmem_fip_get_hints(enum rpmem_provider provider);
int rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout);
int rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout);
size_t rpmem_fip_cq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_wq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_rx_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_max_nlanes(struct fi_info *fi);
void rpmem_fip_print_info(struct fi_info *fi);
#ifdef __cplusplus
}
#endif
#endif
| 1,992 | 21.144444 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.c -- common definitions for librpmem and rpmemd
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_common_log.h"
#include "valgrind_internal.h"
#include <rdma/fi_errno.h>
/*
* rpmem_fip_get_hints -- return fabric interface information hints
*/
struct fi_info *
rpmem_fip_get_hints(enum rpmem_provider provider)
{
RPMEMC_ASSERT(provider < MAX_RPMEM_PROV);
struct fi_info *hints = fi_allocinfo();
if (!hints) {
RPMEMC_LOG(ERR, "!fi_allocinfo");
return NULL;
}
/* connection-oriented endpoint */
hints->ep_attr->type = FI_EP_MSG;
/*
* Basic memory registration mode indicates that MR attributes
* (rkey, lkey) are selected by provider.
*/
hints->domain_attr->mr_mode = FI_MR_BASIC;
/*
* FI_THREAD_SAFE indicates MT applications can access any
* resources through interface without any restrictions
*/
hints->domain_attr->threading = FI_THREAD_SAFE;
/*
* FI_MSG - SEND and RECV
* FI_RMA - WRITE and READ
*/
hints->caps = FI_MSG | FI_RMA;
/* must register locally accessed buffers */
hints->mode = FI_CONTEXT | FI_LOCAL_MR | FI_RX_CQ_DATA;
/* READ-after-WRITE and SEND-after-WRITE message ordering required */
hints->tx_attr->msg_order = FI_ORDER_RAW | FI_ORDER_SAW;
hints->addr_format = FI_SOCKADDR;
if (provider != RPMEM_PROV_UNKNOWN) {
const char *prov_name = rpmem_provider_to_str(provider);
RPMEMC_ASSERT(prov_name != NULL);
hints->fabric_attr->prov_name = strdup(prov_name);
if (!hints->fabric_attr->prov_name) {
RPMEMC_LOG(ERR, "!strdup(provider)");
goto err_strdup;
}
}
return hints;
err_strdup:
fi_freeinfo(hints);
return NULL;
}
/*
* rpmem_fip_probe_get -- return list of available providers
*/
int
rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe)
{
struct fi_info *hints = rpmem_fip_get_hints(RPMEM_PROV_UNKNOWN);
if (!hints)
return -1;
int ret;
struct fi_info *fi;
ret = fi_getinfo(RPMEM_FIVERSION, target, NULL, 0, hints, &fi);
if (ret) {
goto err_getinfo;
}
if (probe) {
memset(probe, 0, sizeof(*probe));
struct fi_info *prov = fi;
while (prov) {
enum rpmem_provider p = rpmem_provider_from_str(
prov->fabric_attr->prov_name);
if (p == RPMEM_PROV_UNKNOWN) {
prov = prov->next;
continue;
}
probe->providers |= (1U << p);
probe->max_wq_size[p] = prov->tx_attr->size;
prov = prov->next;
}
}
fi_freeinfo(fi);
err_getinfo:
fi_freeinfo(hints);
return ret;
}
/*
* rpmem_fip_read_eq -- read event queue entry with specified timeout
*/
int
rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout)
{
int ret;
ssize_t sret;
struct fi_eq_err_entry err;
sret = fi_eq_sread(eq, event, entry, sizeof(*entry), timeout, 0);
VALGRIND_DO_MAKE_MEM_DEFINED(&sret, sizeof(sret));
if (timeout != -1 && (sret == -FI_ETIMEDOUT || sret == -FI_EAGAIN)) {
errno = ETIMEDOUT;
return 1;
}
if (sret < 0 || (size_t)sret != sizeof(*entry)) {
if (sret < 0)
ret = (int)sret;
else
ret = -1;
sret = fi_eq_readerr(eq, &err, 0);
if (sret < 0) {
errno = EIO;
RPMEMC_LOG(ERR, "error reading from event queue: "
"cannot read error from event queue: %s",
fi_strerror((int)sret));
} else if (sret > 0) {
RPMEMC_ASSERT(sret == sizeof(err));
errno = -err.prov_errno;
RPMEMC_LOG(ERR, "error reading from event queue: %s",
fi_eq_strerror(eq, err.prov_errno,
NULL, NULL, 0));
}
return ret;
}
return 0;
}
/*
* rpmem_fip_read_eq_check -- read event queue entry and expect specified
* event and fid
*
* Returns:
* 1 - timeout
* 0 - success
* otherwise - error
*/
int
rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout)
{
uint32_t event;
int ret = rpmem_fip_read_eq(eq, entry, &event, timeout);
if (ret)
return ret;
if (event != exp_event || entry->fid != exp_fid) {
errno = EIO;
RPMEMC_LOG(ERR, "unexpected event received (%u) "
"expected (%u)%s", event, exp_event,
entry->fid != exp_fid ?
" invalid endpoint" : "");
return -1;
}
return 0;
}
/*
* rpmem_fip_lane_attr -- lane attributes
*
* This structure describes how many SQ, RQ and CQ entries are
* required for a single lane.
*
* NOTE:
* - WRITE, READ and SEND requests are placed in SQ,
* - RECV requests are placed in RQ.
*/
struct rpmem_fip_lane_attr {
size_t n_per_sq; /* number of entries per lane in send queue */
size_t n_per_rq; /* number of entries per lane in receive queue */
size_t n_per_cq; /* number of entries per lane in completion queue */
};
/* queues size required by remote persist operation methods */
static const struct rpmem_fip_lane_attr
rpmem_fip_lane_attrs[MAX_RPMEM_FIP_NODE][MAX_RPMEM_PM] = {
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_GPSPM] = {
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_APM] = {
/* WRITE + READ for persist, WRITE + SEND for deep persist */
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_GPSPM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_APM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
};
/*
* rpmem_fip_cq_size -- returns completion queue size based on
* persist method and node type
*/
size_t
rpmem_fip_cq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_cq ? : 1;
}
/*
* rpmem_fip_wq_size -- returns submission queue (transmit queue) size based
* on persist method and node type
*/
size_t
rpmem_fip_wq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_sq ? : 1;
}
/*
* rpmem_fip_rx_size -- returns receive queue size based
* on persist method and node type
*/
size_t
rpmem_fip_rx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_rq ? : 1;
}
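/*
* Added example (a minimal sketch, not in the original file): the three
* helpers above return per-lane queue depths, so the total depth for a
* connection is assumed to scale linearly with the number of lanes;
* rpmem_fip_cq_total() is a hypothetical helper.
*/
static inline size_t
rpmem_fip_cq_total(enum rpmem_persist_method pm, enum rpmem_fip_node node,
unsigned nlanes)
{
return rpmem_fip_cq_size(pm, node) * nlanes;
}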
/*
* rpmem_fip_max_nlanes -- returns maximum number of lanes
*/
size_t
rpmem_fip_max_nlanes(struct fi_info *fi)
{
return min(min(fi->domain_attr->tx_ctx_cnt,
fi->domain_attr->rx_ctx_cnt),
fi->domain_attr->cq_cnt);
}
/*
* rpmem_fip_print_info -- print some useful info about fabric interface
*/
void
rpmem_fip_print_info(struct fi_info *fi)
{
RPMEMC_LOG(INFO, "libfabric version: %s",
fi_tostr(fi, FI_TYPE_VERSION));
char *str = fi_tostr(fi, FI_TYPE_INFO);
char *buff = strdup(str);
if (!buff) {
RPMEMC_LOG(ERR, "!allocating string buffer for "
"libfabric interface information");
return;
}
RPMEMC_LOG(INFO, "libfabric interface info:");
char *nl;
char *last = buff;
while (last != NULL) {
nl = strchr(last, '\n');
if (nl) {
*nl = '\0';
nl++;
}
RPMEMC_LOG(INFO, "%s", last);
last = nl;
}
free(buff);
}
| 7,550 | 21.675676 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_common_log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmem_common_log.h -- common log macros for librpmem and rpmemd
*/
#if defined(RPMEMC_LOG_RPMEM) && defined(RPMEMC_LOG_RPMEMD)
#error Both RPMEMC_LOG_RPMEM and RPMEMC_LOG_RPMEMD defined
#elif !defined(RPMEMC_LOG_RPMEM) && !defined(RPMEMC_LOG_RPMEMD)
#define RPMEMC_LOG(level, fmt, args...) do {} while (0)
#define RPMEMC_DBG(level, fmt, args...) do {} while (0)
#define RPMEMC_FATAL(fmt, args...) do {} while (0)
#define RPMEMC_ASSERT(cond) do {} while (0)
#elif defined(RPMEMC_LOG_RPMEM)
#include "out.h"
#include "rpmem_util.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEM_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEM_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEM_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEM_ASSERT(cond)
#else
#include "rpmemd_log.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEMD_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEMD_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEMD_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEMD_ASSERT(cond)
#endif
| 1,160 | 28.769231 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_COMMON_H
#define RPMEM_COMMON_H 1
/*
* Environment variables, default commands and timeout values used by
* librpmem and rpmemd
*/
#define RPMEM_CMD_ENV "RPMEM_CMD"
#define RPMEM_SSH_ENV "RPMEM_SSH"
#define RPMEM_DEF_CMD "rpmemd"
#define RPMEM_DEF_SSH "ssh"
#define RPMEM_PROV_SOCKET_ENV "RPMEM_ENABLE_SOCKETS"
#define RPMEM_PROV_VERBS_ENV "RPMEM_ENABLE_VERBS"
#define RPMEM_MAX_NLANES_ENV "RPMEM_MAX_NLANES"
#define RPMEM_WQ_SIZE_ENV "RPMEM_WORK_QUEUE_SIZE"
#define RPMEM_ACCEPT_TIMEOUT 30000
#define RPMEM_CONNECT_TIMEOUT 30000
#define RPMEM_MONITOR_TIMEOUT 1000
#include <stdint.h>
#include <sys/socket.h>
#include <netdb.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_err -- error codes
*/
enum rpmem_err {
RPMEM_SUCCESS = 0,
RPMEM_ERR_BADPROTO = 1,
RPMEM_ERR_BADNAME = 2,
RPMEM_ERR_BADSIZE = 3,
RPMEM_ERR_BADNLANES = 4,
RPMEM_ERR_BADPROVIDER = 5,
RPMEM_ERR_FATAL = 6,
RPMEM_ERR_FATAL_CONN = 7,
RPMEM_ERR_BUSY = 8,
RPMEM_ERR_EXISTS = 9,
RPMEM_ERR_PROVNOSUP = 10,
RPMEM_ERR_NOEXIST = 11,
RPMEM_ERR_NOACCESS = 12,
RPMEM_ERR_POOL_CFG = 13,
MAX_RPMEM_ERR,
};
/*
* rpmem_persist_method -- remote persist operation method
*/
enum rpmem_persist_method {
RPMEM_PM_GPSPM = 1, /* General Purpose Server Persistency Method */
RPMEM_PM_APM = 2, /* Appliance Persistency Method */
MAX_RPMEM_PM,
};
const char *rpmem_persist_method_to_str(enum rpmem_persist_method pm);
/*
* rpmem_provider -- supported providers
*/
enum rpmem_provider {
RPMEM_PROV_UNKNOWN = 0,
RPMEM_PROV_LIBFABRIC_VERBS = 1,
RPMEM_PROV_LIBFABRIC_SOCKETS = 2,
MAX_RPMEM_PROV,
};
enum rpmem_provider rpmem_provider_from_str(const char *str);
const char *rpmem_provider_to_str(enum rpmem_provider provider);
/*
* rpmem_req_attr -- arguments for open/create request
*/
struct rpmem_req_attr {
size_t pool_size;
unsigned nlanes;
size_t buff_size;
enum rpmem_provider provider;
const char *pool_desc;
};
/*
* rpmem_resp_attr -- return arguments from open/create request
*/
struct rpmem_resp_attr {
unsigned short port;
uint64_t rkey;
uint64_t raddr;
unsigned nlanes;
enum rpmem_persist_method persist_method;
};
#define RPMEM_HAS_USER 0x1
#define RPMEM_HAS_SERVICE 0x2
#define RPMEM_FLAGS_USE_IPV4 0x4
#define RPMEM_MAX_USER (32 + 1) /* see useradd(8) + 1 for '\0' */
#define RPMEM_MAX_NODE (255 + 1) /* see gethostname(2) + 1 for '\0' */
#define RPMEM_MAX_SERVICE (NI_MAXSERV + 1) /* + 1 for '\0' */
#define RPMEM_HDR_SIZE 4096
#define RPMEM_CLOSE_FLAGS_REMOVE 0x1
#define RPMEM_DEF_BUFF_SIZE 8192
struct rpmem_target_info {
char user[RPMEM_MAX_USER];
char node[RPMEM_MAX_NODE];
char service[RPMEM_MAX_SERVICE];
unsigned flags;
};
extern unsigned Rpmem_max_nlanes;
extern unsigned Rpmem_wq_size;
extern int Rpmem_fork_unsafe;
int rpmem_b64_write(int sockfd, const void *buf, size_t len, int flags);
int rpmem_b64_read(int sockfd, void *buf, size_t len, int flags);
const char *rpmem_get_ip_str(const struct sockaddr *addr);
struct rpmem_target_info *rpmem_target_parse(const char *target);
void rpmem_target_free(struct rpmem_target_info *info);
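/*
* Added example (a sketch, not part of the original header): judging by
* the rpmem_target_info fields above, targets are assumed to take the
* [<user>@]<node>[:<service>] form; rpmem_target_try() is a hypothetical
* helper.
*/
static inline int
rpmem_target_try(const char *target)
{
/* e.g. "user@host:9876" would fill the user, node and service fields */
struct rpmem_target_info *info = rpmem_target_parse(target);
if (info == NULL)
return -1;
rpmem_target_free(info);
return 0;
}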
int rpmem_xwrite(int fd, const void *buf, size_t len, int flags);
int rpmem_xread(int fd, void *buf, size_t len, int flags);
char *rpmem_get_ssh_conn_addr(void);
#ifdef __cplusplus
}
#endif
#endif
| 3,404 | 23.321429 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_proto.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_proto.h -- rpmem protocol definitions
*/
#ifndef RPMEM_PROTO_H
#define RPMEM_PROTO_H 1
#include <stdint.h>
#include <endian.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PACKED __attribute__((packed))
#define RPMEM_PROTO "tcp"
#define RPMEM_PROTO_MAJOR 0
#define RPMEM_PROTO_MINOR 1
#define RPMEM_SIG_SIZE 8
#define RPMEM_UUID_SIZE 16
#define RPMEM_PROV_SIZE 32
#define RPMEM_USER_SIZE 16
/*
* rpmem_msg_type -- type of messages
*/
enum rpmem_msg_type {
RPMEM_MSG_TYPE_CREATE = 1, /* create request */
RPMEM_MSG_TYPE_CREATE_RESP = 2, /* create request response */
RPMEM_MSG_TYPE_OPEN = 3, /* open request */
RPMEM_MSG_TYPE_OPEN_RESP = 4, /* open request response */
RPMEM_MSG_TYPE_CLOSE = 5, /* close request */
RPMEM_MSG_TYPE_CLOSE_RESP = 6, /* close request response */
RPMEM_MSG_TYPE_SET_ATTR = 7, /* set attributes request */
/* set attributes request response */
RPMEM_MSG_TYPE_SET_ATTR_RESP = 8,
MAX_RPMEM_MSG_TYPE,
};
/*
* rpmem_pool_attr_packed -- a packed version of the rpmem_pool_attr structure
*/
struct rpmem_pool_attr_packed {
char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
uint32_t compat_features; /* mask: compatible "may" features */
uint32_t incompat_features; /* mask: "must support" features */
uint32_t ro_compat_features; /* mask: force RO if unsupported */
unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
} PACKED;
/*
* rpmem_msg_ibc_attr -- in-band connection attributes
*
* Used by create request response and open request response.
* Contains essential information to proceed with in-band connection
* initialization.
*/
struct rpmem_msg_ibc_attr {
uint32_t port; /* RDMA connection port */
uint32_t persist_method; /* persist method */
uint64_t rkey; /* remote key */
uint64_t raddr; /* remote address */
uint32_t nlanes; /* number of lanes */
} PACKED;
/*
* rpmem_msg_pool_desc -- remote pool descriptor
*/
struct rpmem_msg_pool_desc {
uint32_t size; /* size of pool descriptor */
uint8_t desc[0]; /* pool descriptor, null-terminated string */
} PACKED;
/*
* rpmem_msg_hdr -- message header which consists of type and size of message
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr {
uint32_t type; /* type of message */
uint64_t size; /* size of message */
uint8_t body[0];
} PACKED;
/*
* rpmem_msg_hdr_resp -- message response header which consists of type, size
* and status.
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr_resp {
uint32_t status; /* response status */
uint32_t type; /* type of message */
uint64_t size; /* size of message */
} PACKED;
/*
* rpmem_msg_common -- common fields for open/create messages
*/
struct rpmem_msg_common {
uint16_t major; /* protocol version major number */
uint16_t minor; /* protocol version minor number */
uint64_t pool_size; /* minimum required size of a pool */
uint32_t nlanes; /* number of lanes used by initiator */
uint32_t provider; /* provider */
uint64_t buff_size; /* buffer size for inline persist */
} PACKED;
/*
* rpmem_msg_create -- create request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE.
* The size of message must be set to
* sizeof(struct rpmem_msg_create) + pool_desc_size
*/
struct rpmem_msg_create {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_create_resp -- create request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_create_resp).
*/
struct rpmem_msg_create_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
} PACKED;
/*
* rpmem_msg_open -- open request message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN.
* The size of message must be set to
* sizeof(struct rpmem_msg_open) + pool_desc_size
*/
struct rpmem_msg_open {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_open_resp -- open request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_open_resp)
*/
struct rpmem_msg_open_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_close -- close request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE
* The size of message must be set to sizeof(struct rpmem_msg_close)
*/
struct rpmem_msg_close {
struct rpmem_msg_hdr hdr; /* message header */
uint32_t flags; /* flags */
} PACKED;
/*
* rpmem_msg_close_resp -- close request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE_RESP
* The size of message must be set to sizeof(struct rpmem_msg_close_resp)
*/
struct rpmem_msg_close_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
/* no more fields */
} PACKED;
#define RPMEM_FLUSH_WRITE 0U /* flush / persist using RDMA WRITE */
#define RPMEM_DEEP_PERSIST 1U /* deep persist operation */
#define RPMEM_PERSIST_SEND 2U /* persist using RDMA SEND */
#define RPMEM_COMPLETION 4U /* schedule command with a completion */
/* the two least significant bits are reserved for mode of persist */
#define RPMEM_FLUSH_PERSIST_MASK 0x3U
#define RPMEM_PERSIST_MAX 2U /* maximum valid persist value */
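/*
* Added example (a minimal sketch, not in the original header): the two
* least significant bits of the lane flags carry the persist mode, so it
* is assumed to be extracted with the mask below; rpmem_persist_mode() is
* a hypothetical helper.
*/
static inline unsigned
rpmem_persist_mode(unsigned flags)
{
/* yields RPMEM_FLUSH_WRITE, RPMEM_DEEP_PERSIST or RPMEM_PERSIST_SEND */
return flags & RPMEM_FLUSH_PERSIST_MASK;
}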
/*
* rpmem_msg_persist -- remote persist message
*/
struct rpmem_msg_persist {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
uint64_t addr; /* remote memory address */
uint64_t size; /* remote memory size */
uint8_t data[];
};
/*
* rpmem_msg_persist_resp -- remote persist response message
*/
struct rpmem_msg_persist_resp {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
};
/*
* rpmem_msg_set_attr -- set attributes request message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr)
*/
struct rpmem_msg_set_attr {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_set_attr_resp -- set attributes request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr_resp).
*/
struct rpmem_msg_set_attr_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
} PACKED;
/*
* XXX Begin: Suppress gcc conversion warnings for FreeBSD be*toh macros.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
/*
* rpmem_ntoh_msg_ibc_attr -- convert rpmem_msg_ibc attr to host byte order
*/
static inline void
rpmem_ntoh_msg_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
ibc->port = be32toh(ibc->port);
ibc->persist_method = be32toh(ibc->persist_method);
ibc->rkey = be64toh(ibc->rkey);
ibc->raddr = be64toh(ibc->raddr);
}
/*
* rpmem_ntoh_msg_pool_desc -- convert rpmem_msg_pool_desc to host byte order
*/
static inline void
rpmem_ntoh_msg_pool_desc(struct rpmem_msg_pool_desc *pool_desc)
{
pool_desc->size = be32toh(pool_desc->size);
}
/*
* rpmem_ntoh_pool_attr -- convert rpmem_pool_attr to host byte order
*/
static inline void
rpmem_ntoh_pool_attr(struct rpmem_pool_attr_packed *attr)
{
attr->major = be32toh(attr->major);
attr->ro_compat_features = be32toh(attr->ro_compat_features);
attr->incompat_features = be32toh(attr->incompat_features);
attr->compat_features = be32toh(attr->compat_features);
}
/*
* rpmem_ntoh_msg_hdr -- convert rpmem_msg_hdr to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr -- convert rpmem_msg_hdr to network byte order
*/
static inline void
rpmem_hton_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
rpmem_ntoh_msg_hdr(hdrp);
}
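/*
* The rpmem_hton_* wrappers simply call their rpmem_ntoh_* counterparts:
* a big-endian byte swap is its own inverse, so on any given host
* be64toh() and htobe64() perform the same transformation and one
* routine serves both directions.
*/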
/*
* rpmem_ntoh_msg_hdr_resp -- convert rpmem_msg_hdr_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
hdrp->status = be32toh(hdrp->status);
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr_resp -- convert rpmem_msg_hdr_resp to network byte order
*/
static inline void
rpmem_hton_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
rpmem_ntoh_msg_hdr_resp(hdrp);
}
/*
* rpmem_ntoh_msg_common -- convert rpmem_msg_common to host byte order
*/
static inline void
rpmem_ntoh_msg_common(struct rpmem_msg_common *msg)
{
msg->major = be16toh(msg->major);
msg->minor = be16toh(msg->minor);
msg->pool_size = be64toh(msg->pool_size);
msg->nlanes = be32toh(msg->nlanes);
msg->provider = be32toh(msg->provider);
msg->buff_size = be64toh(msg->buff_size);
}
/*
* rpmem_hton_msg_common -- convert rpmem_msg_common to network byte order
*/
static inline void
rpmem_hton_msg_common(struct rpmem_msg_common *msg)
{
rpmem_ntoh_msg_common(msg);
}
/*
* rpmem_ntoh_msg_create -- convert rpmem_msg_create to host byte order
*/
static inline void
rpmem_ntoh_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_pool_attr(&msg->pool_attr);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* rpmem_hton_msg_create -- convert rpmem_msg_create to network byte order
*/
static inline void
rpmem_hton_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_create(msg);
}
/*
* rpmem_ntoh_msg_create_resp -- convert rpmem_msg_create_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
}
/*
* rpmem_hton_msg_create_resp -- convert rpmem_msg_create_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_create_resp(msg);
}
/*
* rpmem_ntoh_msg_open -- convert rpmem_msg_open to host byte order
*/
static inline void
rpmem_ntoh_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* XXX End: Suppress gcc conversion warnings for FreeBSD be*toh macros
*/
#pragma GCC diagnostic pop
/*
* rpmem_hton_msg_open -- convert rpmem_msg_open to network byte order
*/
static inline void
rpmem_hton_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_open(msg);
}
/*
* rpmem_ntoh_msg_open_resp -- convert rpmem_msg_open_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_open_resp -- convert rpmem_msg_open_resp to network byte order
*/
static inline void
rpmem_hton_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_open_resp(msg);
}
/*
* rpmem_ntoh_msg_set_attr -- convert rpmem_msg_set_attr to host byte order
*/
static inline void
rpmem_ntoh_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_set_attr -- convert rpmem_msg_set_attr to network byte order
*/
static inline void
rpmem_hton_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_set_attr(msg);
}
/*
* rpmem_ntoh_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to network
* byte order
*/
static inline void
rpmem_hton_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_hton_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_ntoh_msg_close -- convert rpmem_msg_close to host byte order
*/
static inline void
rpmem_ntoh_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
}
/*
* rpmem_hton_msg_close -- convert rpmem_msg_close to network byte order
*/
static inline void
rpmem_hton_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_close(msg);
}
/*
* rpmem_ntoh_msg_close_resp -- convert rpmem_msg_close_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_close_resp -- convert rpmem_msg_close_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_close_resp(msg);
}
/*
* pack_rpmem_pool_attr -- copy pool attributes to a packed structure
*/
static inline void
pack_rpmem_pool_attr(const struct rpmem_pool_attr *src,
struct rpmem_pool_attr_packed *dst)
{
memcpy(dst->signature, src->signature, sizeof(src->signature));
dst->major = src->major;
dst->compat_features = src->compat_features;
dst->incompat_features = src->incompat_features;
dst->ro_compat_features = src->ro_compat_features;
memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
/*
* unpack_rpmem_pool_attr -- copy pool attributes to an unpacked structure
*/
static inline void
unpack_rpmem_pool_attr(const struct rpmem_pool_attr_packed *src,
struct rpmem_pool_attr *dst)
{
memcpy(dst->signature, src->signature, sizeof(src->signature));
dst->major = src->major;
dst->compat_features = src->compat_features;
dst->incompat_features = src->incompat_features;
dst->ro_compat_features = src->ro_compat_features;
memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
#ifdef __cplusplus
}
#endif
#endif
| 15,016 | 26.503663 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_lane.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_fip_lane.h -- rpmem fabric provider lane definition
*/
#include <sched.h>
#include <stdint.h>
#include "sys_util.h"
/*
* rpmem_fip_lane -- basic lane structure
*
* This structure consists of a synchronization object and a return value.
* It is possible to wait on the lane for a specified event. The event can be
* signalled by another thread which can pass the return value if required.
*
* The sync variable can store up to 64 different events, each event on
* a separate bit.
*/
struct rpmem_fip_lane {
os_spinlock_t lock;
int ret;
uint64_t sync;
};
/*
* rpmem_fip_lane_init -- initialize basic lane structure
*/
static inline int
rpmem_fip_lane_init(struct rpmem_fip_lane *lanep)
{
lanep->ret = 0;
lanep->sync = 0;
return util_spin_init(&lanep->lock, PTHREAD_PROCESS_PRIVATE);
}
/*
* rpmem_fip_lane_fini -- deinitialize basic lane structure
*/
static inline void
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
util_spin_destroy(&lanep->lock);
}
/*
* rpmem_fip_lane_busy -- return true if lane has pending events
*/
static inline int
rpmem_fip_lane_busy(struct rpmem_fip_lane *lanep)
{
util_spin_lock(&lanep->lock);
int ret = lanep->sync != 0;
util_spin_unlock(&lanep->lock);
return ret;
}
/*
* rpmem_fip_lane_begin -- begin waiting for specified event(s)
*/
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->ret = 0;
lanep->sync |= sig;
util_spin_unlock(&lanep->lock);
}
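/*
* rpmem_fip_lane_is_busy -- return true if any of the specified events is
* still pending
*/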
static inline int
rpmem_fip_lane_is_busy(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
int ret = (lanep->sync & sig) != 0;
util_spin_unlock(&lanep->lock);
return ret;
}
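/*
* rpmem_fip_lane_ret -- return the value stored by the last signalling
* thread
*/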
static inline int
rpmem_fip_lane_ret(struct rpmem_fip_lane *lanep)
{
util_spin_lock(&lanep->lock);
int ret = lanep->ret;
util_spin_unlock(&lanep->lock);
return ret;
}
/*
* rpmem_fip_lane_wait -- wait for specified event(s)
*/
static inline int
rpmem_fip_lane_wait(struct rpmem_fip_lane *lanep, uint64_t sig)
{
while (rpmem_fip_lane_is_busy(lanep, sig))
sched_yield();
return rpmem_fip_lane_ret(lanep);
}
/*
* rpmem_fip_lane_signal -- signal lane about specified event
*/
static inline void
rpmem_fip_lane_signal(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->sync &= ~sig;
util_spin_unlock(&lanep->lock);
}
/*
* rpmem_fip_lane_sigret -- signal lane about specified event and store
* return value
*/
static inline void
rpmem_fip_lane_sigret(struct rpmem_fip_lane *lanep, uint64_t sig, int ret)
{
util_spin_lock(&lanep->lock);
lanep->ret = ret;
lanep->sync &= ~sig;
util_spin_unlock(&lanep->lock);
}
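/*
* Added usage sketch (illustrative, not part of the original header): the
* intended handshake between a waiting thread and a completing thread;
* RPMEM_EXAMPLE_EVENT and rpmem_fip_lane_example() are assumptions made
* for this example only.
*/
#define RPMEM_EXAMPLE_EVENT (1ULL << 0)
static inline int
rpmem_fip_lane_example(struct rpmem_fip_lane *lanep)
{
rpmem_fip_lane_begin(lanep, RPMEM_EXAMPLE_EVENT);
/* ... post the operation; on completion another thread calls */
/* rpmem_fip_lane_sigret(lanep, RPMEM_EXAMPLE_EVENT, 0); ... */
return rpmem_fip_lane_wait(lanep, RPMEM_EXAMPLE_EVENT);
}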
| 2,754 | 20.523438 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_msg.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_fip_msg.h -- simple wrappers for fi_rma(3) and fi_msg(3) functions
*/
#ifndef RPMEM_FIP_MSG_H
#define RPMEM_FIP_MSG_H 1
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_fip_rma -- helper struct for RMA operation
*/
struct rpmem_fip_rma {
struct fi_msg_rma msg; /* message structure */
struct iovec msg_iov; /* IO vector buffer */
struct fi_rma_iov rma_iov; /* RMA IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* RMA operation flags */
};
/*
* rpmem_fip_msg -- helper struct for MSG operation
*/
struct rpmem_fip_msg {
struct fi_msg msg; /* message structure */
struct iovec iov; /* IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* MSG operation flags */
};
/*
* rpmem_fip_rma_init -- initialize RMA helper struct
*/
static inline void
rpmem_fip_rma_init(struct rpmem_fip_rma *rma, void *desc,
fi_addr_t addr, uint64_t rkey, void *context, uint64_t flags)
{
memset(rma, 0, sizeof(*rma));
rma->desc = desc;
rma->flags = flags;
rma->rma_iov.key = rkey;
rma->msg.context = context;
rma->msg.addr = addr;
rma->msg.desc = &rma->desc;
rma->msg.rma_iov = &rma->rma_iov;
rma->msg.rma_iov_count = 1;
rma->msg.msg_iov = &rma->msg_iov;
rma->msg.iov_count = 1;
}
/*
* rpmem_fip_msg_init -- initialize MSG helper struct
*/
static inline void
rpmem_fip_msg_init(struct rpmem_fip_msg *msg, void *desc, fi_addr_t addr,
void *context, void *buff, size_t len, uint64_t flags)
{
memset(msg, 0, sizeof(*msg));
msg->desc = desc;
msg->flags = flags;
msg->iov.iov_base = buff;
msg->iov.iov_len = len;
msg->msg.context = context;
msg->msg.addr = addr;
msg->msg.desc = &msg->desc;
msg->msg.msg_iov = &msg->iov;
msg->msg.iov_count = 1;
}
/*
* rpmem_fip_writemsg -- wrapper for fi_writemsg
*/
static inline int
rpmem_fip_writemsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
const void *buff, size_t len, uint64_t addr)
{
rma->rma_iov.addr = addr;
rma->rma_iov.len = len;
rma->msg_iov.iov_base = (void *)buff;
rma->msg_iov.iov_len = len;
return (int)fi_writemsg(ep, &rma->msg, rma->flags);
}
/*
* rpmem_fip_readmsg -- wrapper for fi_readmsg
*/
static inline int
rpmem_fip_readmsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
void *buff, size_t len, uint64_t addr)
{
rma->rma_iov.addr = addr;
rma->rma_iov.len = len;
rma->msg_iov.iov_base = buff;
rma->msg_iov.iov_len = len;
return (int)fi_readmsg(ep, &rma->msg, rma->flags);
}
/*
* rpmem_fip_sendmsg -- wrapper for fi_sendmsg
*/
static inline int
rpmem_fip_sendmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg, size_t len)
{
msg->iov.iov_len = len;
return (int)fi_sendmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_recvmsg -- wrapper for fi_recvmsg
*/
static inline int
rpmem_fip_recvmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg)
{
return (int)fi_recvmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_msg_get_pmsg -- returns message buffer as a persist message
*/
static inline struct rpmem_msg_persist *
rpmem_fip_msg_get_pmsg(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist *)msg->iov.iov_base;
}
/*
* rpmem_fip_msg_get_pres -- returns message buffer as a persist response
*/
static inline struct rpmem_msg_persist_resp *
rpmem_fip_msg_get_pres(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist_resp *)msg->iov.iov_base;
}
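/*
* Added example (a minimal sketch, not in the original header): typical
* setup for receiving a persist message on an endpoint; ep, desc, addr
* and the buffer are assumed to come from the caller, and
* rpmem_fip_post_pmsg_recv() is a hypothetical helper.
*/
static inline int
rpmem_fip_post_pmsg_recv(struct fid_ep *ep, struct rpmem_fip_msg *msg,
void *desc, fi_addr_t addr, void *buff, size_t len)
{
/* bind the buffer once; the same message can be re-posted later */
rpmem_fip_msg_init(msg, desc, addr, NULL, buff, len, FI_COMPLETION);
return rpmem_fip_recvmsg(ep, msg);
}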
#ifdef __cplusplus
}
#endif
#endif
| 3,494 | 22.77551 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/libpmempool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool.c -- entry points for libpmempool
*/
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/param.h>
#include "pmemcommon.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check.h"
#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_util.h"
#endif
#ifdef _WIN32
#define ANSWER_BUFFSIZE 256
#endif
/*
* libpmempool_init -- load-time initialization for libpmempool
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmempool_init(void)
{
common_init(PMEMPOOL_LOG_PREFIX, PMEMPOOL_LOG_LEVEL_VAR,
PMEMPOOL_LOG_FILE_VAR, PMEMPOOL_MAJOR_VERSION,
PMEMPOOL_MINOR_VERSION);
LOG(3, NULL);
#ifdef USE_RPMEM
util_remote_init();
rpmem_util_cmds_init();
#endif
}
/*
* libpmempool_fini -- libpmempool cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmempool_fini(void)
{
LOG(3, NULL);
#ifdef USE_RPMEM
util_remote_unload();
util_remote_fini();
rpmem_util_cmds_fini();
#endif
common_fini();
}
/*
* pmempool_check_versionU -- see if library meets application version
* requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmempool_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMPOOL_MAJOR_VERSION) {
ERR("libpmempool major version mismatch (need %u, found %u)",
major_required, PMEMPOOL_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMPOOL_MINOR_VERSION) {
ERR("libpmempool minor version mismatch (need %u, found %u)",
minor_required, PMEMPOOL_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_version -- see if lib meets application version requirements
*/
const char *
pmempool_check_version(unsigned major_required, unsigned minor_required)
{
return pmempool_check_versionU(major_required, minor_required);
}
#else
/*
* pmempool_check_versionW -- see if library meets application version
* requirements as widechar
*/
const wchar_t *
pmempool_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmempool_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmempool_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmempool_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmempool_errormsg -- return last error message
*/
const char *
pmempool_errormsg(void)
{
return pmempool_errormsgU();
}
#else
/*
* pmempool_errormsgW -- return last error message as widechar
*/
const wchar_t *
pmempool_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
/*
* pmempool_ppc_set_default -- (internal) set default values of check context
*/
static void
pmempool_ppc_set_default(PMEMpoolcheck *ppc)
{
/* all other fields should be zeroed */
const PMEMpoolcheck ppc_default = {
.args = {
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
},
.result = CHECK_RESULT_CONSISTENT,
};
*ppc = ppc_default;
}
/*
* pmempool_check_initU -- initialize check context
*/
#ifndef _WIN32
static inline
#endif
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size)
{
LOG(3, "path %s backup_path %s pool_type %u flags %x", args->path,
args->backup_path, args->pool_type, args->flags);
/*
* Currently one size of args structure is supported. The version of the
* pmempool_check_args structure can be distinguished based on provided
* args_size.
*/
if (args_size < sizeof(struct pmempool_check_args)) {
ERR("provided args_size is not supported");
errno = EINVAL;
return NULL;
}
/*
* Dry run does not allow making changes that repair might otherwise
* perform. Advanced allows performing more complex repairs. Questions
* are asked only if repairs are made. So dry run, advanced and always_yes
* can be set only if repair is set.
*/
if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_REPAIR) &&
util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN |
PMEMPOOL_CHECK_ADVANCED | PMEMPOOL_CHECK_ALWAYS_YES)) {
ERR("dry_run, advanced and always_yes are applicable only if "
"repair is set");
errno = EINVAL;
return NULL;
}
/*
* dry run does not modify anything so performing backup is redundant
*/
if (util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN) &&
args->backup_path != NULL) {
ERR("dry run does not allow one to perform backup");
errno = EINVAL;
return NULL;
}
/*
* libpmempool uses the str format of communication, so it must be set
*/
if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_FORMAT_STR)) {
ERR("PMEMPOOL_CHECK_FORMAT_STR flag must be set");
errno = EINVAL;
return NULL;
}
PMEMpoolcheck *ppc = calloc(1, sizeof(*ppc));
if (ppc == NULL) {
ERR("!calloc");
return NULL;
}
pmempool_ppc_set_default(ppc);
memcpy(&ppc->args, args, sizeof(ppc->args));
ppc->path = strdup(args->path);
if (!ppc->path) {
ERR("!strdup");
goto error_path_malloc;
}
ppc->args.path = ppc->path;
if (args->backup_path != NULL) {
ppc->backup_path = strdup(args->backup_path);
if (!ppc->backup_path) {
ERR("!strdup");
goto error_backup_path_malloc;
}
ppc->args.backup_path = ppc->backup_path;
}
if (check_init(ppc) != 0)
goto error_check_init;
return ppc;
error_check_init:
/* in case errno not set by any of the used functions set its value */
if (errno == 0)
errno = EINVAL;
free(ppc->backup_path);
error_backup_path_malloc:
free(ppc->path);
error_path_malloc:
free(ppc);
return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_init -- initialize check context
*/
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size)
{
return pmempool_check_initU(args, args_size);
}
#else
/*
* pmempool_check_initW -- initialize check context as widechar
*/
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size)
{
char *upath = util_toUTF8(args->path);
if (upath == NULL)
return NULL;
char *ubackup_path = NULL;
if (args->backup_path != NULL) {
ubackup_path = util_toUTF8(args->backup_path);
if (ubackup_path == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
struct pmempool_check_argsU uargs = {
.path = upath,
.backup_path = ubackup_path,
.pool_type = args->pool_type,
.flags = args->flags
};
PMEMpoolcheck *ret = pmempool_check_initU(&uargs, args_size);
util_free_UTF8(ubackup_path);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmempool_checkU -- continue check until a status is produced for the caller
*/
#ifndef _WIN32
static inline
#endif
struct pmempool_check_statusU *
pmempool_checkU(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
ASSERTne(ppc, NULL);
struct check_status *result;
do {
result = check_step(ppc);
if (check_is_end(ppc->data) && result == NULL)
return NULL;
} while (result == NULL);
return check_status_get(result);
}
#ifndef _WIN32
/*
* pmempool_check -- continue check until a status is produced for the caller
*/
struct pmempool_check_status *
pmempool_check(PMEMpoolcheck *ppc)
{
return pmempool_checkU(ppc);
}
#else
/*
* pmempool_checkW -- continue check until a status is produced for the caller
*/
struct pmempool_check_statusW *
pmempool_checkW(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
ASSERTne(ppc, NULL);
/* check the cache and convert msg and answer */
char buf[ANSWER_BUFFSIZE];
memset(buf, 0, ANSWER_BUFFSIZE);
convert_status_cache(ppc, buf, ANSWER_BUFFSIZE);
struct check_status *uresult;
do {
uresult = check_step(ppc);
if (check_is_end(ppc->data) && uresult == NULL)
return NULL;
} while (uresult == NULL);
struct pmempool_check_statusU *uret_res = check_status_get(uresult);
const wchar_t *wmsg = util_toUTF16(uret_res->str.msg);
if (wmsg == NULL)
FATAL("!malloc");
struct pmempool_check_statusW *wret_res =
(struct pmempool_check_statusW *)uret_res;
/* pointer to old message is freed in next check step */
wret_res->str.msg = wmsg;
return wret_res;
}
#endif
/*
* pmempool_check_end -- end check and release check context
*/
enum pmempool_check_result
pmempool_check_end(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
const enum check_result result = ppc->result;
const unsigned sync_required = ppc->sync_required;
check_fini(ppc);
free(ppc->path);
free(ppc->backup_path);
free(ppc);
if (sync_required) {
switch (result) {
case CHECK_RESULT_CONSISTENT:
case CHECK_RESULT_REPAIRED:
return PMEMPOOL_CHECK_RESULT_SYNC_REQ;
default:
/* other results require fixing prior to sync */
;
}
}
switch (result) {
case CHECK_RESULT_CONSISTENT:
return PMEMPOOL_CHECK_RESULT_CONSISTENT;
case CHECK_RESULT_NOT_CONSISTENT:
return PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT;
case CHECK_RESULT_REPAIRED:
return PMEMPOOL_CHECK_RESULT_REPAIRED;
case CHECK_RESULT_CANNOT_REPAIR:
return PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR;
default:
return PMEMPOOL_CHECK_RESULT_ERROR;
}
}
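/*
* Added usage sketch (illustrative, not part of the original file): the
* driver loop the API above is designed for, patterned after the example
* in the libpmempool manual; run_check() is a hypothetical helper that
* answers every question with "yes".
*/
static inline enum pmempool_check_result
run_check(struct pmempool_check_argsU *args)
{
PMEMpoolcheck *ppc = pmempool_check_initU(args, sizeof(*args));
if (ppc == NULL)
return PMEMPOOL_CHECK_RESULT_ERROR;
struct pmempool_check_statusU *status;
while ((status = pmempool_checkU(ppc)) != NULL) {
/* status->str.msg carries the text of each info/error/question */
if (status->type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION)
status->str.answer = "yes";
}
return pmempool_check_end(ppc);
}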
| 9,142 | 20.873206 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/replica.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* replica.h -- module for synchronizing and transforming poolset
*/
#ifndef REPLICA_H
#define REPLICA_H
#include "libpmempool.h"
#include "pool.h"
#include "badblocks.h"
#ifdef __cplusplus
extern "C" {
#endif
#define UNDEF_REPLICA UINT_MAX
#define UNDEF_PART UINT_MAX
/*
* A part marked as broken does not exist or is damaged so that
* it cannot be opened and has to be recreated.
*/
#define IS_BROKEN (1U << 0)
/*
* A replica marked as inconsistent exists but has inconsistent metadata
* (e.g. inconsistent parts or replicas linkage)
*/
#define IS_INCONSISTENT (1U << 1)
/*
* A part or replica marked in this way has bad blocks inside.
*/
#define HAS_BAD_BLOCKS (1U << 2)
/*
* A part marked in this way has bad blocks in the header
*/
#define HAS_CORRUPTED_HEADER (1U << 3)
/*
* A flag which can be passed to sync_replica() to indicate that the function is
* called by pmempool_transform
*/
#define IS_TRANSFORMED (1U << 10)
/*
* Number of lanes utilized when working with remote replicas
*/
#define REMOTE_NLANES 1
/*
* Helping structures for storing part's health status
*/
struct part_health_status {
unsigned flags;
struct badblocks bbs; /* structure with bad blocks */
char *recovery_file_name; /* name of bad block recovery file */
int recovery_file_exists; /* bad block recovery file exists */
};
/*
* Helping structures for storing replica and poolset's health status
*/
struct replica_health_status {
unsigned nparts;
unsigned nhdrs;
/* a flag for the replica */
unsigned flags;
/* effective size of a pool, valid only for healthy replica */
size_t pool_size;
/* flags for each part */
struct part_health_status part[];
};
struct poolset_health_status {
unsigned nreplicas;
/* a flag for the poolset */
unsigned flags;
/* health statuses for each replica */
struct replica_health_status *replica[];
};
/* get index of the (r)th replica health status */
static inline unsigned
REP_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r) % set->nreplicas;
}
/* get index of the (r + 1)th replica health status */
static inline unsigned
REPN_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r + 1) % set->nreplicas;
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTHidx(struct replica_health_status *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (rep->nparts + p) % rep->nparts;
}
/* get (r)th replica health status */
static inline struct replica_health_status *
REP_HEALTH(struct poolset_health_status *set, unsigned r)
{
return set->replica[REP_HEALTHidx(set, r)];
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTH(struct replica_health_status *rep, unsigned p)
{
return rep->part[PART_HEALTHidx(rep, p)].flags;
}
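/*
* Added example (a sketch, not in the original header): the health flags
* form a bitmask, so a part can e.g. be both broken and hold bad blocks
* at the same time; replica_part_needs_recreate() is a hypothetical
* helper.
*/
static inline int
replica_part_needs_recreate(struct replica_health_status *rep, unsigned p)
{
return (PART_HEALTH(rep, p) & (IS_BROKEN | HAS_CORRUPTED_HEADER)) != 0;
}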
uint64_t replica_get_part_offset(struct pool_set *set,
unsigned repn, unsigned partn);
void replica_align_badblock_offset_length(size_t *offset, size_t *length,
struct pool_set *set_in, unsigned repn, unsigned partn);
size_t replica_get_part_data_len(struct pool_set *set_in, unsigned repn,
unsigned partn);
uint64_t replica_get_part_data_offset(struct pool_set *set_in, unsigned repn,
unsigned part);
/*
* is_dry_run -- (internal) check whether only verification mode is enabled
*/
static inline bool
is_dry_run(unsigned flags)
{
/*
* PMEMPOOL_SYNC_DRY_RUN and PMEMPOOL_TRANSFORM_DRY_RUN
* have to have the same value in order to use this common function.
*/
ASSERT_COMPILE_ERROR_ON(PMEMPOOL_SYNC_DRY_RUN !=
PMEMPOOL_TRANSFORM_DRY_RUN);
return flags & PMEMPOOL_SYNC_DRY_RUN;
}
/*
* fix_bad_blocks -- (internal) check whether fixing bad blocks is requested;
* fixing causes bad block recovery files to be read or created,
* depending on whether they already exist
*/
static inline bool
fix_bad_blocks(unsigned flags)
{
return flags & PMEMPOOL_SYNC_FIX_BAD_BLOCKS;
}
int replica_remove_all_recovery_files(struct poolset_health_status *set_hs);
int replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
int fix_bad_blocks);
int replica_create_poolset_health_status(struct pool_set *set,
struct poolset_health_status **set_hsp);
void replica_free_poolset_health_status(struct poolset_health_status *set_s);
int replica_check_poolset_health(struct pool_set *set,
struct poolset_health_status **set_hs,
int called_from_sync, unsigned flags);
int replica_is_part_broken(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs);
int replica_has_bad_blocks(unsigned repn, struct poolset_health_status *set_hs);
int replica_part_has_bad_blocks(struct part_health_status *phs);
int replica_part_has_corrupted_header(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs);
unsigned replica_find_unbroken_part(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_broken(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_consistent(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_healthy(unsigned repn,
struct poolset_health_status *set_hs);
unsigned replica_find_healthy_replica(
struct poolset_health_status *set_hs);
unsigned replica_find_replica_healthy_header(
struct poolset_health_status *set_hs);
int replica_is_poolset_healthy(struct poolset_health_status *set_hs);
int replica_is_poolset_transformed(unsigned flags);
ssize_t replica_get_pool_size(struct pool_set *set, unsigned repn);
int replica_check_part_sizes(struct pool_set *set, size_t min_size);
int replica_check_part_dirs(struct pool_set *set);
int replica_check_local_part_dir(struct pool_set *set, unsigned repn,
unsigned partn);
int replica_open_replica_part_files(struct pool_set *set, unsigned repn);
int replica_open_poolset_part_files(struct pool_set *set);
int replica_sync(struct pool_set *set_in, struct poolset_health_status *set_hs,
unsigned flags);
int replica_transform(struct pool_set *set_in, struct pool_set *set_out,
unsigned flags);
#ifdef __cplusplus
}
#endif
#endif
| 6,216 | 28.325472 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_blk.c -- check pmemblk
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_BLK_BSIZE,
};
/*
* blk_get_max_bsize -- (internal) return maximum size of block for given file
* size
*/
static inline uint32_t
blk_get_max_bsize(uint64_t fsize)
{
LOG(3, NULL);
if (fsize == 0)
return 0;
/* default nfree */
uint32_t nfree = BTT_DEFAULT_NFREE;
/* number of blocks must be at least 2 * nfree */
uint32_t internal_nlba = 2 * nfree;
/* compute arena size from file size without pmemblk structure */
uint64_t arena_size = fsize - sizeof(struct pmemblk);
if (arena_size > BTT_MAX_ARENA)
arena_size = BTT_MAX_ARENA;
arena_size = btt_arena_datasize(arena_size, nfree);
/* compute maximum internal LBA size */
uint64_t internal_lbasize = (arena_size - BTT_ALIGNMENT) /
internal_nlba - BTT_MAP_ENTRY_SIZE;
ASSERT(internal_lbasize <= UINT32_MAX);
if (internal_lbasize < BTT_MIN_LBA_SIZE)
internal_lbasize = BTT_MIN_LBA_SIZE;
internal_lbasize = roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT)
- BTT_INTERNAL_LBA_ALIGNMENT;
return (uint32_t)internal_lbasize;
}
/*
* blk_read -- (internal) read pmemblk header
*/
static int
blk_read(PMEMpoolcheck *ppc)
{
/*
* Here we want to read the pmemblk header without the pool_hdr as we've
* already done it before.
*
* Take the pointer to fields right after pool_hdr, compute the size and
* offset of remaining fields.
*/
uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.blk;
ptr += sizeof(ppc->pool->hdr.blk.hdr);
size_t size = sizeof(ppc->pool->hdr.blk) -
sizeof(ppc->pool->hdr.blk.hdr);
uint64_t offset = sizeof(ppc->pool->hdr.blk.hdr);
if (pool_read(ppc->pool, ptr, size, offset)) {
return CHECK_ERR(ppc, "cannot read pmemblk structure");
}
/* endianness conversion */
ppc->pool->hdr.blk.bsize = le32toh(ppc->pool->hdr.blk.bsize);
return 0;
}
/*
* blk_bsize_valid -- (internal) check if block size is valid for given file
* size
*/
static int
blk_bsize_valid(uint32_t bsize, uint64_t fsize)
{
uint32_t max_bsize = blk_get_max_bsize(fsize);
return (bsize >= max_bsize);
}
/*
* blk_hdr_check -- (internal) check pmemblk header
*/
static int
blk_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "checking pmemblk header");
if (blk_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
/* check for valid BTT Info arena as we can take bsize from it */
if (!ppc->pool->bttc.valid)
pool_blk_get_first_valid_arena(ppc->pool, &ppc->pool->bttc);
if (ppc->pool->bttc.valid) {
const uint32_t btt_bsize =
ppc->pool->bttc.btt_info.external_lbasize;
if (ppc->pool->hdr.blk.bsize != btt_bsize) {
CHECK_ASK(ppc, Q_BLK_BSIZE,
"invalid pmemblk.bsize.|Do you want to set "
"pmemblk.bsize to %u from BTT Info?",
btt_bsize);
}
} else if (!ppc->pool->bttc.zeroed) {
if (ppc->pool->hdr.blk.bsize < BTT_MIN_LBA_SIZE ||
blk_bsize_valid(ppc->pool->hdr.blk.bsize,
ppc->pool->set_file->size)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "invalid pmemblk.bsize");
}
}
if (ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_REPAIRED)
CHECK_INFO(ppc, "pmemblk header correct");
return check_questions_sequence_validate(ppc);
}
/*
* blk_hdr_fix -- (internal) fix pmemblk header
*/
static int
blk_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
LOG(3, NULL);
uint32_t btt_bsize;
switch (question) {
case Q_BLK_BSIZE:
/*
* check for valid BTT Info arena as we can take bsize from it
*/
if (!ppc->pool->bttc.valid)
pool_blk_get_first_valid_arena(ppc->pool,
&ppc->pool->bttc);
btt_bsize = ppc->pool->bttc.btt_info.external_lbasize;
CHECK_INFO(ppc, "setting pmemblk.b_size to 0x%x", btt_bsize);
ppc->pool->hdr.blk.bsize = btt_bsize;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
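/*
* step -- a single check step: a diagnostic callback or a fix callback
* applied to gathered answers, restricted to the given pool type
*/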
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
enum pool_type type;
};
static const struct step steps[] = {
{
.check = blk_hdr_check,
.type = POOL_TYPE_BLK
},
{
.fix = blk_hdr_fix,
.type = POOL_TYPE_BLK
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
ASSERTeq(ppc->pool->params.type, POOL_TYPE_BLK);
const struct step *step = &steps[loc->step++];
if (!(step->type & ppc->pool->params.type))
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (blk_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
* check_blk -- entry point for pmemblk checks
*/
void
check_blk(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 5,277 | 21.176471 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_sds.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* check_sds.c -- shutdown state check
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <endian.h>
#include "out.h"
#include "util_pmem.h"
#include "libpmempool.h"
#include "libpmem.h"
#include "pmempool.h"
#include "pool.h"
#include "set.h"
#include "check_util.h"
enum question {
Q_RESET_SDS,
};
#define SDS_CHECK_STR "checking shutdown state"
#define SDS_OK_STR "shutdown state correct"
#define SDS_DIRTY_STR "shutdown state is dirty"
#define ADR_FAILURE_STR \
"an ADR failure was detected - your pool might be corrupted"
#define ZERO_SDS_STR \
"Do you want to zero shutdown state?"
#define RESET_SDS_STR \
"Do you want to reset shutdown state at your own risk? " \
"If you have more then one replica you will have to " \
"synchronize your pool after this operation."
#define SDS_FAIL_MSG(hdrp) \
IGNORE_SDS(hdrp) ? SDS_DIRTY_STR : ADR_FAILURE_STR
#define SDS_REPAIR_MSG(hdrp) \
IGNORE_SDS(hdrp) \
? SDS_DIRTY_STR ".|" ZERO_SDS_STR \
: ADR_FAILURE_STR ".|" RESET_SDS_STR
/*
* sds_check_replica -- (internal) check if replica is healthy
*/
static int
sds_check_replica(location *loc)
{
LOG(3, NULL);
struct pool_replica *rep = REP(loc->set, loc->replica);
if (rep->remote)
return 0;
/* make a copy of sds as we shouldn't modify a pool */
struct shutdown_state old_sds = loc->hdr.sds;
struct shutdown_state curr_sds;
if (IGNORE_SDS(&loc->hdr))
return util_is_zeroed(&old_sds, sizeof(old_sds)) ? 0 : -1;
shutdown_state_init(&curr_sds, NULL);
/* get current shutdown state */
for (unsigned p = 0; p < rep->nparts; ++p) {
if (shutdown_state_add_part(&curr_sds,
PART(rep, p)->fd, NULL))
return -1;
}
/* compare current and old shutdown state */
return shutdown_state_check(&curr_sds, &old_sds, NULL);
}
/*
* sds_check -- (internal) check shutdown_state
*/
static int
sds_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "%s" SDS_CHECK_STR, loc->prefix);
/* shutdown state is valid */
if (!sds_check_replica(loc)) {
CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
/* shutdown state is NOT valid and can NOT be repaired */
if (CHECK_IS_NOT(ppc, REPAIR)) {
check_end(ppc->data);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%s%s", loc->prefix,
SDS_FAIL_MSG(&loc->hdr));
}
/* shutdown state is NOT valid but can be repaired */
CHECK_ASK(ppc, Q_RESET_SDS, "%s%s", loc->prefix,
SDS_REPAIR_MSG(&loc->hdr));
return check_questions_sequence_validate(ppc);
}
/*
* sds_fix -- (internal) fix shutdown state
*/
static int
sds_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
switch (question) {
case Q_RESET_SDS:
CHECK_INFO(ppc, "%sresetting pool_hdr.sds", loc->prefix);
memset(&loc->hdr.sds, 0, sizeof(loc->hdr.sds));
++loc->healthy_replicas;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
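/*
* step -- a single check step: a diagnostic callback or a fix callback
* applied to gathered answers
*/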
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = sds_check,
},
{
.fix = sds_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static int
step_exe(PMEMpoolcheck *ppc, const struct step *steps, location *loc)
{
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_has_answer(ppc->data))
return 0;
if (check_answer_loop(ppc, loc, NULL, 0 /* fail on no */, step->fix))
return -1;
util_convert2le_hdr(&loc->hdr);
memcpy(loc->hdrp, &loc->hdr, sizeof(loc->hdr));
util_persist_auto(loc->is_dev_dax, loc->hdrp, sizeof(*loc->hdrp));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->pool_hdr_modified = 1;
return 0;
}
/*
* init_prefix -- prepare prefix for messages
*/
static void
init_prefix(location *loc)
{
if (loc->set->nreplicas > 1) {
int ret = util_snprintf(loc->prefix, PREFIX_MAX_SIZE,
"replica %u: ",
loc->replica);
if (ret < 0)
FATAL("!snprintf");
} else
loc->prefix[0] = '\0';
loc->step = 0;
}
/*
* init_location_data -- (internal) prepare location information
*/
static void
init_location_data(PMEMpoolcheck *ppc, location *loc)
{
ASSERTeq(loc->part, 0);
loc->set = ppc->pool->set_file->poolset;
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS)
init_prefix(loc);
struct pool_replica *rep = REP(loc->set, loc->replica);
loc->hdrp = HDR(rep, loc->part);
memcpy(&loc->hdr, loc->hdrp, sizeof(loc->hdr));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->is_dev_dax = PART(rep, 0)->is_dev_dax;
}
/*
* sds_get_healthy_replicas_num -- (internal) get number of healthy replicas
*/
static void
sds_get_healthy_replicas_num(PMEMpoolcheck *ppc, location *loc)
{
const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
loc->healthy_replicas = 0;
loc->part = 0;
for (; loc->replica < nreplicas; loc->replica++) {
init_location_data(ppc, loc);
if (!sds_check_replica(loc)) {
++loc->healthy_replicas; /* healthy replica found */
}
}
loc->replica = 0; /* reset replica index */
}
/*
* check_sds -- entry point for shutdown state checks
*/
void
check_sds(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
location *loc = check_get_step_data(ppc->data);
if (!loc->init_done) {
sds_get_healthy_replicas_num(ppc, loc);
if (loc->healthy_replicas == nreplicas) {
/* all replicas have healthy shutdown state */
/* print summary */
for (; loc->replica < nreplicas; loc->replica++) {
init_prefix(loc);
CHECK_INFO(ppc, "%s" SDS_CHECK_STR,
loc->prefix);
CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
}
return;
} else if (loc->healthy_replicas > 0) {
ppc->sync_required = true;
return;
}
loc->init_done = true;
}
/* produce single healthy replica */
loc->part = 0;
for (; loc->replica < nreplicas; loc->replica++) {
init_location_data(ppc, loc);
while (CHECK_NOT_COMPLETE(loc, steps)) {
ASSERT(loc->step < ARRAY_SIZE(steps));
if (step_exe(ppc, steps, loc))
return;
}
if (loc->healthy_replicas)
break;
}
if (loc->healthy_replicas == 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
CHECK_ERR(ppc, "cannot complete repair, reverting changes");
} else if (loc->healthy_replicas < nreplicas) {
ppc->sync_required = true;
}
}
| 6,571 | 21.662069 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_log.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_log.c -- check pmemlog
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_LOG_START_OFFSET,
Q_LOG_END_OFFSET,
Q_LOG_WRITE_OFFSET,
};
/*
* log_read -- (internal) read pmemlog header
*/
static int
log_read(PMEMpoolcheck *ppc)
{
	/*
	 * Here we want to read the pmemlog header without the pool_hdr, as it
	 * has already been read and verified.
	 *
	 * Take the pointer to the fields right after pool_hdr and compute the
	 * size and offset of the remaining fields.
	 */
uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.log;
ptr += sizeof(ppc->pool->hdr.log.hdr);
size_t size = sizeof(ppc->pool->hdr.log) -
sizeof(ppc->pool->hdr.log.hdr);
uint64_t offset = sizeof(ppc->pool->hdr.log.hdr);
if (pool_read(ppc->pool, ptr, size, offset))
return CHECK_ERR(ppc, "cannot read pmemlog structure");
/* endianness conversion */
log_convert2h(&ppc->pool->hdr.log);
return 0;
}
/*
* log_hdr_check -- (internal) check pmemlog header
*/
static int
log_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "checking pmemlog header");
if (log_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
/* determine constant values for pmemlog */
const uint64_t d_start_offset =
roundup(sizeof(ppc->pool->hdr.log), LOG_FORMAT_DATA_ALIGN);
if (ppc->pool->hdr.log.start_offset != d_start_offset) {
if (CHECK_ASK(ppc, Q_LOG_START_OFFSET,
"invalid pmemlog.start_offset: 0x%jx.|Do you "
"want to set pmemlog.start_offset to default "
"0x%jx?",
ppc->pool->hdr.log.start_offset,
d_start_offset))
goto error;
}
if (ppc->pool->hdr.log.end_offset != ppc->pool->set_file->size) {
if (CHECK_ASK(ppc, Q_LOG_END_OFFSET,
"invalid pmemlog.end_offset: 0x%jx.|Do you "
"want to set pmemlog.end_offset to 0x%jx?",
ppc->pool->hdr.log.end_offset,
ppc->pool->set_file->size))
goto error;
}
if (ppc->pool->hdr.log.write_offset < d_start_offset ||
ppc->pool->hdr.log.write_offset > ppc->pool->set_file->size) {
if (CHECK_ASK(ppc, Q_LOG_WRITE_OFFSET,
"invalid pmemlog.write_offset: 0x%jx.|Do you "
"want to set pmemlog.write_offset to "
"pmemlog.end_offset?",
ppc->pool->hdr.log.write_offset))
goto error;
}
if (ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_REPAIRED)
CHECK_INFO(ppc, "pmemlog header correct");
return check_questions_sequence_validate(ppc);
error:
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return -1;
}
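/*
 * Worked example, illustrative only: roundup() aligns the first usable log
 * byte to LOG_FORMAT_DATA_ALIGN. E.g. with a 4096-byte header structure and
 * LOG_FORMAT_DATA_ALIGN == 4096, d_start_offset == 4096; with a 4097-byte
 * header it would be 8192. A consistent header therefore satisfies
 * d_start_offset <= write_offset <= end_offset == pool file size, which is
 * exactly what the three checks above verify.
 */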
/*
* log_hdr_fix -- (internal) fix pmemlog header
*/
static int
log_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
LOG(3, NULL);
uint64_t d_start_offset;
switch (question) {
case Q_LOG_START_OFFSET:
/* determine constant values for pmemlog */
d_start_offset = roundup(sizeof(ppc->pool->hdr.log),
LOG_FORMAT_DATA_ALIGN);
CHECK_INFO(ppc, "setting pmemlog.start_offset to 0x%jx",
d_start_offset);
ppc->pool->hdr.log.start_offset = d_start_offset;
break;
case Q_LOG_END_OFFSET:
CHECK_INFO(ppc, "setting pmemlog.end_offset to 0x%jx",
ppc->pool->set_file->size);
ppc->pool->hdr.log.end_offset = ppc->pool->set_file->size;
break;
case Q_LOG_WRITE_OFFSET:
CHECK_INFO(ppc, "setting pmemlog.write_offset to "
"pmemlog.end_offset");
ppc->pool->hdr.log.write_offset = ppc->pool->set_file->size;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
enum pool_type type;
};
static const struct step steps[] = {
{
.check = log_hdr_check,
.type = POOL_TYPE_LOG
},
{
.fix = log_hdr_fix,
.type = POOL_TYPE_LOG
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
ASSERTeq(ppc->pool->params.type, POOL_TYPE_LOG);
const struct step *step = &steps[loc->step++];
if (!(step->type & ppc->pool->params.type))
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (log_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
* check_log -- entry point for pmemlog checks
*/
void
check_log(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 4,760 | 21.671429 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_util.h -- internal definitions check util
*/
#ifndef CHECK_UTIL_H
#define CHECK_UTIL_H
#include <time.h>
#include <limits.h>
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
#define CHECK_STEP_COMPLETE UINT_MAX
#define CHECK_INVALID_QUESTION UINT_MAX
#define REQUIRE_ADVANCED "the following error can be fixed using " \
"PMEMPOOL_CHECK_ADVANCED flag"
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
/* check control context */
struct check_data;
struct arena;
/* queue of check statuses */
struct check_status;
/* container storing state of all check steps */
#define PREFIX_MAX_SIZE 30
typedef struct {
unsigned init_done;
unsigned step;
unsigned replica;
unsigned part;
int single_repl;
int single_part;
struct pool_set *set;
int is_dev_dax;
struct pool_hdr *hdrp;
/* copy of the pool header in host byte order */
struct pool_hdr hdr;
int hdr_valid;
/*
* If pool header has been modified this field indicates that
* the pool parameters structure requires refresh.
*/
int pool_hdr_modified;
unsigned healthy_replicas;
struct pool_hdr *next_part_hdrp;
struct pool_hdr *prev_part_hdrp;
struct pool_hdr *next_repl_hdrp;
struct pool_hdr *prev_repl_hdrp;
int next_part_hdr_valid;
int prev_part_hdr_valid;
int next_repl_hdr_valid;
int prev_repl_hdr_valid;
/* valid poolset uuid */
uuid_t *valid_puuid;
/* valid part uuid */
uuid_t *valid_uuid;
/* valid part pool header */
struct pool_hdr *valid_part_hdrp;
int valid_part_done;
unsigned valid_part_replica;
char prefix[PREFIX_MAX_SIZE];
struct arena *arenap;
uint64_t offset;
uint32_t narena;
uint8_t *bitmap;
uint8_t *dup_bitmap;
uint8_t *fbitmap;
struct list *list_inval;
struct list *list_flog_inval;
struct list *list_unmap;
struct {
int btti_header;
int btti_backup;
} valid;
struct {
struct btt_info btti;
uint64_t btti_offset;
} pool_valid;
} location;
/* check steps */
void check_bad_blocks(PMEMpoolcheck *ppc);
void check_backup(PMEMpoolcheck *ppc);
void check_pool_hdr(PMEMpoolcheck *ppc);
void check_pool_hdr_uuids(PMEMpoolcheck *ppc);
void check_sds(PMEMpoolcheck *ppc);
void check_log(PMEMpoolcheck *ppc);
void check_blk(PMEMpoolcheck *ppc);
void check_btt_info(PMEMpoolcheck *ppc);
void check_btt_map_flog(PMEMpoolcheck *ppc);
void check_write(PMEMpoolcheck *ppc);
struct check_data *check_data_alloc(void);
void check_data_free(struct check_data *data);
uint32_t check_step_get(struct check_data *data);
void check_step_inc(struct check_data *data);
location *check_get_step_data(struct check_data *data);
void check_end(struct check_data *data);
int check_is_end_util(struct check_data *data);
int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
uint32_t arg, const char *fmt, ...) FORMAT_PRINTF(4, 5);
void check_status_release(PMEMpoolcheck *ppc, struct check_status *status);
void check_clear_status_cache(struct check_data *data);
struct check_status *check_pop_question(struct check_data *data);
struct check_status *check_pop_error(struct check_data *data);
struct check_status *check_pop_info(struct check_data *data);
bool check_has_error(struct check_data *data);
bool check_has_answer(struct check_data *data);
int check_push_answer(PMEMpoolcheck *ppc);
struct pmempool_check_status *check_status_get_util(
struct check_status *status);
int check_status_is(struct check_status *status,
enum pmempool_check_msg_type type);
/* create info status */
#define CHECK_INFO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO, 0, __VA_ARGS__)
/* create info status and append error message based on errno */
#define CHECK_INFO_ERRNO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO,\
(uint32_t)errno, __VA_ARGS__)
/* create error status */
#define CHECK_ERR(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_ERROR, 0, __VA_ARGS__)
/* create question status */
#define CHECK_ASK(ppc, question, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, question,\
__VA_ARGS__)
#define CHECK_NOT_COMPLETE(loc, steps)\
((loc)->step != CHECK_STEP_COMPLETE &&\
((steps)[(loc)->step].check != NULL ||\
(steps)[(loc)->step].fix != NULL))
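/*
 * Illustrative usage sketch, not part of the original header: every check
 * module drives its own steps[] table with the same loop, e.g.:
 */
#if 0
location *loc = check_get_step_data(ppc->data);
while (CHECK_NOT_COMPLETE(loc, steps)) {
	if (step_exe(ppc, loc))
		break;	/* error occurred or a question was asked */
}
#endif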
int check_answer_loop(PMEMpoolcheck *ppc, location *data,
void *ctx, int fail_on_no,
int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx));
int check_questions_sequence_validate(PMEMpoolcheck *ppc);
const char *check_get_time_str(time_t time);
const char *check_get_uuid_str(uuid_t uuid);
const char *check_get_pool_type_str(enum pool_type type);
void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap);
#ifdef _WIN32
void cache_to_utf8(struct check_data *data, char *buf, size_t size);
#endif
#define CHECK_IS(ppc, flag)\
util_flag_isset((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_IS_NOT(ppc, flag)\
util_flag_isclr((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_WITHOUT_FIXING(ppc)\
	(CHECK_IS_NOT(ppc, REPAIR) || CHECK_IS(ppc, DRY_RUN))
#ifdef __cplusplus
}
#endif
#endif
| 5,143 | 25.111675 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_bad_blocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_bad_blocks.c -- pre-check bad_blocks
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#include "set_badblocks.h"
#include "badblocks.h"
/*
* check_bad_blocks -- check poolset for bad_blocks
*/
void
check_bad_blocks(PMEMpoolcheck *ppc)
{
LOG(3, "ppc %p", ppc);
int ret;
if (!(ppc->pool->params.features.compat & POOL_FEAT_CHECK_BAD_BLOCKS)) {
/* skipping checking poolset for bad blocks */
ppc->result = CHECK_RESULT_CONSISTENT;
return;
}
if (ppc->pool->set_file->poolset) {
ret = badblocks_check_poolset(ppc->pool->set_file->poolset, 0);
} else {
ret = badblocks_check_file(ppc->pool->set_file->fname);
}
if (ret < 0) {
if (errno == ENOTSUP) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc, BB_NOT_SUPP);
return;
}
ppc->result = CHECK_RESULT_ERROR;
CHECK_ERR(ppc, "checking poolset for bad blocks failed -- '%s'",
ppc->path);
return;
}
if (ret > 0) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc,
"poolset contains bad blocks, use 'pmempool info --bad-blocks=yes' to print or 'pmempool sync --bad-blocks' to clear them");
}
}
| 1,329 | 20.803279 | 127 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/feature.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* feature.c -- implementation of pmempool_feature_(enable|disable|query)()
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include "libpmempool.h"
#include "util_pmem.h"
#include "pool_hdr.h"
#include "pool.h"
#define RW 0
#define RDONLY 1
#define FEATURE_INCOMPAT(X) \
(features_t)FEAT_INCOMPAT(X)
static const features_t f_singlehdr = FEAT_INCOMPAT(SINGLEHDR);
static const features_t f_cksum_2k = FEAT_INCOMPAT(CKSUM_2K);
static const features_t f_sds = FEAT_INCOMPAT(SDS);
static const features_t f_chkbb = FEAT_COMPAT(CHECK_BAD_BLOCKS);
#define FEAT_INVALID \
	{UINT32_MAX, UINT32_MAX, UINT32_MAX}
static const features_t f_invalid = FEAT_INVALID;
#define FEATURE_MAXPRINT ((size_t)1024)
/*
* buff_concat -- (internal) concat formatted string to string buffer
*/
static int
buff_concat(char *buff, size_t *pos, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
const size_t size = FEATURE_MAXPRINT - *pos - 1;
int ret = vsnprintf(buff + *pos, size, fmt, ap);
va_end(ap);
if (ret < 0) {
ERR("vsprintf");
return ret;
}
if ((size_t)ret >= size) {
ERR("buffer truncated %d >= %zu", ret, size);
return -1;
}
*pos += (size_t)ret;
return 0;
}
/*
* buff_concat_features -- (internal) concat features string to string buffer
*/
static int
buff_concat_features(char *buff, size_t *pos, features_t f)
{
return buff_concat(buff, pos,
"{compat 0x%x, incompat 0x%x, ro_compat 0x%x}",
f.compat, f.incompat, f.ro_compat);
}
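/*
 * Illustrative usage, not part of the original source ('expected' is a
 * hypothetical features_t value): building a mismatch message piecewise.
 */
#if 0
char msg[FEATURE_MAXPRINT];
size_t pos = 0;
if (buff_concat_features(msg, &pos, hdr.features) == 0 &&
    buff_concat(msg, &pos, "%s", " != ") == 0 &&
    buff_concat_features(msg, &pos, expected) == 0)
	ERR("features mismatch detected: %s", msg);
#endif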
/*
* poolset_close -- (internal) close pool set
*/
static void
poolset_close(struct pool_set *set)
{
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
util_unmap_hdr(PART(rep, p));
}
}
util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
/*
* features_check -- (internal) check if features are correct
*/
static int
features_check(features_t *features, struct pool_hdr *hdrp)
{
static char msg[FEATURE_MAXPRINT];
struct pool_hdr hdr;
memcpy(&hdr, hdrp, sizeof(hdr));
util_convert2h_hdr_nocheck(&hdr);
	/* (features != f_invalid) <=> features is set */
if (!util_feature_cmp(*features, f_invalid)) {
/* features from current and previous headers have to match */
if (!util_feature_cmp(*features, hdr.features)) {
size_t pos = 0;
if (buff_concat_features(msg, &pos, hdr.features))
goto err;
if (buff_concat(msg, &pos, "%s", " != "))
goto err;
if (buff_concat_features(msg, &pos, *features))
goto err;
ERR("features mismatch detected: %s", msg);
return -1;
} else {
return 0;
}
}
features_t unknown = util_get_unknown_features(
hdr.features, (features_t)POOL_FEAT_VALID);
/* all features are known */
if (util_feature_is_zero(unknown)) {
memcpy(features, &hdr.features, sizeof(*features));
return 0;
}
/* unknown features detected - print error message */
size_t pos = 0;
if (buff_concat_features(msg, &pos, unknown))
goto err;
ERR("invalid features detected: %s", msg);
err:
return -1;
}
/*
* get_pool_open_flags -- (internal) generate pool open flags
*/
static inline unsigned
get_pool_open_flags(struct pool_set *set, int rdonly)
{
unsigned flags = 0;
if (rdonly == RDONLY && !util_pool_has_device_dax(set))
flags = POOL_OPEN_COW;
flags |= POOL_OPEN_IGNORE_BAD_BLOCKS;
return flags;
}
/*
* get_mmap_flags -- (internal) generate mmap flags
*/
static inline int
get_mmap_flags(struct pool_set_part *part, int rdonly)
{
if (part->is_dev_dax)
return MAP_SHARED;
else
return rdonly ? MAP_PRIVATE : MAP_SHARED;
}
/*
* poolset_open -- (internal) open pool set
*/
static struct pool_set *
poolset_open(const char *path, int rdonly)
{
struct pool_set *set;
features_t features = FEAT_INVALID;
/* read poolset */
int ret = util_poolset_create_set(&set, path, 0, 0, true);
if (ret < 0) {
ERR("cannot open pool set -- '%s'", path);
goto err_poolset;
}
if (set->remote) {
ERR("poolsets with remote replicas are not supported");
errno = EINVAL;
goto err_open;
}
/* open a memory pool */
unsigned flags = get_pool_open_flags(set, rdonly);
if (util_pool_open_nocheck(set, flags))
goto err_open;
/* map all headers and check features */
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
struct pool_set_part *part = PART(rep, p);
int mmap_flags = get_mmap_flags(part, rdonly);
if (util_map_hdr(part, mmap_flags, rdonly)) {
part->hdr = NULL;
goto err_map_hdr;
}
if (features_check(&features, HDR(rep, p))) {
				ERR(
					"invalid features - replica #%u part #%u",
					r, p);
goto err_open;
}
}
}
return set;
err_map_hdr:
/* unmap all headers */
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
util_unmap_hdr(PART(rep, p));
}
}
err_open:
/* close the memory pool and release pool set structure */
util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_poolset:
return NULL;
}
/*
* get_hdr -- (internal) read header in host byte order
*/
static struct pool_hdr *
get_hdr(struct pool_set *set, unsigned rep, unsigned part)
{
static struct pool_hdr hdr;
/* copy header */
struct pool_hdr *hdrp = HDR(REP(set, rep), part);
memcpy(&hdr, hdrp, sizeof(hdr));
/* convert to host byte order and return */
util_convert2h_hdr_nocheck(&hdr);
return &hdr;
}
/*
* set_hdr -- (internal) convert header to little-endian, checksum and write
*/
static void
set_hdr(struct pool_set *set, unsigned rep, unsigned part, struct pool_hdr *src)
{
/* convert to little-endian and set new checksum */
const size_t skip_off = POOL_HDR_CSUM_END_OFF(src);
util_convert2le_hdr(src);
util_checksum(src, sizeof(*src), &src->checksum, 1, skip_off);
/* write header */
struct pool_replica *replica = REP(set, rep);
struct pool_hdr *dst = HDR(replica, part);
memcpy(dst, src, sizeof(*src));
util_persist_auto(PART(replica, part)->is_dev_dax, dst, sizeof(*src));
}
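/*
 * Illustrative note, not part of the original source: util_checksum() gets
 * the address of the checksum field itself (so that field is excluded from
 * the sum) plus a skip offset, so that the region past
 * POOL_HDR_CSUM_END_OFF -- e.g. the shutdown state -- can be rewritten
 * later without invalidating the header checksum.
 */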
typedef enum {
DISABLED,
ENABLED
} fstate_t;
#define FEATURE_IS_ENABLED_STR "feature already enabled: %s"
#define FEATURE_IS_DISABLED_STR "feature already disabled: %s"
/*
* require_feature_is -- (internal) check if required feature is enabled
* (or disabled)
*/
static int
require_feature_is(struct pool_set *set, features_t feature, fstate_t req_state)
{
struct pool_hdr *hdrp = get_hdr((set), 0, 0);
fstate_t state = util_feature_is_set(hdrp->features, feature)
? ENABLED : DISABLED;
if (state == req_state)
return 1;
const char *msg = (state == ENABLED)
? FEATURE_IS_ENABLED_STR : FEATURE_IS_DISABLED_STR;
LOG(3, msg, util_feature2str(feature, NULL));
return 0;
}
#define FEATURE_IS_NOT_ENABLED_PRIOR_STR "enable %s prior to %s %s"
#define FEATURE_IS_NOT_DISABLED_PRIOR_STR "disable %s prior to %s %s"
/*
* require_other_feature_is -- (internal) check if other feature is enabled
* (or disabled) in case the other feature has to be enabled (or disabled)
* prior to the main one
*/
static int
require_other_feature_is(struct pool_set *set, features_t other,
fstate_t req_state, features_t feature, const char *cause)
{
struct pool_hdr *hdrp = get_hdr((set), 0, 0);
fstate_t state = util_feature_is_set(hdrp->features, other)
? ENABLED : DISABLED;
if (state == req_state)
return 1;
const char *msg = (req_state == ENABLED)
? FEATURE_IS_NOT_ENABLED_PRIOR_STR
: FEATURE_IS_NOT_DISABLED_PRIOR_STR;
ERR(msg, util_feature2str(other, NULL),
cause, util_feature2str(feature, NULL));
return 0;
}
/*
* feature_set -- (internal) enable (or disable) feature
*/
static void
feature_set(struct pool_set *set, features_t feature, int value)
{
for (unsigned r = 0; r < set->nreplicas; ++r) {
for (unsigned p = 0; p < REP(set, r)->nparts; ++p) {
struct pool_hdr *hdrp = get_hdr(set, r, p);
if (value == ENABLED)
util_feature_enable(&hdrp->features, feature);
else
util_feature_disable(&hdrp->features, feature);
set_hdr(set, r, p, hdrp);
}
}
}
/*
* query_feature -- (internal) query feature value
*/
static int
query_feature(const char *path, features_t feature)
{
struct pool_set *set = poolset_open(path, RDONLY);
if (!set)
goto err_open;
struct pool_hdr *hdrp = get_hdr(set, 0, 0);
const int query = util_feature_is_set(hdrp->features, feature);
poolset_close(set);
return query;
err_open:
return -1;
}
/*
* unsupported_feature -- (internal) report unsupported feature
*/
static inline int
unsupported_feature(features_t feature)
{
ERR("unsupported feature: %s", util_feature2str(feature, NULL));
errno = EINVAL;
return -1;
}
/*
* enable_singlehdr -- (internal) enable POOL_FEAT_SINGLEHDR
*/
static int
enable_singlehdr(const char *path)
{
return unsupported_feature(f_singlehdr);
}
/*
* disable_singlehdr -- (internal) disable POOL_FEAT_SINGLEHDR
*/
static int
disable_singlehdr(const char *path)
{
return unsupported_feature(f_singlehdr);
}
/*
* query_singlehdr -- (internal) query POOL_FEAT_SINGLEHDR
*/
static int
query_singlehdr(const char *path)
{
return query_feature(path, f_singlehdr);
}
/*
* enable_checksum_2k -- (internal) enable POOL_FEAT_CKSUM_2K
*/
static int
enable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_cksum_2k, DISABLED))
feature_set(set, f_cksum_2k, ENABLED);
poolset_close(set);
return 0;
}
/*
* disable_checksum_2k -- (internal) disable POOL_FEAT_CKSUM_2K
*/
static int
disable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_cksum_2k, ENABLED))
goto exit;
/* check if POOL_FEAT_SDS is disabled */
if (!require_other_feature_is(set, f_sds, DISABLED,
f_cksum_2k, "disabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_cksum_2k, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_checksum_2k -- (internal) query POOL_FEAT_CKSUM_2K
*/
static int
query_checksum_2k(const char *path)
{
return query_feature(path, f_cksum_2k);
}
/*
* enable_shutdown_state -- (internal) enable POOL_FEAT_SDS
*/
static int
enable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_sds, DISABLED))
goto exit;
/* check if POOL_FEAT_CKSUM_2K is enabled */
if (!require_other_feature_is(set, f_cksum_2k, ENABLED,
f_sds, "enabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_sds, ENABLED);
exit:
poolset_close(set);
return ret;
}
/*
* reset_shutdown_state -- zero all shutdown structures
*/
static void
reset_shutdown_state(struct pool_set *set)
{
for (unsigned rep = 0; rep < set->nreplicas; ++rep) {
for (unsigned part = 0; part < REP(set, rep)->nparts; ++part) {
struct pool_hdr *hdrp = HDR(REP(set, rep), part);
shutdown_state_init(&hdrp->sds, REP(set, rep));
}
}
}
/*
* disable_shutdown_state -- (internal) disable POOL_FEAT_SDS
*/
static int
disable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_sds, ENABLED)) {
feature_set(set, f_sds, DISABLED);
reset_shutdown_state(set);
}
poolset_close(set);
return 0;
}
/*
* query_shutdown_state -- (internal) query POOL_FEAT_SDS
*/
static int
query_shutdown_state(const char *path)
{
return query_feature(path, f_sds);
}
/*
* enable_badblocks_checking -- (internal) enable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
enable_badblocks_checking(const char *path)
{
#ifdef _WIN32
ERR("bad blocks checking is not supported on Windows");
return -1;
#else
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_chkbb, DISABLED))
feature_set(set, f_chkbb, ENABLED);
poolset_close(set);
return 0;
#endif
}
/*
* disable_badblocks_checking -- (internal) disable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
disable_badblocks_checking(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_chkbb, ENABLED))
goto exit;
feature_set(set, f_chkbb, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_badblocks_checking -- (internal) query POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
query_badblocks_checking(const char *path)
{
return query_feature(path, f_chkbb);
}
struct feature_funcs {
int (*enable)(const char *);
int (*disable)(const char *);
int (*query)(const char *);
};
static struct feature_funcs features[] = {
{
.enable = enable_singlehdr,
.disable = disable_singlehdr,
.query = query_singlehdr
},
{
.enable = enable_checksum_2k,
.disable = disable_checksum_2k,
.query = query_checksum_2k
},
{
.enable = enable_shutdown_state,
.disable = disable_shutdown_state,
.query = query_shutdown_state
},
{
.enable = enable_badblocks_checking,
.disable = disable_badblocks_checking,
.query = query_badblocks_checking
},
};
#define FEATURE_FUNCS_MAX ARRAY_SIZE(features)
/*
* are_flags_valid -- (internal) check if flags are valid
*/
static inline int
are_flags_valid(unsigned flags)
{
if (flags != 0) {
ERR("invalid flags: 0x%x", flags);
errno = EINVAL;
return 0;
}
return 1;
}
/*
* is_feature_valid -- (internal) check if feature is valid
*/
static inline int
is_feature_valid(uint32_t feature)
{
if (feature >= FEATURE_FUNCS_MAX) {
ERR("invalid feature: 0x%x", feature);
errno = EINVAL;
return 0;
}
return 1;
}
/*
* pmempool_feature_enableU -- enable pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].enable(path);
}
/*
* pmempool_feature_disableU -- disable pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].disable(path);
}
/*
* pmempool_feature_queryU -- query pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
/*
	 * XXX: Windows does not allow function calls in constant expressions
*/
#ifndef _WIN32
#define CHECK_INCOMPAT_MAPPING(FEAT, ENUM) \
COMPILE_ERROR_ON( \
util_feature2pmempool_feature(FEATURE_INCOMPAT(FEAT)) != ENUM)
CHECK_INCOMPAT_MAPPING(SINGLEHDR, PMEMPOOL_FEAT_SINGLEHDR);
CHECK_INCOMPAT_MAPPING(CKSUM_2K, PMEMPOOL_FEAT_CKSUM_2K);
CHECK_INCOMPAT_MAPPING(SDS, PMEMPOOL_FEAT_SHUTDOWN_STATE);
#undef CHECK_INCOMPAT_MAPPING
#endif
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].query(path);
}
#ifndef _WIN32
/*
* pmempool_feature_enable -- enable pool set feature
*/
int
pmempool_feature_enable(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_enableU(path, feature, flags);
}
#else
/*
* pmempool_feature_enableW -- enable pool set feature as widechar
*/
int
pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_enableU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
#ifndef _WIN32
/*
* pmempool_feature_disable -- disable pool set feature
*/
int
pmempool_feature_disable(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_disableU(path, feature, flags);
}
#else
/*
* pmempool_feature_disableW -- disable pool set feature as widechar
*/
int
pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_disableU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
#ifndef _WIN32
/*
* pmempool_feature_query -- query pool set feature
*/
int
pmempool_feature_query(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_queryU(path, feature, flags);
}
#else
/*
* pmempool_feature_queryW -- query pool set feature as widechar
*/
int
pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_queryU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
| 17,344 | 20.955696 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_btt_map_flog.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* check_btt_map_flog.c -- check BTT Map and Flog
*/
#include <stdint.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum questions {
Q_REPAIR_MAP,
Q_REPAIR_FLOG,
};
/*
* flog_read -- (internal) read and convert flog from file
*/
static int
flog_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff;
arenap->flogsize = btt_flog_size(arenap->btt_info.nfree);
arenap->flog = malloc(arenap->flogsize);
if (!arenap->flog) {
ERR("!malloc");
goto error_malloc;
}
if (pool_read(ppc->pool, arenap->flog, arenap->flogsize, flogoff))
goto error_read;
uint8_t *ptr = arenap->flog;
uint32_t i;
for (i = 0; i < arenap->btt_info.nfree; i++) {
struct btt_flog *flog = (struct btt_flog *)ptr;
btt_flog_convert2h(&flog[0]);
btt_flog_convert2h(&flog[1]);
ptr += BTT_FLOG_PAIR_ALIGN;
}
return 0;
error_read:
free(arenap->flog);
arenap->flog = NULL;
error_malloc:
return -1;
}
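/*
 * Illustrative note, not part of the original source: each of the nfree
 * flog slots is a *pair* of btt_flog records, BTT_FLOG_PAIR_ALIGN bytes
 * apart; of the two, the record with the newer 2-bit cyclic sequence
 * number (01 -> 10 -> 11 -> 01) is current. The loop above only byte-swaps
 * both members; picking the valid one is done later via
 * btt_flog_get_valid().
 */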
/*
* map_read -- (internal) read and convert map from file
*/
static int
map_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff;
arenap->mapsize = btt_map_size(arenap->btt_info.external_nlba);
ASSERT(arenap->mapsize != 0);
arenap->map = malloc(arenap->mapsize);
if (!arenap->map) {
ERR("!malloc");
goto error_malloc;
}
if (pool_read(ppc->pool, arenap->map, arenap->mapsize, mapoff)) {
goto error_read;
}
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++)
arenap->map[i] = le32toh(arenap->map[i]);
return 0;
error_read:
free(arenap->map);
arenap->map = NULL;
error_malloc:
return -1;
}
/*
* list_item -- item for simple list
*/
struct list_item {
PMDK_LIST_ENTRY(list_item) next;
uint32_t val;
};
/*
* list -- simple list for storing numbers
*/
struct list {
PMDK_LIST_HEAD(listhead, list_item) head;
uint32_t count;
};
/*
* list_alloc -- (internal) allocate an empty list
*/
static struct list *
list_alloc(void)
{
struct list *list = malloc(sizeof(struct list));
if (!list) {
ERR("!malloc");
return NULL;
}
PMDK_LIST_INIT(&list->head);
list->count = 0;
return list;
}
/*
* list_push -- (internal) insert new element to the list
*/
static struct list_item *
list_push(struct list *list, uint32_t val)
{
struct list_item *item = malloc(sizeof(*item));
if (!item) {
ERR("!malloc");
return NULL;
}
item->val = val;
list->count++;
PMDK_LIST_INSERT_HEAD(&list->head, item, next);
return item;
}
/*
* list_pop -- (internal) pop element from list head
*/
static int
list_pop(struct list *list, uint32_t *valp)
{
if (!PMDK_LIST_EMPTY(&list->head)) {
struct list_item *i = PMDK_LIST_FIRST(&list->head);
PMDK_LIST_REMOVE(i, next);
if (valp)
*valp = i->val;
free(i);
list->count--;
return 1;
}
return 0;
}
/*
* list_free -- (internal) free the list
*/
static void
list_free(struct list *list)
{
while (list_pop(list, NULL))
;
free(list);
}
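/*
 * Illustrative usage, not part of the original source: the helpers above
 * implement a simple LIFO of uint32_t values.
 */
#if 0
struct list *l = list_alloc();
if (l) {
	uint32_t v;
	list_push(l, 7);
	list_push(l, 42);
	while (list_pop(l, &v))
		/* pops 42 first, then 7 (head insertion) */;
	list_free(l);
}
#endif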
/*
* cleanup -- (internal) prepare resources for map and flog check
*/
static int
cleanup(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->list_unmap)
list_free(loc->list_unmap);
if (loc->list_flog_inval)
list_free(loc->list_flog_inval);
if (loc->list_inval)
list_free(loc->list_inval);
if (loc->fbitmap)
free(loc->fbitmap);
if (loc->bitmap)
free(loc->bitmap);
if (loc->dup_bitmap)
free(loc->dup_bitmap);
return 0;
}
/*
* init -- (internal) initialize map and flog check
*/
static int
init(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap = loc->arenap;
/* read flog and map entries */
if (flog_read(ppc, arenap)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Flog", arenap->id);
goto error;
}
if (map_read(ppc, arenap)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Map", arenap->id);
goto error;
}
/* create bitmaps for checking duplicated blocks */
uint32_t bitmapsize = howmany(arenap->btt_info.internal_nlba, 8);
loc->bitmap = calloc(bitmapsize, 1);
if (!loc->bitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for blocks "
"bitmap", arenap->id);
goto error;
}
loc->dup_bitmap = calloc(bitmapsize, 1);
if (!loc->dup_bitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for "
"duplicated blocks bitmap", arenap->id);
goto error;
}
loc->fbitmap = calloc(bitmapsize, 1);
if (!loc->fbitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for BTT Flog "
"bitmap", arenap->id);
goto error;
}
/* list of invalid map entries */
loc->list_inval = list_alloc();
if (!loc->list_inval) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for invalid BTT map "
"entries list", arenap->id);
goto error;
}
/* list of invalid flog entries */
loc->list_flog_inval = list_alloc();
if (!loc->list_flog_inval) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for invalid BTT Flog "
"entries list", arenap->id);
goto error;
}
/* list of unmapped blocks */
loc->list_unmap = list_alloc();
if (!loc->list_unmap) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for unmaped blocks "
"list", arenap->id);
goto error;
}
return 0;
error:
ppc->result = CHECK_RESULT_ERROR;
cleanup(ppc, loc);
return -1;
}
/*
* map_get_postmap_lba -- extract postmap LBA from map entry
*/
static inline uint32_t
map_get_postmap_lba(struct arena *arenap, uint32_t i)
{
uint32_t entry = arenap->map[i];
/* if map record is in initial state (flags == 0b00) */
if (map_entry_is_initial(entry))
return i;
/* read postmap LBA otherwise */
return entry & BTT_MAP_ENTRY_LBA_MASK;
}
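/*
 * Illustrative sketch, not part of the original source: a BTT map entry
 * packs flag bits above the postmap LBA, and an all-zero entry denotes the
 * initial state, i.e. an identity premap->postmap mapping -- hence the
 * function above returns the index itself for such entries.
 */
#if 0
uint32_t entry = arenap->map[i];
uint32_t flags = entry & ~BTT_MAP_ENTRY_LBA_MASK;	/* error/zero bits */
uint32_t lba = entry & BTT_MAP_ENTRY_LBA_MASK;		/* postmap block */
#endif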
/*
* map_entry_check -- (internal) check single map entry
*/
static int
map_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i)
{
struct arena *arenap = loc->arenap;
uint32_t lba = map_get_postmap_lba(arenap, i);
/* add duplicated and invalid entries to list */
if (lba < arenap->btt_info.internal_nlba) {
if (util_isset(loc->bitmap, lba)) {
CHECK_INFO(ppc, "arena %u: BTT Map entry %u duplicated "
"at %u", arenap->id, lba, i);
util_setbit(loc->dup_bitmap, lba);
if (!list_push(loc->list_inval, i))
return -1;
} else
util_setbit(loc->bitmap, lba);
} else {
CHECK_INFO(ppc, "arena %u: invalid BTT Map entry at %u",
arenap->id, i);
if (!list_push(loc->list_inval, i))
return -1;
}
return 0;
}
/*
* flog_entry_check -- (internal) check single flog entry
*/
static int
flog_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i,
uint8_t **ptr)
{
struct arena *arenap = loc->arenap;
/* flog entry consists of two btt_flog structures */
struct btt_flog *flog = (struct btt_flog *)*ptr;
int next;
struct btt_flog *flog_cur = btt_flog_get_valid(flog, &next);
/* insert invalid and duplicated indexes to list */
if (!flog_cur) {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
goto next;
}
uint32_t entry = flog_cur->old_map & BTT_MAP_ENTRY_LBA_MASK;
uint32_t new_entry = flog_cur->new_map & BTT_MAP_ENTRY_LBA_MASK;
/*
	 * Check if lba is in external_nlba range, and check if both old_map and
* new_map are in internal_nlba range.
*/
if (flog_cur->lba >= arenap->btt_info.external_nlba ||
entry >= arenap->btt_info.internal_nlba ||
new_entry >= arenap->btt_info.internal_nlba) {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
goto next;
}
if (util_isset(loc->fbitmap, entry)) {
		/*
		 * two flog entries hold the same free block
		 */
		CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry at %u",
			arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
} else if (util_isset(loc->bitmap, entry)) {
/* here we have probably an unfinished write */
if (util_isset(loc->bitmap, new_entry)) {
/* Both old_map and new_map are already used in map. */
CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry "
"at %u", arenap->id, i);
util_setbit(loc->dup_bitmap, new_entry);
if (!list_push(loc->list_flog_inval, i))
return -1;
} else {
/*
			 * Unfinished write. Next time the pool is opened, the map
* will be updated to new_map.
*/
util_setbit(loc->bitmap, new_entry);
util_setbit(loc->fbitmap, entry);
}
} else {
int flog_valid = 1;
/*
* Either flog entry is in its initial state:
* - current_btt_flog entry is first one in pair and
* - current_btt_flog.old_map == current_btt_flog.new_map and
* - current_btt_flog.seq == 0b01 and
* - second flog entry in pair is zeroed
* or
* current_btt_flog.old_map != current_btt_flog.new_map
*/
if (entry == new_entry)
flog_valid = (next == 1) && (flog_cur->seq == 1) &&
util_is_zeroed((const void *)&flog[1],
sizeof(flog[1]));
if (flog_valid) {
/* totally fine case */
util_setbit(loc->bitmap, entry);
util_setbit(loc->fbitmap, entry);
} else {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at "
"%u", arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
}
}
next:
*ptr += BTT_FLOG_PAIR_ALIGN;
return 0;
}
/*
* arena_map_flog_check -- (internal) check map and flog
*/
static int
arena_map_flog_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap = loc->arenap;
/* check map entries */
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++) {
if (map_entry_check(ppc, loc, i))
goto error_push;
}
/* check flog entries */
uint8_t *ptr = arenap->flog;
for (i = 0; i < arenap->btt_info.nfree; i++) {
if (flog_entry_check(ppc, loc, i, &ptr))
goto error_push;
}
/* check unmapped blocks and insert to list */
for (i = 0; i < arenap->btt_info.internal_nlba; i++) {
if (!util_isset(loc->bitmap, i)) {
CHECK_INFO(ppc, "arena %u: unmapped block %u",
arenap->id, i);
if (!list_push(loc->list_unmap, i))
goto error_push;
}
}
if (loc->list_unmap->count)
CHECK_INFO(ppc, "arena %u: number of unmapped blocks: %u",
arenap->id, loc->list_unmap->count);
if (loc->list_inval->count)
CHECK_INFO(ppc, "arena %u: number of invalid BTT Map entries: "
"%u", arenap->id, loc->list_inval->count);
if (loc->list_flog_inval->count)
CHECK_INFO(ppc, "arena %u: number of invalid BTT Flog entries: "
"%u", arenap->id, loc->list_flog_inval->count);
if (CHECK_IS_NOT(ppc, REPAIR) && loc->list_unmap->count > 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto cleanup;
}
/*
* We are able to repair if and only if number of unmapped blocks is
* equal to sum of invalid map and flog entries.
*/
if (loc->list_unmap->count != (loc->list_inval->count +
loc->list_flog_inval->count)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc, "arena %u: cannot repair BTT Map and Flog",
arenap->id);
goto cleanup;
}
if (CHECK_IS_NOT(ppc, ADVANCED) && loc->list_inval->count +
loc->list_flog_inval->count > 0) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, REQUIRE_ADVANCED);
CHECK_ERR(ppc, "BTT Map and / or BTT Flog contain invalid "
"entries");
check_end(ppc->data);
goto cleanup;
}
if (loc->list_inval->count > 0) {
CHECK_ASK(ppc, Q_REPAIR_MAP, "Do you want to repair invalid "
"BTT Map entries?");
}
if (loc->list_flog_inval->count > 0) {
CHECK_ASK(ppc, Q_REPAIR_FLOG, "Do you want to repair invalid "
"BTT Flog entries?");
}
return check_questions_sequence_validate(ppc);
error_push:
	CHECK_ERR(ppc, "arena %u: cannot allocate memory for list item",
arenap->id);
ppc->result = CHECK_RESULT_ERROR;
cleanup:
cleanup(ppc, loc);
return -1;
}
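/*
 * Worked example, illustrative only: with 2 invalid map entries, 1 invalid
 * flog entry and 3 unmapped blocks, 2 + 1 == 3 holds, so every bad entry
 * can be pointed at a distinct unmapped block and the arena is repairable.
 * With only 2 unmapped blocks the counts would not balance and the check
 * above ends with CHECK_RESULT_CANNOT_REPAIR.
 */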
/*
* arena_map_flog_fix -- (internal) fix map and flog
*/
static int
arena_map_flog_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
struct arena *arenap = loc->arenap;
uint32_t inval;
uint32_t unmap;
switch (question) {
case Q_REPAIR_MAP:
/*
		 * The first of a pair of duplicated map entries looks valid
		 * until the second one is found, so walk the whole map again
		 * and mark with the error flag every entry pointing to a
		 * postmap LBA known to be duplicated.
*/
for (uint32_t i = 0; i < arenap->btt_info.external_nlba; i++) {
uint32_t lba = map_get_postmap_lba(arenap, i);
if (lba >= arenap->btt_info.internal_nlba)
continue;
if (!util_isset(loc->dup_bitmap, lba))
continue;
arenap->map[i] = BTT_MAP_ENTRY_ERROR | lba;
util_clrbit(loc->dup_bitmap, lba);
CHECK_INFO(ppc,
"arena %u: storing 0x%x at %u BTT Map entry",
arenap->id, arenap->map[i], i);
}
/*
* repair invalid or duplicated map entries by using unmapped
* blocks
*/
while (list_pop(loc->list_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
arenap->map[inval] = unmap | BTT_MAP_ENTRY_ERROR;
CHECK_INFO(ppc, "arena %u: storing 0x%x at %u BTT Map "
"entry", arenap->id, arenap->map[inval], inval);
}
break;
case Q_REPAIR_FLOG:
/* repair invalid flog entries using unmapped blocks */
while (list_pop(loc->list_flog_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
struct btt_flog *flog = (struct btt_flog *)
(arenap->flog + inval * BTT_FLOG_PAIR_ALIGN);
memset(&flog[1], 0, sizeof(flog[1]));
uint32_t entry = unmap | BTT_MAP_ENTRY_ERROR;
flog[0].lba = inval;
flog[0].new_map = entry;
flog[0].old_map = entry;
flog[0].seq = 1;
CHECK_INFO(ppc, "arena %u: repairing BTT Flog at %u "
"with free block entry 0x%x", loc->arenap->id,
inval, entry);
}
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = init,
},
{
.check = arena_map_flog_check,
},
{
.fix = arena_map_flog_fix,
},
{
.check = cleanup,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
return 0;
cleanup(ppc, loc);
return -1;
}
/*
* check_btt_map_flog -- perform check and fixing of map and flog
*/
void
check_btt_map_flog(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
if (ppc->pool->blk_no_layout)
return;
/* initialize check */
if (!loc->arenap && loc->narena == 0 &&
ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
CHECK_INFO(ppc, "checking BTT Map and Flog");
loc->arenap = PMDK_TAILQ_FIRST(&ppc->pool->arenas);
loc->narena = 0;
}
while (loc->arenap != NULL) {
/* add info about checking next arena */
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS &&
loc->step == 0) {
CHECK_INFO(ppc, "arena %u: checking BTT Map and Flog",
loc->narena);
}
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
return;
}
/* jump to next arena */
loc->arenap = PMDK_TAILQ_NEXT(loc->arenap, next);
loc->narena++;
loc->step = 0;
}
}
| 15,734 | 21.937318 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_backup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_backup.c -- pre-check backup
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "file.h"
#include "os.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_OVERWRITE_EXISTING_FILE,
Q_OVERWRITE_EXISTING_PARTS
};
/*
* location_release -- (internal) release poolset structure
*/
static void
location_release(location *loc)
{
if (loc->set) {
util_poolset_free(loc->set);
loc->set = NULL;
}
}
/*
* backup_nonpoolset_requirements -- (internal) check backup requirements
*/
static int
backup_nonpoolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, "backup_path %s", ppc->backup_path);
int exists = util_file_exists(ppc->backup_path);
if (exists < 0) {
return CHECK_ERR(ppc,
"unable to access the backup destination: %s",
ppc->backup_path);
}
if (!exists) {
errno = 0;
return 0;
}
if ((size_t)util_file_get_size(ppc->backup_path) !=
ppc->pool->set_file->size) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc,
"destination of the backup does not match the size of the source pool file: %s",
ppc->backup_path);
}
if (CHECK_WITHOUT_FIXING(ppc)) {
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_FILE,
"destination of the backup already exists.|Do you want to overwrite it?");
return check_questions_sequence_validate(ppc);
}
/*
* backup_nonpoolset_overwrite -- (internal) overwrite pool
*/
static int
backup_nonpoolset_overwrite(PMEMpoolcheck *ppc, location *loc,
uint32_t question, void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_OVERWRITE_EXISTING_FILE:
if (pool_copy(ppc->pool, ppc->backup_path, 1 /* overwrite */)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* backup_nonpoolset_create -- (internal) create backup
*/
static int
backup_nonpoolset_create(PMEMpoolcheck *ppc, location *loc)
{
CHECK_INFO(ppc, "creating backup file: %s", ppc->backup_path);
if (pool_copy(ppc->pool, ppc->backup_path, 0)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
/*
* backup_poolset_requirements -- (internal) check backup requirements
*/
static int
backup_poolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, "backup_path %s", ppc->backup_path);
if (ppc->pool->set_file->poolset->nreplicas > 1) {
CHECK_INFO(ppc,
"backup of a poolset with multiple replicas is not supported");
goto err;
}
if (pool_set_parse(&loc->set, ppc->backup_path)) {
CHECK_INFO_ERRNO(ppc, "invalid poolset backup file: %s",
ppc->backup_path);
goto err;
}
if (loc->set->nreplicas > 1) {
CHECK_INFO(ppc,
"backup to a poolset with multiple replicas is not supported");
goto err_poolset;
}
ASSERTeq(loc->set->nreplicas, 1);
struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
struct pool_replica *drep = loc->set->replica[0];
if (srep->nparts != drep->nparts) {
CHECK_INFO(ppc,
"number of part files in the backup poolset must match number of part files in the source poolset");
goto err_poolset;
}
int overwrite_required = 0;
for (unsigned p = 0; p < srep->nparts; p++) {
int exists = util_file_exists(drep->part[p].path);
if (exists < 0) {
CHECK_INFO(ppc,
"unable to access the part of the destination poolset: %s",
ppc->backup_path);
goto err_poolset;
}
if (srep->part[p].filesize != drep->part[p].filesize) {
CHECK_INFO(ppc,
"size of the part %u of the backup poolset does not match source poolset",
p);
goto err_poolset;
}
if (!exists) {
errno = 0;
continue;
}
overwrite_required = true;
if ((size_t)util_file_get_size(drep->part[p].path) !=
srep->part[p].filesize) {
CHECK_INFO(ppc,
"destination of the backup part does not match size of the source part file: %s",
drep->part[p].path);
goto err_poolset;
}
}
if (CHECK_WITHOUT_FIXING(ppc)) {
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
if (overwrite_required) {
CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_PARTS,
"part files of the destination poolset of the backup already exist.|"
"Do you want to overwrite them?");
}
return check_questions_sequence_validate(ppc);
err_poolset:
location_release(loc);
err:
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "unable to backup poolset");
}
/*
* backup_poolset -- (internal) backup the poolset
*/
static int
backup_poolset(PMEMpoolcheck *ppc, location *loc, int overwrite)
{
struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
struct pool_replica *drep = loc->set->replica[0];
for (unsigned p = 0; p < srep->nparts; p++) {
if (overwrite == 0) {
CHECK_INFO(ppc, "creating backup file: %s",
drep->part[p].path);
}
if (pool_set_part_copy(&drep->part[p], &srep->part[p],
overwrite)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
CHECK_INFO(ppc, "unable to create backup file");
return CHECK_ERR(ppc, "unable to backup poolset");
}
}
return 0;
}
/*
* backup_poolset_overwrite -- (internal) backup poolset with overwrite
*/
static int
backup_poolset_overwrite(PMEMpoolcheck *ppc, location *loc,
uint32_t question, void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_OVERWRITE_EXISTING_PARTS:
if (backup_poolset(ppc, loc, 1 /* overwrite */)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* backup_poolset_create -- (internal) backup poolset
*/
static int
backup_poolset_create(PMEMpoolcheck *ppc, location *loc)
{
if (backup_poolset(ppc, loc, 0)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
int poolset;
};
static const struct step steps[] = {
{
.check = backup_nonpoolset_requirements,
.poolset = false,
},
{
.fix = backup_nonpoolset_overwrite,
.poolset = false,
},
{
.check = backup_nonpoolset_create,
.poolset = false
},
{
.check = backup_poolset_requirements,
.poolset = true,
},
{
.fix = backup_poolset_overwrite,
.poolset = true,
},
{
.check = backup_poolset_create,
.poolset = true
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (step->poolset == 0 && ppc->pool->params.is_poolset == 1)
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (!check_has_answer(ppc->data))
return 0;
if (check_answer_loop(ppc, loc, NULL, 1, step->fix))
return -1;
ppc->result = CHECK_RESULT_CONSISTENT;
return 0;
}
/*
* check_backup -- perform backup if requested and needed
*/
void
check_backup(PMEMpoolcheck *ppc)
{
LOG(3, "backup_path %s", ppc->backup_path);
if (ppc->backup_path == NULL)
return;
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 7,968 | 20.654891 | 103 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_btt_info.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_btt_info.c -- check BTT Info
*/
#include <stdlib.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "util.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_RESTORE_FROM_BACKUP,
Q_REGENERATE,
Q_REGENERATE_CHECKSUM,
Q_RESTORE_FROM_HEADER
};
/*
* location_release -- (internal) release check_btt_info_loc allocations
*/
static void
location_release(location *loc)
{
free(loc->arenap);
loc->arenap = NULL;
}
/*
* btt_info_checksum -- (internal) check BTT Info checksum
*/
static int
btt_info_checksum(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
loc->arenap = calloc(1, sizeof(struct arena));
if (!loc->arenap) {
ERR("!calloc");
ppc->result = CHECK_RESULT_INTERNAL_ERROR;
CHECK_ERR(ppc, "cannot allocate memory for arena");
goto error_cleanup;
}
/* read the BTT Info header at well known offset */
if (pool_read(ppc->pool, &loc->arenap->btt_info,
sizeof(loc->arenap->btt_info), loc->offset)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Info header",
loc->arenap->id);
ppc->result = CHECK_RESULT_ERROR;
goto error_cleanup;
}
loc->arenap->id = ppc->pool->narenas;
/* BLK is consistent even without BTT Layout */
if (ppc->pool->params.type == POOL_TYPE_BLK) {
int is_zeroed = util_is_zeroed((const void *)
&loc->arenap->btt_info, sizeof(loc->arenap->btt_info));
if (is_zeroed) {
CHECK_INFO(ppc, "BTT Layout not written");
loc->step = CHECK_STEP_COMPLETE;
ppc->pool->blk_no_layout = 1;
location_release(loc);
check_end(ppc->data);
return 0;
}
}
/* check consistency of BTT Info */
if (pool_btt_info_valid(&loc->arenap->btt_info)) {
CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
loc->arenap->id);
loc->valid.btti_header = 1;
} else if (CHECK_IS_NOT(ppc, REPAIR)) {
CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
loc->arenap->id);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto error_cleanup;
}
return 0;
error_cleanup:
location_release(loc);
return -1;
}
/*
* btt_info_backup -- (internal) check BTT Info backup
*/
static int
btt_info_backup(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
/* check BTT Info backup consistency */
const size_t btt_info_size = sizeof(ppc->pool->bttc.btt_info);
uint64_t btt_info_off = pool_next_arena_offset(ppc->pool, loc->offset) -
btt_info_size;
if (pool_read(ppc->pool, &ppc->pool->bttc.btt_info, btt_info_size,
btt_info_off)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Info backup",
loc->arenap->id);
goto error;
}
/* check whether this BTT Info backup is valid */
if (pool_btt_info_valid(&ppc->pool->bttc.btt_info)) {
loc->valid.btti_backup = 1;
/* restore BTT Info from backup */
if (!loc->valid.btti_header && CHECK_IS(ppc, REPAIR))
CHECK_ASK(ppc, Q_RESTORE_FROM_BACKUP, "arena %u: BTT "
"Info header checksum incorrect.|Restore BTT "
"Info from backup?", loc->arenap->id);
}
/*
	 * if the BTT Info backup requires repairs, it will be fixed in
	 * further steps
*/
return check_questions_sequence_validate(ppc);
error:
ppc->result = CHECK_RESULT_ERROR;
location_release(loc);
return -1;
}
/*
* btt_info_from_backup_fix -- (internal) fix BTT Info using its backup
*/
static int
btt_info_from_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_RESTORE_FROM_BACKUP:
CHECK_INFO(ppc,
"arena %u: restoring BTT Info header from backup",
loc->arenap->id);
memcpy(&loc->arenap->btt_info, &ppc->pool->bttc.btt_info,
sizeof(loc->arenap->btt_info));
loc->valid.btti_header = 1;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
 * btt_info_gen -- (internal) ask whether to regenerate BTT Info
*/
static int
btt_info_gen(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->valid.btti_header)
return 0;
ASSERT(CHECK_IS(ppc, REPAIR));
if (!loc->pool_valid.btti_offset) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
		return CHECK_ERR(ppc, "cannot find any valid BTT Info");
}
CHECK_ASK(ppc, Q_REGENERATE,
"arena %u: BTT Info header checksum incorrect.|Do you want to "
"regenerate BTT Info?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
}
/*
* btt_info_gen_fix -- (internal) fix by regenerating BTT Info
*/
static int
btt_info_gen_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_REGENERATE:
CHECK_INFO(ppc, "arena %u: regenerating BTT Info header",
loc->arenap->id);
/*
		 * There is no valid BTT Info backup, so take the first valid
		 * BTT Info found in the pool and derive this arena's BTT Info
		 * from it.
*/
uint64_t arena_size = ppc->pool->set_file->size - loc->offset;
if (arena_size > BTT_MAX_ARENA)
arena_size = BTT_MAX_ARENA;
uint64_t space_left = ppc->pool->set_file->size - loc->offset -
arena_size;
struct btt_info *bttd = &loc->arenap->btt_info;
struct btt_info *btts = &loc->pool_valid.btti;
btt_info_convert2h(bttd);
/*
* all valid BTT Info structures have the same signature, UUID,
* parent UUID, flags, major, minor, external LBA size, internal
* LBA size, nfree, info size and data offset
*/
memcpy(bttd->sig, btts->sig, BTTINFO_SIG_LEN);
memcpy(bttd->uuid, btts->uuid, BTTINFO_UUID_LEN);
memcpy(bttd->parent_uuid, btts->parent_uuid, BTTINFO_UUID_LEN);
memset(bttd->unused, 0, BTTINFO_UNUSED_LEN);
bttd->flags = btts->flags;
bttd->major = btts->major;
bttd->minor = btts->minor;
/* other parameters can be calculated */
if (btt_info_set(bttd, btts->external_lbasize, btts->nfree,
arena_size, space_left)) {
			CHECK_ERR(ppc, "cannot restore BTT Info");
return -1;
}
ASSERTeq(bttd->external_lbasize, btts->external_lbasize);
ASSERTeq(bttd->internal_lbasize, btts->internal_lbasize);
ASSERTeq(bttd->nfree, btts->nfree);
ASSERTeq(bttd->infosize, btts->infosize);
ASSERTeq(bttd->dataoff, btts->dataoff);
return 0;
default:
ERR("not implemented question id: %u", question);
return -1;
}
}
/*
* btt_info_checksum_retry -- (internal) check BTT Info checksum
*/
static int
btt_info_checksum_retry(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->valid.btti_header)
return 0;
btt_info_convert2le(&loc->arenap->btt_info);
/* check consistency of BTT Info */
if (pool_btt_info_valid(&loc->arenap->btt_info)) {
CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
loc->arenap->id);
loc->valid.btti_header = 1;
return 0;
}
if (CHECK_IS_NOT(ppc, ADVANCED)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, REQUIRE_ADVANCED);
CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
loc->arenap->id);
check_end(ppc->data);
goto error_cleanup;
}
CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM,
"arena %u: BTT Info header checksum incorrect.|Do you want to "
"regenerate BTT Info checksum?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
error_cleanup:
location_release(loc);
return -1;
}
/*
* btt_info_checksum_fix -- (internal) fix by regenerating BTT Info checksum
*/
static int
btt_info_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_REGENERATE_CHECKSUM:
util_checksum(&loc->arenap->btt_info, sizeof(struct btt_info),
&loc->arenap->btt_info.checksum, 1, 0);
loc->valid.btti_header = 1;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* btt_info_backup_checksum -- (internal) check BTT Info backup checksum
*/
static int
btt_info_backup_checksum(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
ASSERT(loc->valid.btti_header);
if (loc->valid.btti_backup)
return 0;
	/* the BTT Info backup is not valid, so it must be fixed */
if (CHECK_IS_NOT(ppc, REPAIR)) {
CHECK_ERR(ppc,
"arena %u: BTT Info backup checksum incorrect",
loc->arenap->id);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto error_cleanup;
}
CHECK_ASK(ppc, Q_RESTORE_FROM_HEADER,
"arena %u: BTT Info backup checksum incorrect.|Do you want to "
"restore it from BTT Info header?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
error_cleanup:
location_release(loc);
return -1;
}
/*
 * btt_info_backup_fix -- (internal) prepare to restore the BTT Info backup
 * from the header
*/
static int
btt_info_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_RESTORE_FROM_HEADER:
		/* the BTT Info backup will be restored in the check_write step */
CHECK_INFO(ppc,
"arena %u: restoring BTT Info backup from header",
loc->arenap->id);
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
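/*
 * Each entry below provides either a check callback or a fix callback,
 * never both; step_exe() dispatches on which one is set, and the all-NULL
 * entry terminates the sequence.
 */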
static const struct step steps[] = {
{
.check = btt_info_checksum,
},
{
.check = btt_info_backup,
},
{
.fix = btt_info_from_backup_fix,
},
{
.check = btt_info_gen,
},
{
.fix = btt_info_gen_fix,
},
{
.check = btt_info_checksum_retry,
},
{
.fix = btt_info_checksum_fix,
},
{
.check = btt_info_backup_checksum,
},
{
.fix = btt_info_backup_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
return 0;
if (check_has_error(ppc->data))
location_release(loc);
return -1;
}
/*
* check_btt_info -- entry point for btt info check
*/
void
check_btt_info(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
uint64_t nextoff = 0;
/* initialize check */
if (!loc->offset) {
CHECK_INFO(ppc, "checking BTT Info headers");
loc->offset = sizeof(struct pool_hdr);
if (ppc->pool->params.type == POOL_TYPE_BLK)
loc->offset += ALIGN_UP(sizeof(struct pmemblk) -
sizeof(struct pool_hdr),
BLK_FORMAT_DATA_ALIGN);
loc->pool_valid.btti_offset = pool_get_first_valid_btt(
ppc->pool, &loc->pool_valid.btti, loc->offset, NULL);
		/* without a valid BTT Info we cannot proceed */
if (!loc->pool_valid.btti_offset) {
if (ppc->pool->params.type == POOL_TYPE_BTT) {
CHECK_ERR(ppc,
"can not find any valid BTT Info");
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return;
}
} else
btt_info_convert2h(&loc->pool_valid.btti);
}
do {
/* jump to next offset */
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
loc->offset += nextoff;
loc->step = 0;
loc->valid.btti_header = 0;
loc->valid.btti_backup = 0;
}
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc) || ppc->pool->blk_no_layout == 1)
return;
}
/* save offset and insert BTT to cache for next steps */
loc->arenap->offset = loc->offset;
loc->arenap->valid = true;
check_insert_arena(ppc, loc->arenap);
nextoff = le64toh(loc->arenap->btt_info.nextoff);
} while (nextoff > 0);
}
| 11,735 | 22.011765 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_util.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_util.c -- check utility functions
*/
#include <stdio.h>
#include <stdint.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#define CHECK_END UINT_MAX
/* separate info part of message from question part of message */
#define MSG_SEPARATOR '|'
/* error part of message must have '.' at the end */
#define MSG_PLACE_OF_SEPARATION '.'
#define MAX_MSG_STR_SIZE 8192
#define CHECK_ANSWER_YES "yes"
#define CHECK_ANSWER_NO "no"
#define STR_MAX 256
#define TIME_STR_FMT "%a %b %d %Y %H:%M:%S"
#define UUID_STR_MAX 37
enum check_answer {
PMEMPOOL_CHECK_ANSWER_EMPTY,
PMEMPOOL_CHECK_ANSWER_YES,
PMEMPOOL_CHECK_ANSWER_NO,
PMEMPOOL_CHECK_ANSWER_DEFAULT,
};
/* queue of check statuses */
struct check_status {
PMDK_TAILQ_ENTRY(check_status) next;
struct pmempool_check_status status;
unsigned question;
enum check_answer answer;
char *msg;
};
PMDK_TAILQ_HEAD(check_status_head, check_status);
/* check control context */
struct check_data {
unsigned step;
location step_data;
struct check_status *error;
struct check_status_head infos;
struct check_status_head questions;
struct check_status_head answers;
struct check_status *check_status_cache;
};
/*
* check_data_alloc -- allocate and initialize check_data structure
*/
struct check_data *
check_data_alloc(void)
{
LOG(3, NULL);
struct check_data *data = calloc(1, sizeof(*data));
if (data == NULL) {
ERR("!calloc");
return NULL;
}
PMDK_TAILQ_INIT(&data->infos);
PMDK_TAILQ_INIT(&data->questions);
PMDK_TAILQ_INIT(&data->answers);
return data;
}
/*
* check_data_free -- clean and deallocate check_data
*/
void
check_data_free(struct check_data *data)
{
LOG(3, NULL);
if (data->error != NULL) {
free(data->error);
data->error = NULL;
}
if (data->check_status_cache != NULL) {
free(data->check_status_cache);
data->check_status_cache = NULL;
}
while (!PMDK_TAILQ_EMPTY(&data->infos)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->infos);
PMDK_TAILQ_REMOVE(&data->infos, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->questions)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->questions);
PMDK_TAILQ_REMOVE(&data->questions, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->answers)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, statp, next);
free(statp);
}
free(data);
}
/*
* check_step_get - return current check step number
*/
uint32_t
check_step_get(struct check_data *data)
{
return data->step;
}
/*
* check_step_inc -- move to next step number
*/
void
check_step_inc(struct check_data *data)
{
if (check_is_end_util(data))
return;
++data->step;
memset(&data->step_data, 0, sizeof(location));
}
/*
* check_get_step_data -- return pointer to check step data
*/
location *
check_get_step_data(struct check_data *data)
{
return &data->step_data;
}
/*
* check_end -- mark check as ended
*/
void
check_end(struct check_data *data)
{
LOG(3, NULL);
data->step = CHECK_END;
}
/*
* check_is_end_util -- return if check has ended
*/
int
check_is_end_util(struct check_data *data)
{
return data->step == CHECK_END;
}
/*
* status_alloc -- (internal) allocate and initialize check_status
*/
static inline struct check_status *
status_alloc(void)
{
struct check_status *status = malloc(sizeof(*status));
if (!status)
FATAL("!malloc");
status->msg = malloc(sizeof(char) * MAX_MSG_STR_SIZE);
if (!status->msg) {
free(status);
FATAL("!malloc");
}
status->status.str.msg = status->msg;
status->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
status->question = CHECK_INVALID_QUESTION;
return status;
}
/*
* status_release -- (internal) release check_status
*/
static void
status_release(struct check_status *status)
{
#ifdef _WIN32
/* dealloc duplicate string after conversion */
if (status->status.str.msg != status->msg)
free((void *)status->status.str.msg);
#endif
free(status->msg);
free(status);
}
/*
* status_msg_info_only -- (internal) separate info part of the message
*
 * If the message is in the form "info.|question", it is modified in place
 * to "info\0|question" so that only the info part remains visible.
*/
static inline int
status_msg_info_only(const char *msg)
{
char *sep = strchr(msg, MSG_SEPARATOR);
if (sep) {
ASSERTne(sep, msg);
--sep;
ASSERTeq(*sep, MSG_PLACE_OF_SEPARATION);
*sep = '\0';
return 0;
}
return -1;
}
/*
* status_msg_info_and_question -- (internal) join info and question
*
 * If the message is in the form "info.|question", it replaces the
 * MSG_SEPARATOR '|' with a space to get "info. question".
*/
static inline int
status_msg_info_and_question(const char *msg)
{
char *sep = strchr(msg, MSG_SEPARATOR);
if (sep) {
*sep = ' ';
return 0;
}
return -1;
}
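/*
 * Example (hypothetical message): given "BTT Info corrupt.|Fix it?",
 * status_msg_info_only() yields "BTT Info corrupt" while
 * status_msg_info_and_question() yields "BTT Info corrupt. Fix it?".
 */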
/*
* status_push -- (internal) push single status object
*/
static int
status_push(PMEMpoolcheck *ppc, struct check_status *st, uint32_t question)
{
if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) {
ASSERTeq(ppc->data->error, NULL);
ppc->data->error = st;
return -1;
} else if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_INFO) {
if (CHECK_IS(ppc, VERBOSE))
PMDK_TAILQ_INSERT_TAIL(&ppc->data->infos, st, next);
else
check_status_release(ppc, st);
return 0;
}
/* st->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION */
if (CHECK_IS_NOT(ppc, REPAIR)) {
/* error status */
if (status_msg_info_only(st->msg)) {
ERR("no error message for the user");
st->msg[0] = '\0';
}
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_ERROR;
return status_push(ppc, st, question);
}
if (CHECK_IS(ppc, ALWAYS_YES)) {
if (!status_msg_info_only(st->msg)) {
/* information status */
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_INFO;
status_push(ppc, st, question);
st = status_alloc();
}
/* answer status */
ppc->result = CHECK_RESULT_PROCESS_ANSWERS;
st->question = question;
st->answer = PMEMPOOL_CHECK_ANSWER_YES;
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_QUESTION;
PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers, st, next);
} else {
/* question message */
status_msg_info_and_question(st->msg);
st->question = question;
ppc->result = CHECK_RESULT_ASK_QUESTIONS;
st->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
PMDK_TAILQ_INSERT_TAIL(&ppc->data->questions, st, next);
}
return 0;
}
/*
* check_status_create -- create single status, push it to proper queue
*
 * The MSG_SEPARATOR character in fmt is treated as the message separator.
 * When creating a question while the check arguments do not allow making any
 * changes (so asking any question is pointless), the part of the message
 * before the MSG_SEPARATOR character is used to create an error message.
 * The character just before the separator must be a MSG_PLACE_OF_SEPARATION
 * character. Returns a nonzero value if an error status was created.
 *
 * The arg is an additional argument for the specified type of status.
*/
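/*
 * Example (hypothetical question id Q_FIX_HDR): when repairs are not
 * enabled, check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION,
 * Q_FIX_HDR, "header corrupt.|Fix the header?") turns the question into
 * the error status "header corrupt" and returns a nonzero value.
 */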
int
check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
uint32_t arg, const char *fmt, ...)
{
if (CHECK_IS_NOT(ppc, VERBOSE) && type == PMEMPOOL_CHECK_MSG_TYPE_INFO)
return 0;
struct check_status *st = status_alloc();
ASSERT(CHECK_IS(ppc, FORMAT_STR));
va_list ap;
va_start(ap, fmt);
int p = vsnprintf(st->msg, MAX_MSG_STR_SIZE, fmt, ap);
va_end(ap);
/* append possible strerror at the end of the message */
if (type != PMEMPOOL_CHECK_MSG_TYPE_QUESTION && arg && p > 0) {
char buff[UTIL_MAX_ERR_MSG];
util_strerror((int)arg, buff, UTIL_MAX_ERR_MSG);
int ret = util_snprintf(st->msg + p,
MAX_MSG_STR_SIZE - (size_t)p, ": %s", buff);
if (ret < 0) {
ERR("!snprintf");
status_release(st);
return -1;
}
}
st->status.type = type;
return status_push(ppc, st, arg);
}
/*
* check_status_release -- release single status object
*/
void
check_status_release(PMEMpoolcheck *ppc, struct check_status *status)
{
if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR)
ppc->data->error = NULL;
status_release(status);
}
/*
* pop_status -- (internal) pop single message from check_status queue
*/
static struct check_status *
pop_status(struct check_data *data, struct check_status_head *queue)
{
if (!PMDK_TAILQ_EMPTY(queue)) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = PMDK_TAILQ_FIRST(queue);
PMDK_TAILQ_REMOVE(queue, data->check_status_cache, next);
return data->check_status_cache;
}
return NULL;
}
/*
* check_pop_question -- pop single question from questions queue
*/
struct check_status *
check_pop_question(struct check_data *data)
{
return pop_status(data, &data->questions);
}
/*
* check_pop_info -- pop single info from information queue
*/
struct check_status *
check_pop_info(struct check_data *data)
{
return pop_status(data, &data->infos);
}
/*
* check_pop_error -- pop error from state
*/
struct check_status *
check_pop_error(struct check_data *data)
{
if (data->error) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = data->error;
data->error = NULL;
return data->check_status_cache;
}
return NULL;
}
#ifdef _WIN32
void
cache_to_utf8(struct check_data *data, char *buf, size_t size)
{
if (data->check_status_cache == NULL)
return;
struct check_status *status = data->check_status_cache;
/* if it was a question, convert it and the answer to utf8 */
if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION) {
struct pmempool_check_statusW *wstatus =
(struct pmempool_check_statusW *)&status->status;
wchar_t *wstring = (wchar_t *)wstatus->str.msg;
status->status.str.msg = util_toUTF8(wstring);
if (status->status.str.msg == NULL)
FATAL("!malloc");
util_free_UTF16(wstring);
if (util_toUTF8_buff(wstatus->str.answer, buf, size) != 0)
FATAL("Invalid answer conversion %s",
out_get_errormsg());
status->status.str.answer = buf;
}
}
#endif
/*
* check_clear_status_cache -- release check_status from cache
*/
void
check_clear_status_cache(struct check_data *data)
{
if (data->check_status_cache) {
switch (data->check_status_cache->status.type) {
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
/*
* Info and error statuses are disposable. After showing
* them to the user we have to release them.
*/
status_release(data->check_status_cache);
data->check_status_cache = NULL;
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
/*
			 * A question status, after being shown to the user,
			 * carries the user's answer. It must be kept until
			 * the answer is processed, so it cannot be released
			 * from the cache. It has to be pushed to the answers
			 * queue, processed and released after that.
*/
break;
default:
ASSERT(0);
}
}
}
/*
* status_answer_push -- (internal) push single answer to answers queue
*/
static void
status_answer_push(struct check_data *data, struct check_status *st)
{
ASSERTeq(st->status.type, PMEMPOOL_CHECK_MSG_TYPE_QUESTION);
PMDK_TAILQ_INSERT_TAIL(&data->answers, st, next);
}
/*
* check_push_answer -- process answer and push it to answers queue
*/
int
check_push_answer(PMEMpoolcheck *ppc)
{
if (ppc->data->check_status_cache == NULL)
return 0;
/* check if answer is "yes" or "no" */
struct check_status *status = ppc->data->check_status_cache;
if (status->status.str.answer != NULL) {
if (strcmp(status->status.str.answer, CHECK_ANSWER_YES) == 0)
status->answer = PMEMPOOL_CHECK_ANSWER_YES;
else if (strcmp(status->status.str.answer, CHECK_ANSWER_NO)
== 0)
status->answer = PMEMPOOL_CHECK_ANSWER_NO;
}
if (status->answer == PMEMPOOL_CHECK_ANSWER_EMPTY) {
/* invalid answer provided */
status_answer_push(ppc->data, ppc->data->check_status_cache);
ppc->data->check_status_cache = NULL;
CHECK_INFO(ppc, "Answer must be either %s or %s",
CHECK_ANSWER_YES, CHECK_ANSWER_NO);
return -1;
}
/* push answer */
PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers,
ppc->data->check_status_cache, next);
ppc->data->check_status_cache = NULL;
return 0;
}
/*
 * check_has_error -- check if an error exists
*/
bool
check_has_error(struct check_data *data)
{
return data->error != NULL;
}
/*
 * check_has_answer -- check if any answer exists
*/
bool
check_has_answer(struct check_data *data)
{
return !PMDK_TAILQ_EMPTY(&data->answers);
}
/*
* pop_answer -- (internal) pop single answer from answers queue
*/
static struct check_status *
pop_answer(struct check_data *data)
{
struct check_status *ret = NULL;
if (!PMDK_TAILQ_EMPTY(&data->answers)) {
ret = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, ret, next);
}
return ret;
}
/*
* check_status_get_util -- extract pmempool_check_status from check_status
*/
struct pmempool_check_status *
check_status_get_util(struct check_status *status)
{
return &status->status;
}
/*
* check_answer_loop -- loop through all available answers and process them
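 *
 * A "no" answer aborts the repair when fail_on_no is set; otherwise the
 * answer is dropped and the loop continues. Every "yes" answer invokes
 * the callback to perform the corresponding fix.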
*/
int
check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx, int fail_on_no,
int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx))
{
struct check_status *answer;
while ((answer = pop_answer(ppc->data)) != NULL) {
/* if answer is "no" we cannot fix an issue */
if (answer->answer != PMEMPOOL_CHECK_ANSWER_YES) {
if (fail_on_no ||
answer->answer != PMEMPOOL_CHECK_ANSWER_NO) {
CHECK_ERR(ppc,
"cannot complete repair, reverting changes");
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
goto error;
}
ppc->result = CHECK_RESULT_REPAIRED;
check_status_release(ppc, answer);
continue;
}
/* perform fix */
if (callback(ppc, data, answer->question, ctx)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
goto error;
}
if (ppc->result == CHECK_RESULT_ERROR)
goto error;
/* fix succeeded */
ppc->result = CHECK_RESULT_REPAIRED;
check_status_release(ppc, answer);
}
return 0;
error:
check_status_release(ppc, answer);
return -1;
}
/*
* check_questions_sequence_validate -- generate return value from result
*
 * A sequence of questions can end with one of the following results:
 * CONSISTENT, REPAIRED, ASK_QUESTIONS or PROCESS_ANSWERS. If result ==
 * ASK_QUESTIONS, it returns -1 to indicate that unanswered questions exist.
*/
int
check_questions_sequence_validate(PMEMpoolcheck *ppc)
{
ASSERT(ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_ASK_QUESTIONS ||
ppc->result == CHECK_RESULT_PROCESS_ANSWERS ||
ppc->result == CHECK_RESULT_REPAIRED);
if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) {
ASSERT(!PMDK_TAILQ_EMPTY(&ppc->data->questions));
return -1;
}
return 0;
}
/*
* check_get_time_str -- returns time in human-readable format
*/
const char *
check_get_time_str(time_t time)
{
static char str_buff[STR_MAX] = {0, };
struct tm *tm = util_localtime(&time);
if (tm)
strftime(str_buff, STR_MAX, TIME_STR_FMT, tm);
else {
int ret = util_snprintf(str_buff, STR_MAX, "unknown");
if (ret < 0) {
ERR("!snprintf");
return "";
}
}
return str_buff;
}
/*
 * check_get_uuid_str -- returns uuid in human-readable format
*/
const char *
check_get_uuid_str(uuid_t uuid)
{
static char uuid_str[UUID_STR_MAX] = {0, };
int ret = util_uuid_to_string(uuid, uuid_str);
if (ret != 0) {
ERR("failed to covert uuid to string");
return "";
}
return uuid_str;
}
/*
 * check_insert_arena -- insert arena into the list
*/
void
check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap)
{
PMDK_TAILQ_INSERT_TAIL(&ppc->pool->arenas, arenap, next);
ppc->pool->narenas++;
}
| 15,575 | 22.247761 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/pool.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* pool.h -- internal definitions for pool processing functions
*/
#ifndef POOL_H
#define POOL_H
#include <stdbool.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "queue.h"
#include "set.h"
#include "log.h"
#include "blk.h"
#include "btt_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
enum pool_type {
POOL_TYPE_UNKNOWN = (1 << 0),
POOL_TYPE_LOG = (1 << 1),
POOL_TYPE_BLK = (1 << 2),
POOL_TYPE_OBJ = (1 << 3),
POOL_TYPE_BTT = (1 << 4),
POOL_TYPE_ANY = POOL_TYPE_UNKNOWN | POOL_TYPE_LOG |
POOL_TYPE_BLK | POOL_TYPE_OBJ | POOL_TYPE_BTT,
};
struct pool_params {
enum pool_type type;
char signature[POOL_HDR_SIG_LEN];
features_t features;
size_t size;
mode_t mode;
int is_poolset;
int is_part;
int is_dev_dax;
int is_pmem;
union {
struct {
uint64_t bsize;
} blk;
struct {
char layout[PMEMOBJ_MAX_LAYOUT];
} obj;
};
};
struct pool_set_file {
int fd;
char *fname;
void *addr;
size_t size;
struct pool_set *poolset;
time_t mtime;
mode_t mode;
};
struct arena {
PMDK_TAILQ_ENTRY(arena) next;
struct btt_info btt_info;
uint32_t id;
bool valid;
bool zeroed;
uint64_t offset;
uint8_t *flog;
size_t flogsize;
uint32_t *map;
size_t mapsize;
};
struct pool_data {
struct pool_params params;
struct pool_set_file *set_file;
int blk_no_layout;
union {
struct pool_hdr pool;
struct pmemlog log;
struct pmemblk blk;
} hdr;
enum {
UUID_NOP = 0,
UUID_FROM_BTT,
UUID_NOT_FROM_BTT,
} uuid_op;
struct arena bttc;
PMDK_TAILQ_HEAD(arenashead, arena) arenas;
uint32_t narenas;
};
struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc);
void pool_data_free(struct pool_data *pool);
void pool_params_from_header(struct pool_params *params,
const struct pool_hdr *hdr);
int pool_set_parse(struct pool_set **setp, const char *path);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
int pool_read(struct pool_data *pool, void *buff, size_t nbytes,
uint64_t off);
int pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off);
int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite);
int pool_set_part_copy(struct pool_set_part *dpart,
struct pool_set_part *spart, int overwrite);
int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count);
unsigned pool_set_files_count(struct pool_set_file *file);
int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv);
void pool_set_file_unmap_headers(struct pool_set_file *file);
void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp);
enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp);
enum pool_type pool_set_type(struct pool_set *set);
const char *pool_get_pool_type_str(enum pool_type type);
int pool_btt_info_valid(struct btt_info *infop);
int pool_blk_get_first_valid_arena(struct pool_data *pool,
struct arena *arenap);
int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize);
uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t header_offset);
uint64_t pool_get_first_valid_btt(struct pool_data *pool,
struct btt_info *infop, uint64_t offset, bool *zeroed);
size_t pool_get_min_size(enum pool_type);
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmempool_fault_injection_enabled(void);
#else
static inline void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmempool_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,712 | 21.640244 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/pool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pool.c -- pool processing functions
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <endian.h>
#ifndef _WIN32
#include <sys/ioctl.h>
#ifdef __FreeBSD__
#include <sys/disk.h>
#define BLKGETSIZE64 DIOCGMEDIASIZE
#else
#include <linux/fs.h>
#endif
#endif
#include "libpmem.h"
#include "libpmemlog.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "out.h"
#include "pmempool.h"
#include "pool.h"
#include "lane.h"
#include "obj.h"
#include "btt.h"
#include "file.h"
#include "os.h"
#include "set.h"
#include "check_util.h"
#include "util_pmem.h"
#include "mmap.h"
/* arbitrary size of the maximum file part being read / written at once */
#define RW_BUFFERING_SIZE (128 * 1024 * 1024)
/*
* pool_btt_lseek -- (internal) perform lseek in BTT file mode
*/
static inline os_off_t
pool_btt_lseek(struct pool_data *pool, os_off_t offset, int whence)
{
os_off_t result;
if ((result = os_lseek(pool->set_file->fd, offset, whence)) == -1)
ERR("!lseek");
return result;
}
/*
* pool_btt_read -- (internal) perform read in BTT file mode
*/
static inline ssize_t
pool_btt_read(struct pool_data *pool, void *dst, size_t count)
{
size_t total = 0;
ssize_t nread;
while (count > total &&
(nread = util_read(pool->set_file->fd, dst, count - total))) {
if (nread == -1) {
ERR("!read");
return total ? (ssize_t)total : -1;
}
dst = (void *)((ssize_t)dst + nread);
total += (size_t)nread;
}
return (ssize_t)total;
}
/*
* pool_btt_write -- (internal) perform write in BTT file mode
*/
static inline ssize_t
pool_btt_write(struct pool_data *pool, const void *src, size_t count)
{
ssize_t nwrite = 0;
size_t total = 0;
while (count > total &&
(nwrite = util_write(pool->set_file->fd, src,
count - total))) {
if (nwrite == -1) {
ERR("!write");
return total ? (ssize_t)total : -1;
}
src = (void *)((ssize_t)src + nwrite);
total += (size_t)nwrite;
}
return (ssize_t)total;
}
/*
* pool_set_read_header -- (internal) read a header of a pool set
*/
static int
pool_set_read_header(const char *fname, struct pool_hdr *hdr)
{
struct pool_set *set;
int ret = 0;
if (util_poolset_read(&set, fname)) {
return -1;
}
	/* open the first part file of the set to read the pool header values */
const struct pool_set_part *part = PART(REP(set, 0), 0);
int fdp = util_file_open(part->path, NULL, 0, O_RDONLY);
if (fdp < 0) {
ERR("cannot open poolset part file");
ret = -1;
goto err_pool_set;
}
/* read the pool header from first pool set file */
if (pread(fdp, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
ERR("cannot read pool header from poolset");
ret = -1;
goto err_close_part;
}
err_close_part:
os_close(fdp);
err_pool_set:
util_poolset_free(set);
return ret;
}
/*
* pool_set_map -- (internal) map poolset
*/
static int
pool_set_map(const char *fname, struct pool_set **poolset, unsigned flags)
{
ASSERTeq(util_is_poolset_file(fname), 1);
struct pool_hdr hdr;
if (pool_set_read_header(fname, &hdr))
return -1;
util_convert2h_hdr_nocheck(&hdr);
/* parse pool type from first pool set file */
enum pool_type type = pool_hdr_get_type(&hdr);
if (type == POOL_TYPE_UNKNOWN) {
ERR("cannot determine pool type from poolset");
return -1;
}
/*
	 * Open the poolset. The values passed to util_pool_open are read
	 * from the first poolset file; they are then compared with the
	 * values from the headers of all poolset files.
*/
struct pool_attr attr;
util_pool_hdr2attr(&attr, &hdr);
if (util_pool_open(poolset, fname, 0 /* minpartsize */, &attr,
NULL, NULL, flags | POOL_OPEN_IGNORE_SDS |
POOL_OPEN_IGNORE_BAD_BLOCKS)) {
ERR("opening poolset failed");
return -1;
}
return 0;
}
/*
* pool_params_from_header -- parse pool params from pool header
*/
void
pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr)
{
memcpy(params->signature, hdr->signature, sizeof(params->signature));
memcpy(¶ms->features, &hdr->features, sizeof(params->features));
/*
	 * Check if the file is a part of a pool set by comparing its UUID
	 * with the neighboring part UUIDs. If they are all the same, the
	 * pool consists of a single file.
*/
int uuid_eq_next = uuidcmp(hdr->uuid, hdr->next_part_uuid);
int uuid_eq_prev = uuidcmp(hdr->uuid, hdr->prev_part_uuid);
params->is_part = !params->is_poolset && (uuid_eq_next || uuid_eq_prev);
params->type = pool_hdr_get_type(hdr);
}
/*
* pool_check_type_to_pool_type -- (internal) convert check pool type to
* internal pool type value
*/
static enum pool_type
pool_check_type_to_pool_type(enum pmempool_pool_type check_pool_type)
{
switch (check_pool_type) {
case PMEMPOOL_POOL_TYPE_LOG:
return POOL_TYPE_LOG;
case PMEMPOOL_POOL_TYPE_BLK:
return POOL_TYPE_BLK;
case PMEMPOOL_POOL_TYPE_OBJ:
return POOL_TYPE_OBJ;
default:
ERR("can not convert pmempool_pool_type %u to pool_type",
check_pool_type);
return POOL_TYPE_UNKNOWN;
}
}
/*
 * pool_params_parse -- parse pool type, file size and block size
*/
static int
pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params,
int check)
{
LOG(3, NULL);
int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT;
params->type = POOL_TYPE_UNKNOWN;
params->is_poolset = util_is_poolset_file(ppc->path) == 1;
int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY);
if (fd < 0)
return -1;
int ret = 0;
os_stat_t stat_buf;
ret = os_fstat(fd, &stat_buf);
if (ret)
goto out_close;
ASSERT(stat_buf.st_size >= 0);
params->mode = stat_buf.st_mode;
struct pool_set *set;
void *addr;
if (params->is_poolset) {
/*
		 * Need to close the file descriptor because the pool set
		 * will be opened with flock in the following instructions.
*/
os_close(fd);
fd = -1;
if (check) {
if (pool_set_map(ppc->path, &set, 0))
return -1;
} else {
ret = util_poolset_create_set(&set, ppc->path,
0, 0, true);
if (ret < 0) {
LOG(2, "cannot open pool set -- '%s'",
ppc->path);
return -1;
}
if (set->remote) {
ERR("poolsets with remote replicas are not "
"supported");
return -1;
}
if (util_pool_open_nocheck(set,
POOL_OPEN_IGNORE_BAD_BLOCKS))
return -1;
}
params->size = set->poolsize;
addr = set->replica[0]->part[0].addr;
/*
* XXX mprotect for device dax with length not aligned to its
* page granularity causes SIGBUS on the next page fault.
* The length argument of this call should be changed to
* set->poolsize once the kernel issue is solved.
*/
if (mprotect(addr, set->replica[0]->repsize,
PROT_READ) < 0) {
ERR("!mprotect");
goto out_unmap;
}
params->is_dev_dax = set->replica[0]->part[0].is_dev_dax;
params->is_pmem = set->replica[0]->is_pmem;
} else if (is_btt) {
params->size = (size_t)stat_buf.st_size;
#ifndef _WIN32
if (params->mode & S_IFBLK)
if (ioctl(fd, BLKGETSIZE64, ¶ms->size)) {
ERR("!ioctl");
goto out_close;
}
#endif
addr = NULL;
} else {
enum file_type type = util_file_get_type(ppc->path);
if (type < 0) {
ret = -1;
goto out_close;
}
ssize_t s = util_file_get_size(ppc->path);
if (s < 0) {
ret = -1;
goto out_close;
}
params->size = (size_t)s;
int map_sync;
addr = util_map(fd, 0, params->size, MAP_SHARED, 1, 0,
&map_sync);
if (addr == NULL) {
ret = -1;
goto out_close;
}
params->is_dev_dax = type == TYPE_DEVDAX;
params->is_pmem = params->is_dev_dax || map_sync ||
pmem_is_pmem(addr, params->size);
}
/* stop processing for BTT device */
if (is_btt) {
params->type = POOL_TYPE_BTT;
params->is_part = false;
goto out_close;
}
struct pool_hdr hdr;
memcpy(&hdr, addr, sizeof(hdr));
util_convert2h_hdr_nocheck(&hdr);
pool_params_from_header(params, &hdr);
if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) {
enum pool_type declared_type =
pool_check_type_to_pool_type(ppc->args.pool_type);
if ((params->type & ~declared_type) != 0) {
ERR("declared pool type does not match");
errno = EINVAL;
ret = 1;
goto out_unmap;
}
}
if (params->type == POOL_TYPE_BLK) {
struct pmemblk pbp;
memcpy(&pbp, addr, sizeof(pbp));
params->blk.bsize = le32toh(pbp.bsize);
} else if (params->type == POOL_TYPE_OBJ) {
struct pmemobjpool *pop = addr;
memcpy(params->obj.layout, pop->layout,
PMEMOBJ_MAX_LAYOUT);
}
out_unmap:
if (params->is_poolset) {
ASSERTeq(fd, -1);
ASSERTne(addr, NULL);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
} else if (!is_btt) {
ASSERTne(fd, -1);
ASSERTne(addr, NULL);
munmap(addr, params->size);
}
out_close:
if (fd != -1)
os_close(fd);
return ret;
}
/*
* pool_set_file_open -- (internal) opens pool set file or regular file
*/
static struct pool_set_file *
pool_set_file_open(const char *fname, struct pool_params *params, int rdonly)
{
LOG(3, NULL);
struct pool_set_file *file = calloc(1, sizeof(*file));
if (!file)
return NULL;
file->fname = strdup(fname);
if (!file->fname)
goto err;
const char *path = file->fname;
if (params->type != POOL_TYPE_BTT) {
int ret = util_poolset_create_set(&file->poolset, path,
0, 0, true);
if (ret < 0) {
LOG(2, "cannot open pool set -- '%s'", path);
goto err_free_fname;
}
unsigned flags = (rdonly ? POOL_OPEN_COW : 0) |
POOL_OPEN_IGNORE_BAD_BLOCKS;
if (util_pool_open_nocheck(file->poolset, flags))
goto err_free_fname;
file->size = file->poolset->poolsize;
/* get modification time from the first part of first replica */
path = file->poolset->replica[0]->part[0].path;
file->addr = file->poolset->replica[0]->part[0].addr;
} else {
int oflag = rdonly ? O_RDONLY : O_RDWR;
file->fd = util_file_open(fname, NULL, 0, oflag);
file->size = params->size;
}
os_stat_t buf;
if (os_stat(path, &buf)) {
ERR("%s", path);
goto err_close_poolset;
}
file->mtime = buf.st_mtime;
file->mode = buf.st_mode;
return file;
err_close_poolset:
if (params->type != POOL_TYPE_BTT)
util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
else if (file->fd != -1)
os_close(file->fd);
err_free_fname:
free(file->fname);
err:
free(file);
return NULL;
}
/*
* pool_set_parse -- parse poolset file
*/
int
pool_set_parse(struct pool_set **setp, const char *path)
{
LOG(3, "setp %p path %s", setp, path);
int fd = os_open(path, O_RDONLY);
int ret = 0;
if (fd < 0)
return 1;
if (util_poolset_parse(setp, path, fd)) {
ret = 1;
goto err_close;
}
err_close:
os_close(fd);
return ret;
}
/*
* pool_data_alloc -- allocate pool data and open set_file
*/
struct pool_data *
pool_data_alloc(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
struct pool_data *pool = calloc(1, sizeof(*pool));
if (!pool) {
ERR("!calloc");
return NULL;
}
PMDK_TAILQ_INIT(&pool->arenas);
pool->uuid_op = UUID_NOP;
if (pool_params_parse(ppc, &pool->params, 0))
goto error;
int rdonly = CHECK_IS_NOT(ppc, REPAIR);
int prv = CHECK_IS(ppc, DRY_RUN);
if (prv && pool->params.is_dev_dax) {
errno = ENOTSUP;
ERR("!cannot perform a dry run on dax device");
goto error;
}
pool->set_file = pool_set_file_open(ppc->path, &pool->params, prv);
if (pool->set_file == NULL)
goto error;
/*
* XXX mprotect for device dax with length not aligned to its
* page granularity causes SIGBUS on the next page fault.
* The length argument of this call should be changed to
* pool->set_file->poolsize once the kernel issue is solved.
*/
if (rdonly && mprotect(pool->set_file->addr,
pool->set_file->poolset->replica[0]->repsize,
PROT_READ) < 0)
goto error;
if (pool->params.type != POOL_TYPE_BTT) {
if (pool_set_file_map_headers(pool->set_file, rdonly, prv))
goto error;
}
return pool;
error:
pool_data_free(pool);
return NULL;
}
/*
* pool_set_file_close -- (internal) closes pool set file or regular file
*/
static void
pool_set_file_close(struct pool_set_file *file)
{
LOG(3, NULL);
if (file->poolset)
util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
else if (file->addr) {
munmap(file->addr, file->size);
os_close(file->fd);
} else if (file->fd)
os_close(file->fd);
free(file->fname);
free(file);
}
/*
* pool_data_free -- close set_file and release pool data
*/
void
pool_data_free(struct pool_data *pool)
{
LOG(3, NULL);
if (pool->set_file) {
if (pool->params.type != POOL_TYPE_BTT)
pool_set_file_unmap_headers(pool->set_file);
pool_set_file_close(pool->set_file);
}
while (!PMDK_TAILQ_EMPTY(&pool->arenas)) {
struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
if (arenap->map)
free(arenap->map);
if (arenap->flog)
free(arenap->flog);
PMDK_TAILQ_REMOVE(&pool->arenas, arenap, next);
free(arenap);
}
free(pool);
}
/*
* pool_set_file_map -- return mapped address at given offset
*/
void *
pool_set_file_map(struct pool_set_file *file, uint64_t offset)
{
if (file->addr == MAP_FAILED)
return NULL;
return (char *)file->addr + offset;
}
/*
* pool_read -- read from pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
int
pool_read(struct pool_data *pool, void *buff, size_t nbytes, uint64_t off)
{
if (off + nbytes > pool->set_file->size)
return -1;
if (pool->params.type != POOL_TYPE_BTT)
memcpy(buff, (char *)pool->set_file->addr + off, nbytes);
else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
if ((size_t)pool_btt_read(pool, buff, nbytes) != nbytes)
return -1;
}
return 0;
}
/*
* pool_write -- write to pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
int
pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off)
{
if (off + nbytes > pool->set_file->size)
return -1;
if (pool->params.type != POOL_TYPE_BTT) {
memcpy((char *)pool->set_file->addr + off, buff, nbytes);
util_persist_auto(pool->params.is_pmem,
(char *)pool->set_file->addr + off, nbytes);
} else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
if ((size_t)pool_btt_write(pool, buff, nbytes) != nbytes)
return -1;
}
return 0;
}
/*
* pool_copy -- make a copy of the pool
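 *
 * For mapped pool types the content is copied with a single memcpy();
 * for BTT devices it is streamed through a bounce buffer of
 * RW_BUFFERING_SIZE bytes.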
*/
int
pool_copy(struct pool_data *pool, const char *dst_path, int overwrite)
{
struct pool_set_file *file = pool->set_file;
int dfd;
int exists = util_file_exists(dst_path);
if (exists < 0)
return -1;
if (exists) {
if (!overwrite) {
errno = EEXIST;
return -1;
}
dfd = util_file_open(dst_path, NULL, 0, O_RDWR);
} else {
errno = 0;
dfd = util_file_create(dst_path, file->size, 0);
}
if (dfd < 0)
return -1;
int result = 0;
os_stat_t stat_buf;
if (os_stat(file->fname, &stat_buf)) {
result = -1;
goto out_close;
}
if (fchmod(dfd, stat_buf.st_mode)) {
result = -1;
goto out_close;
}
void *daddr = mmap(NULL, file->size, PROT_READ | PROT_WRITE,
MAP_SHARED, dfd, 0);
if (daddr == MAP_FAILED) {
result = -1;
goto out_close;
}
if (pool->params.type != POOL_TYPE_BTT) {
void *saddr = pool_set_file_map(file, 0);
memcpy(daddr, saddr, file->size);
goto out_unmap;
}
void *buf = malloc(RW_BUFFERING_SIZE);
if (buf == NULL) {
ERR("!malloc");
result = -1;
goto out_unmap;
}
if (pool_btt_lseek(pool, 0, SEEK_SET) == -1) {
result = -1;
goto out_free;
}
ssize_t buf_read = 0;
void *dst = daddr;
while ((buf_read = pool_btt_read(pool, buf, RW_BUFFERING_SIZE))) {
if (buf_read == -1)
break;
memcpy(dst, buf, (size_t)buf_read);
dst = (void *)((ssize_t)dst + buf_read);
}
out_free:
free(buf);
out_unmap:
munmap(daddr, file->size);
out_close:
(void) os_close(dfd);
return result;
}
/*
* pool_set_part_copy -- make a copy of the poolset part
*/
int
pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart,
int overwrite)
{
LOG(3, "dpart %p spart %p", dpart, spart);
int result = 0;
os_stat_t stat_buf;
if (os_fstat(spart->fd, &stat_buf)) {
ERR("!util_stat");
return -1;
}
size_t smapped = 0;
void *saddr = pmem_map_file(spart->path, 0, 0, S_IREAD, &smapped, NULL);
if (!saddr)
return -1;
size_t dmapped = 0;
int is_pmem;
void *daddr;
int exists = util_file_exists(dpart->path);
if (exists < 0) {
result = -1;
goto out_sunmap;
}
if (exists) {
if (!overwrite) {
errno = EEXIST;
result = -1;
goto out_sunmap;
}
daddr = pmem_map_file(dpart->path, 0, 0, S_IWRITE, &dmapped,
&is_pmem);
} else {
errno = 0;
daddr = pmem_map_file(dpart->path, dpart->filesize,
PMEM_FILE_CREATE | PMEM_FILE_EXCL,
stat_buf.st_mode, &dmapped, &is_pmem);
}
if (!daddr) {
result = -1;
goto out_sunmap;
}
#ifdef DEBUG
/* provide extra logging in case of wrong dmapped/smapped value */
if (dmapped < smapped) {
LOG(1, "dmapped < smapped: dmapped = %lu, smapped = %lu",
dmapped, smapped);
ASSERT(0);
}
#endif
if (is_pmem) {
pmem_memcpy_persist(daddr, saddr, smapped);
} else {
memcpy(daddr, saddr, smapped);
pmem_msync(daddr, smapped);
}
pmem_unmap(daddr, dmapped);
out_sunmap:
pmem_unmap(saddr, smapped);
return result;
}
/*
* pool_memset -- memset pool part described by off and count
*/
int
pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count)
{
int result = 0;
if (pool->params.type != POOL_TYPE_BTT)
memset((char *)off, 0, count);
else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
size_t zero_size = min(count, RW_BUFFERING_SIZE);
void *buf = malloc(zero_size);
if (!buf) {
ERR("!malloc");
return -1;
}
memset(buf, c, zero_size);
ssize_t nwrite = 0;
do {
zero_size = min(zero_size, count);
nwrite = pool_btt_write(pool, buf, zero_size);
if (nwrite < 0) {
result = -1;
break;
}
count -= (size_t)nwrite;
} while (count > 0);
free(buf);
}
return result;
}
/*
* pool_set_files_count -- get total number of parts of all replicas
*/
unsigned
pool_set_files_count(struct pool_set_file *file)
{
unsigned ret = 0;
unsigned nreplicas = file->poolset->nreplicas;
for (unsigned r = 0; r < nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
ret += rep->nparts;
}
return ret;
}
/*
* pool_set_file_map_headers -- map headers of each pool set part file
*/
int
pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv)
{
if (!file->poolset)
return -1;
for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
if (util_map_hdr(part,
prv ? MAP_PRIVATE : MAP_SHARED, rdonly)) {
part->hdr = NULL;
goto err;
}
}
}
return 0;
err:
pool_set_file_unmap_headers(file);
return -1;
}
/*
* pool_set_file_unmap_headers -- unmap headers of each pool set part file
*/
void
pool_set_file_unmap_headers(struct pool_set_file *file)
{
if (!file->poolset)
return;
for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
util_unmap_hdr(part);
}
}
}
/*
* pool_get_signature -- (internal) return signature of specified pool type
*/
static const char *
pool_get_signature(enum pool_type type)
{
switch (type) {
case POOL_TYPE_LOG:
return LOG_HDR_SIG;
case POOL_TYPE_BLK:
return BLK_HDR_SIG;
case POOL_TYPE_OBJ:
return OBJ_HDR_SIG;
default:
return NULL;
}
}
/*
 * pool_hdr_default -- fill in default pool header values
*/
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
memset(hdrp, 0, sizeof(*hdrp));
const char *sig = pool_get_signature(type);
ASSERTne(sig, NULL);
memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);
switch (type) {
case POOL_TYPE_LOG:
hdrp->major = LOG_FORMAT_MAJOR;
hdrp->features = log_format_feat_default;
break;
case POOL_TYPE_BLK:
hdrp->major = BLK_FORMAT_MAJOR;
hdrp->features = blk_format_feat_default;
break;
case POOL_TYPE_OBJ:
hdrp->major = OBJ_FORMAT_MAJOR;
hdrp->features = obj_format_feat_default;
break;
default:
break;
}
}
/*
* pool_hdr_get_type -- return pool type based on pool header data
*/
enum pool_type
pool_hdr_get_type(const struct pool_hdr *hdrp)
{
if (memcmp(hdrp->signature, LOG_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_LOG;
else if (memcmp(hdrp->signature, BLK_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_BLK;
else if (memcmp(hdrp->signature, OBJ_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_OBJ;
else
return POOL_TYPE_UNKNOWN;
}
/*
* pool_get_pool_type_str -- return human-readable pool type string
*/
const char *
pool_get_pool_type_str(enum pool_type type)
{
switch (type) {
case POOL_TYPE_BTT:
return "btt";
case POOL_TYPE_LOG:
return "pmemlog";
case POOL_TYPE_BLK:
return "pmemblk";
case POOL_TYPE_OBJ:
return "pmemobj";
default:
return "unknown";
}
}
/*
* pool_set_type -- get pool type of a poolset
*/
enum pool_type
pool_set_type(struct pool_set *set)
{
struct pool_hdr hdr;
/* open the first part file to read the pool header values */
const struct pool_set_part *part = PART(REP(set, 0), 0);
if (util_file_pread(part->path, &hdr, sizeof(hdr), 0) !=
sizeof(hdr)) {
ERR("cannot read pool header from poolset");
return POOL_TYPE_UNKNOWN;
}
util_convert2h_hdr_nocheck(&hdr);
enum pool_type type = pool_hdr_get_type(&hdr);
return type;
}
/*
* pool_btt_info_valid -- check consistency of BTT Info header
*/
int
pool_btt_info_valid(struct btt_info *infop)
{
if (memcmp(infop->sig, BTTINFO_SIG, BTTINFO_SIG_LEN) != 0)
return 0;
return util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0);
}
/*
* pool_blk_get_first_valid_arena -- get first valid BTT Info in arena
*/
int
pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap)
{
arenap->zeroed = true;
uint64_t offset = pool_get_first_valid_btt(pool, &arenap->btt_info,
2 * BTT_ALIGNMENT, &arenap->zeroed);
if (offset != 0) {
arenap->offset = offset;
arenap->valid = true;
return 1;
}
return 0;
}
/*
* pool_next_arena_offset -- get offset of next arena
*
 * The calculated offset is theoretical. The function does not check whether
 * such an arena can exist.
*/
uint64_t
pool_next_arena_offset(struct pool_data *pool, uint64_t offset)
{
uint64_t lastoff = (pool->set_file->size & ~(BTT_ALIGNMENT - 1));
uint64_t nextoff = min(offset + BTT_MAX_ARENA, lastoff);
return nextoff;
}
/*
* pool_get_first_valid_btt -- return offset to first valid BTT Info
*
 * - Return the offset to a valid BTT Info header in the pool file.
 * - Start looking from the given offset.
 * - Convert the BTT Info header to host endianness.
 * - Return the BTT Info header by pointer.
 * - If the zeroed pointer is provided, check whether all examined BTT Info
 *   structures are zeroed, which is useful for BLK pools.
*/
uint64_t
pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop,
uint64_t offset, bool *zeroed)
{
/* if we have valid arena get BTT Info header from it */
if (pool->narenas != 0) {
struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
memcpy(infop, &arenap->btt_info, sizeof(*infop));
return arenap->offset;
}
const size_t info_size = sizeof(*infop);
/* theoretical offsets to BTT Info header and backup */
uint64_t offsets[2] = {offset, 0};
while (offsets[0] < pool->set_file->size) {
/* calculate backup offset */
offsets[1] = pool_next_arena_offset(pool, offsets[0]) -
info_size;
/* check both offsets: header and backup */
for (int i = 0; i < 2; ++i) {
if (pool_read(pool, infop, info_size, offsets[i]))
continue;
/* check if all possible BTT Info are zeroed */
if (zeroed)
*zeroed &= util_is_zeroed((const void *)infop,
info_size);
/* check if read BTT Info is valid */
if (pool_btt_info_valid(infop)) {
btt_info_convert2h(infop);
return offsets[i];
}
}
/* jump to next arena */
offsets[0] += BTT_MAX_ARENA;
}
return 0;
}
/*
* pool_get_min_size -- return the minimum pool size of a pool of a given type
*/
size_t
pool_get_min_size(enum pool_type type)
{
switch (type) {
case POOL_TYPE_LOG:
return PMEMLOG_MIN_POOL;
case POOL_TYPE_BLK:
return PMEMBLK_MIN_POOL;
case POOL_TYPE_OBJ:
return PMEMOBJ_MIN_POOL;
default:
ERR("unknown type of a pool");
return SIZE_MAX;
}
}
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmempool_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 24,738 | 21.009786 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem.c -- pmem entry points for libpmem
*
*
* PERSISTENT MEMORY INSTRUCTIONS ON X86
*
* The primary feature of this library is to provide a way to flush
* changes to persistent memory as outlined below (note that many
* of the decisions below are made at initialization time, and not
* repeated every time a flush is requested).
*
* To flush a range to pmem when CLWB is available:
*
* CLWB for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when CLFLUSHOPT is available and CLWB is not
* (same as above but issue CLFLUSHOPT instead of CLWB):
*
* CLFLUSHOPT for each cache line in the given range.
*
 *	SFENCE to ensure the CLFLUSHOPTs above have completed.
*
 * To flush a range to pmem when neither CLFLUSHOPT nor CLWB is available
* (same as above but fences surrounding CLFLUSH are not required):
*
* CLFLUSH for each cache line in the given range.
*
* To memcpy a range of memory to pmem when MOVNT is available:
*
* Copy any non-64-byte portion of the destination using MOV.
*
* Use the flush flow above without the fence for the copied portion.
*
* Copy using MOVNTDQ, up to any non-64-byte aligned end portion.
* (The MOVNT instructions bypass the cache, so no flush is required.)
*
* Copy any unaligned end portion using MOV.
*
* Use the flush flow above for the copied portion (including fence).
*
* To memcpy a range of memory to pmem when MOVNT is not available:
*
* Just pass the call to the normal memcpy() followed by pmem_persist().
*
* To memset a non-trivial sized range of memory to pmem:
*
* Same as the memcpy cases above but store the given value instead
* of reading values from the source.
*
 * These features are supported on ARM AARCH64 using equivalent ARM
 * assembly instructions. Please refer to arm_cacheops.h for more details.
*
* INTERFACES FOR FLUSHING TO PERSISTENT MEMORY
*
* Given the flows above, three interfaces are provided for flushing a range
* so that the caller has the ability to separate the steps when necessary,
* but otherwise leaves the detection of available instructions to the libpmem:
*
* pmem_persist(addr, len)
*
* This is the common case, which just calls the two other functions:
*
* pmem_flush(addr, len);
* pmem_drain();
*
* pmem_flush(addr, len)
*
* CLWB or CLFLUSHOPT or CLFLUSH for each cache line
*
* pmem_drain()
*
* SFENCE unless using CLFLUSH
*
*
* INTERFACES FOR COPYING/SETTING RANGES OF MEMORY
*
* Given the flows above, the following interfaces are provided for the
* memmove/memcpy/memset operations to persistent memory:
*
* pmem_memmove_nodrain()
*
* Checks for overlapped ranges to determine whether to copy from
* the beginning of the range or from the end. If MOVNT instructions
* are available, uses the memory copy flow described above, otherwise
* calls the libc memmove() followed by pmem_flush(). Since no conditional
 *	compilation and/or architecture-specific CFLAGS are in use at the
 *	moment, SSE2 (and thus MOVNT) is simply assumed to be available.
*
* pmem_memcpy_nodrain()
*
* Just calls pmem_memmove_nodrain().
*
* pmem_memset_nodrain()
*
* If MOVNT instructions are available, uses the memset flow described
* above, otherwise calls the libc memset() followed by pmem_flush().
*
* pmem_memmove_persist()
* pmem_memcpy_persist()
* pmem_memset_persist()
*
* Calls the appropriate _nodrain() function followed by pmem_drain().
*
*
* DECISIONS MADE AT INITIALIZATION TIME
*
* As much as possible, all decisions described above are made at library
* initialization time. This is achieved using function pointers that are
* setup by pmem_init() when the library loads.
*
 * Funcs.fence is used by pmem_drain() to call one of:
* fence_empty()
* memory_barrier()
*
 * Funcs.flush is used by pmem_flush() to call one of:
* flush_dcache()
* flush_dcache_invalidate_opt()
* flush_dcache_invalidate()
*
 * Funcs.memmove_nodrain is used by memmove_nodrain() to call one of:
* memmove_nodrain_libc()
* memmove_nodrain_movnt()
*
 * Funcs.memset_nodrain is used by memset_nodrain() to call one of:
* memset_nodrain_libc()
* memset_nodrain_movnt()
*
* DEBUG LOGGING
*
* Many of the functions here get called hundreds of times from loops
* iterating over ranges, making the usual LOG() calls at level 3
* impractical. The call tracing log for those functions is set at 15.
*/
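/*
 * For illustration (dst, src and len are hypothetical), a store sequence
 * typically looks like:
 *
 *	memcpy(dst, src, len);
 *	pmem_persist(dst, len);
 *
 * or, batching several ranges under a single drain:
 *
 *	pmem_flush(dst1, len1);
 *	pmem_flush(dst2, len2);
 *	pmem_drain();
 */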
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmem2_arch.h"
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "valgrind_internal.h"
#include "os_deep.h"
#include "auto_flush.h"
struct pmem_funcs {
memmove_nodrain_func memmove_nodrain;
memset_nodrain_func memset_nodrain;
flush_func deep_flush;
flush_func flush;
fence_func fence;
};
static struct pmem_funcs Funcs;
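/* runtime dispatch table, set up by pmem_init() as described above */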
static is_pmem_func Is_pmem = NULL;
/*
* pmem_has_hw_drain -- return whether or not HW drain was found
*
* Always false for x86: HW drain is done by HW with no SW involvement.
*/
int
pmem_has_hw_drain(void)
{
LOG(3, NULL);
return 0;
}
/*
* pmem_drain -- wait for any PM stores to drain from HW buffers
*/
void
pmem_drain(void)
{
LOG(15, NULL);
Funcs.fence();
}
/*
* pmem_has_auto_flush -- check if platform supports eADR
*/
int
pmem_has_auto_flush()
{
LOG(3, NULL);
return pmem2_auto_flush();
}
/*
* pmem_deep_flush -- flush processor cache for the given range
* regardless of eADR support on platform
*/
void
pmem_deep_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.deep_flush(addr, len);
}
/*
* pmem_flush -- flush processor cache for the given range
*/
void
pmem_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.flush(addr, len);
}
/*
* pmem_persist -- make any cached changes to a range of pmem persistent
*/
void
pmem_persist(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
pmem_flush(addr, len);
pmem_drain();
}
/*
* pmem_msync -- flush to persistence via msync
*
* Using msync() means this routine is less optimal for pmem (but it
* still works) but it also works for any memory mapped file, unlike
* pmem_persist() which is only safe where pmem_is_pmem() returns true.
*/
int
pmem_msync(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
/*
* msync requires addr to be a multiple of pagesize but there are no
* requirements for len. Align addr down and change len so that
	 * [addr, addr + len) still contains the initial range.
*/
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1);
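	/*
	 * Illustrative example: addr 0x1003, len 10 and 4K pages give
	 * uptr 0x1000 and len 13.
	 */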
/*
	 * msync accepts only addresses aligned to a page boundary, so we may
	 * sync more than requested, and part of that range may have been
	 * marked as undefined/inaccessible. Msyncing such memory is not a
	 * bug, so as a workaround temporarily disable error reporting.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
int ret;
if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0)
ERR("!msync");
VALGRIND_DO_ENABLE_ERROR_REPORTING;
/* full flush */
VALGRIND_DO_PERSIST(uptr, len);
return ret;
}
/*
* is_pmem_always -- (internal) always true (for meaningful parameters) version
* of pmem_is_pmem()
*/
static int
is_pmem_always(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
return 1;
}
/*
* is_pmem_never -- (internal) never true version of pmem_is_pmem()
*/
static int
is_pmem_never(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return 0;
}
/*
 * pmem_is_pmem_init -- (internal) initialize the Is_pmem function pointer
*
* This should be done only once - on the first call to pmem_is_pmem().
* If PMEM_IS_PMEM_FORCE is set, it would override the default behavior
* of pmem_is_pmem().
*/
static void
pmem_is_pmem_init(void)
{
LOG(3, NULL);
static volatile unsigned init;
while (init != 2) {
if (!util_bool_compare_and_swap32(&init, 0, 1))
continue;
/*
* For debugging/testing, allow pmem_is_pmem() to be forced
* to always true or never true using environment variable
* PMEM_IS_PMEM_FORCE values of zero or one.
*
* This isn't #ifdef DEBUG because it has a trivial performance
* impact and it may turn out to be useful as a "chicken bit"
* for systems where pmem_is_pmem() isn't correctly detecting
* true persistent memory.
*/
char *ptr = os_getenv("PMEM_IS_PMEM_FORCE");
if (ptr) {
int val = atoi(ptr);
if (val == 0)
Is_pmem = is_pmem_never;
else if (val == 1)
Is_pmem = is_pmem_always;
VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Is_pmem);
LOG(4, "PMEM_IS_PMEM_FORCE=%d", val);
}
if (Funcs.deep_flush == NULL)
Is_pmem = is_pmem_never;
if (!util_bool_compare_and_swap32(&init, 1, 2))
FATAL("util_bool_compare_and_swap32");
}
}
/*
* pmem_is_pmem -- return true if entire range is persistent memory
*/
int
pmem_is_pmem(const void *addr, size_t len)
{
LOG(10, "addr %p len %zu", addr, len);
static int once;
/* This is not thread-safe, but pmem_is_pmem_init() is. */
if (once == 0) {
pmem_is_pmem_init();
util_fetch_and_add32(&once, 1);
}
VALGRIND_ANNOTATE_HAPPENS_AFTER(&Is_pmem);
return Is_pmem(addr, len);
}
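/*
 * Illustrative sketch (not part of the original source): exercising the
 * PMEM_IS_PMEM_FORCE override described in pmem_is_pmem_init(). The
 * variable must be set before the first pmem_is_pmem() call, because the
 * chosen Is_pmem implementation is latched on first use. The helper and
 * its arguments are hypothetical.
 */
#if 0
static void
example_force_pmem(void *addr, size_t len)
{
	setenv("PMEM_IS_PMEM_FORCE", "1", 1);	/* force "always pmem" */
	ASSERT(pmem_is_pmem(addr, len) == 1);
}
#endif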
#define PMEM_FILE_ALL_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
#define PMEM_DAX_VALID_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_SPARSE)
/*
* pmem_map_fileU -- create or open the file and map it to memory
*/
#ifndef _WIN32
static inline
#endif
void *
pmem_map_fileU(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p "
"is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp);
int oerrno;
int fd;
	int open_flags = O_RDWR;
int delete_on_err = 0;
int file_type = util_file_get_type(path);
#ifdef _WIN32
open_flags |= O_BINARY;
#endif
if (file_type == OTHER_ERROR)
return NULL;
if (flags & ~(PMEM_FILE_ALL_FLAGS)) {
ERR("invalid flag specified %x", flags);
errno = EINVAL;
return NULL;
}
if (file_type == TYPE_DEVDAX) {
if (flags & ~(PMEM_DAX_VALID_FLAGS)) {
ERR("flag unsupported for Device DAX %x", flags);
errno = EINVAL;
return NULL;
} else {
/* we are ignoring all of the flags */
flags = 0;
ssize_t actual_len = util_file_get_size(path);
if (actual_len < 0) {
ERR("unable to read Device DAX size");
errno = EINVAL;
return NULL;
}
if (len != 0 && len != (size_t)actual_len) {
ERR("Device DAX length must be either 0 or "
"the exact size of the device: %zu",
actual_len);
errno = EINVAL;
return NULL;
}
len = 0;
}
}
if (flags & PMEM_FILE_CREATE) {
if ((os_off_t)len < 0) {
ERR("invalid file length %zu", len);
errno = EINVAL;
return NULL;
}
open_flags |= O_CREAT;
}
if (flags & PMEM_FILE_EXCL)
open_flags |= O_EXCL;
if ((len != 0) && !(flags & PMEM_FILE_CREATE)) {
ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((len == 0) && (flags & PMEM_FILE_CREATE)) {
ERR("zero 'len' not allowed with PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) {
ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if (flags & PMEM_FILE_TMPFILE) {
if ((fd = util_tmpfile(path,
OS_DIR_SEP_STR"pmem.XXXXXX",
open_flags & O_EXCL)) < 0) {
LOG(2, "failed to create temporary file at \"%s\"",
path);
return NULL;
}
} else {
if ((fd = os_open(path, open_flags, mode)) < 0) {
ERR("!open %s", path);
return NULL;
}
if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL))
delete_on_err = 1;
}
if (flags & PMEM_FILE_CREATE) {
/*
* Always set length of file to 'len'.
* (May either extend or truncate existing file.)
*/
if (os_ftruncate(fd, (os_off_t)len) != 0) {
ERR("!ftruncate");
goto err;
}
if ((flags & PMEM_FILE_SPARSE) == 0) {
if ((errno = os_posix_fallocate(fd, 0,
(os_off_t)len)) != 0) {
ERR("!posix_fallocate");
goto err;
}
}
} else {
ssize_t actual_size = util_fd_get_size(fd);
if (actual_size < 0) {
ERR("stat %s: negative size", path);
errno = EINVAL;
goto err;
}
len = (size_t)actual_size;
}
void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX);
if (addr == NULL)
goto err;
if (mapped_lenp != NULL)
*mapped_lenp = len;
if (is_pmemp != NULL)
*is_pmemp = pmem_is_pmem(addr, len);
LOG(3, "returning %p", addr);
VALGRIND_REGISTER_PMEM_MAPPING(addr, len);
VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0);
(void) os_close(fd);
return addr;
err:
oerrno = errno;
(void) os_close(fd);
if (delete_on_err)
(void) os_unlink(path);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmem_map_file -- create or open the file and map it to memory
*/
void *
pmem_map_file(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp);
}
#else
/*
* pmem_map_fileW -- create or open the file and map it to memory
*/
void *
pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp) {
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp,
is_pmemp);
util_free_UTF8(upath);
return ret;
}
#endif
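/*
 * Illustrative sketch (not part of the original source): creating and
 * mapping a fresh pmem file with pmem_map_file(), then tearing the
 * mapping down with pmem_unmap(). The path and size are hypothetical.
 */
#if 0
static void
example_map_and_unmap(void)
{
	size_t mapped_len;
	int is_pmem;
	void *addr = pmem_map_file("/mnt/pmem/example", 4 << 20,
			PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (addr == NULL)
		return;	/* errno is set by pmem_map_file */
	/* ... use [addr, addr + mapped_len), persisting as shown above ... */
	pmem_unmap(addr, mapped_len);
}
#endif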
/*
* pmem_unmap -- unmap the specified region
*/
int
pmem_unmap(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
#ifndef _WIN32
util_range_unregister(addr, len);
#endif
VALGRIND_REMOVE_PMEM_MAPPING(addr, len);
return util_unmap(addr, len);
}
/*
* pmem_memmove -- memmove to pmem
*/
void *
pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
pmemdest, src, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy -- memcpy to pmem
*/
void *
pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
pmemdest, src, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset -- memset to pmem
*/
void *
pmem_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x",
pmemdest, c, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
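/*
 * Illustrative sketch (not part of the original source): batching several
 * flagged copies with PMEM_F_MEM_NODRAIN and issuing a single pmem_drain()
 * at the end, instead of draining after every call. Buffers hypothetical.
 */
#if 0
static void
example_batched_copies(char *dst, const char *src, size_t len)
{
	pmem_memcpy(dst, src, len, PMEM_F_MEM_NODRAIN);
	pmem_memcpy(dst + len, src, len, PMEM_F_MEM_NODRAIN);
	pmem_drain();	/* one drain covers both flushed copies */
}
#endif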
/*
* pmem_memmove_nodrain -- memmove to pmem without hw drain
*/
void *
pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy_nodrain -- memcpy to pmem without hw drain
*/
void *
pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memmove_persist -- memmove to pmem
*/
void *
pmem_memmove_persist(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy_persist -- memcpy to pmem
*/
void *
pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset_nodrain -- memset to pmem without hw drain
*/
void *
pmem_memset_nodrain(void *pmemdest, int c, size_t len)
{
LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset_persist -- memset to pmem
*/
void *
pmem_memset_persist(void *pmemdest, int c, size_t len)
{
LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* memmove_nodrain_libc -- (internal) memmove to pmem using libc
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
if (!(flags & PMEM_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem using libc
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
if (!(flags & PMEM_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* flush_empty -- (internal) do not flush the CPU cache
*/
static void
flush_empty(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_empty_nolog(addr, len);
}
/*
* fence_empty -- (internal) issue the fence instruction
*/
static void
fence_empty(void)
{
LOG(15, NULL);
VALGRIND_DO_FENCE;
}
/*
* pmem_init -- load-time initialization for pmem.c
*/
void
pmem_init(void)
{
LOG(3, NULL);
struct pmem2_arch_info info;
info.memmove_nodrain = NULL;
info.memset_nodrain = NULL;
info.flush = NULL;
info.fence = NULL;
info.flush_has_builtin_fence = 0;
pmem2_arch_init(&info);
int flush;
char *e = os_getenv("PMEM_NO_FLUSH");
if (e && (strcmp(e, "1") == 0)) {
flush = 0;
LOG(3, "Forced not flushing CPU_cache");
} else if (e && (strcmp(e, "0") == 0)) {
flush = 1;
LOG(3, "Forced flushing CPU_cache");
} else if (pmem2_auto_flush() == 1) {
flush = 0;
LOG(3, "Not flushing CPU_cache, eADR detected");
} else {
flush = 1;
LOG(3, "Flushing CPU cache");
}
Funcs.deep_flush = info.flush;
if (flush) {
Funcs.flush = info.flush;
Funcs.memmove_nodrain = info.memmove_nodrain;
Funcs.memset_nodrain = info.memset_nodrain;
if (info.flush_has_builtin_fence)
Funcs.fence = fence_empty;
else
Funcs.fence = info.fence;
} else {
Funcs.memmove_nodrain = info.memmove_nodrain_eadr;
Funcs.memset_nodrain = info.memset_nodrain_eadr;
Funcs.flush = flush_empty;
Funcs.fence = info.fence;
}
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
long long no_generic = 0;
if (ptr)
no_generic = atoll(ptr);
if (info.memmove_nodrain == NULL) {
if (no_generic) {
Funcs.memmove_nodrain = memmove_nodrain_libc;
LOG(3, "using libc memmove");
} else {
Funcs.memmove_nodrain = memmove_nodrain_generic;
LOG(3, "using generic memmove");
}
} else {
Funcs.memmove_nodrain = info.memmove_nodrain;
}
if (info.memset_nodrain == NULL) {
if (no_generic) {
Funcs.memset_nodrain = memset_nodrain_libc;
LOG(3, "using libc memset");
} else {
Funcs.memset_nodrain = memset_nodrain_generic;
LOG(3, "using generic memset");
}
} else {
Funcs.memset_nodrain = info.memset_nodrain;
}
if (Funcs.flush == flush_empty)
LOG(3, "not flushing CPU cache");
else if (Funcs.flush != Funcs.deep_flush)
FATAL("invalid flush function address");
pmem_os_init(&Is_pmem);
}
/*
* pmem_deep_persist -- perform deep persist on a memory range
*
 * It merely acts as a wrapper around an msync call in most cases; the only
 * exception is the case of an mmap'ed DAX device on Linux.
*/
int
pmem_deep_persist(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
pmem_deep_flush(addr, len);
return pmem_deep_drain(addr, len);
}
/*
* pmem_deep_drain -- perform deep drain on a memory range
*/
int
pmem_deep_drain(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return os_range_deep_common((uintptr_t)addr, len);
}
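/*
 * Illustrative sketch (not part of the original source): using
 * pmem_deep_persist() for data that must reach the media even on eADR
 * platforms, where the regular flush path is a no-op (see pmem_init()).
 * The buffers are hypothetical.
 */
#if 0
static void
example_deep_persist(void *dst, const void *src, size_t len)
{
	pmem_memcpy(dst, src, len, PMEM_F_MEM_NODRAIN);
	if (pmem_deep_persist(dst, len))
		perror("pmem_deep_persist");
}
#endif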
#if VG_PMEMCHECK_ENABLED
/*
* pmem_emit_log -- logs library and function names to pmemcheck store log
*/
void
pmem_emit_log(const char *func, int order)
{
util_emit_log("libpmem", func, order);
}
#endif
#if FAULT_INJECTION
void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmem_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 21,858 | 21.817328 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_windows.c -- pmem utilities with OS-specific implementation
*/
#include <memoryapi.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
#include "win_mmap.h"
#include "sys/mman.h"
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
typedef BOOL (WINAPI *PQVM)(
HANDLE, const void *,
enum WIN32_MEMORY_INFORMATION_CLASS, PVOID,
SIZE_T, PSIZE_T);
static PQVM Func_qvmi = NULL;
#endif
/*
* is_direct_mapped -- (internal) for each page in the given region
* checks with MM, if it's direct mapped.
*/
static int
is_direct_mapped(const void *begin, const void *end)
{
LOG(3, "begin %p end %p", begin, end);
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
int retval = 1;
WIN32_MEMORY_REGION_INFORMATION region_info;
SIZE_T bytes_returned;
if (Func_qvmi == NULL) {
LOG(4, "QueryVirtualMemoryInformation not supported, "
"assuming non-DAX.");
return 0;
}
const void *begin_aligned = (const void *)rounddown((intptr_t)begin,
Pagesize);
const void *end_aligned = (const void *)roundup((intptr_t)end,
Pagesize);
for (const void *page = begin_aligned;
page < end_aligned;
page = (const void *)((char *)page + Pagesize)) {
if (Func_qvmi(GetCurrentProcess(), page,
MemoryRegionInfo, ®ion_info,
sizeof(region_info), &bytes_returned)) {
retval = region_info.DirectMapped;
} else {
LOG(4, "QueryVirtualMemoryInformation failed, assuming "
"non-DAX. Last error: %08x", GetLastError());
retval = 0;
}
if (retval == 0) {
LOG(4, "page %p is not direct mapped", page);
break;
}
}
return retval;
#else
/* if the MM API is not available the safest answer is NO */
return 0;
#endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */
}
/*
* is_pmem_detect -- implement pmem_is_pmem()
*
* This function returns true only if the entire range can be confirmed
* as being direct access persistent memory. Finding any part of the
* range is not direct access, or failing to look up the information
* because it is unmapped or because any sort of error happens, just
* results in returning false.
*/
int
is_pmem_detect(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
if (len > UINTPTR_MAX - (uintptr_t)addr) {
len = UINTPTR_MAX - (uintptr_t)addr;
LOG(4, "limit len to %zu to not get beyond address space", len);
}
int retval = 1;
const void *begin = addr;
const void *end = (const void *)((char *)addr + len);
LOG(4, "begin %p end %p", begin, end);
AcquireSRWLockShared(&FileMappingQLock);
PFILE_MAPPING_TRACKER mt;
PMDK_SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) {
if (mt->BaseAddress >= end) {
LOG(4, "ignoring all mapped ranges beyond given range");
break;
}
if (mt->EndAddress <= begin) {
LOG(4, "skipping all mapped ranges before given range");
continue;
}
if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) {
LOG(4, "tracked range [%p, %p) is not direct mapped",
mt->BaseAddress, mt->EndAddress);
retval = 0;
break;
}
/*
* If there is a gap between the given region that we process
* currently and the mapped region in our tracking list, we
* need to process the gap by taking the long route of asking
* MM for each page in that range.
*/
if (begin < mt->BaseAddress &&
!is_direct_mapped(begin, mt->BaseAddress)) {
LOG(4, "untracked range [%p, %p) is not direct mapped",
begin, mt->BaseAddress);
retval = 0;
break;
}
/* push our begin to reflect what we have already processed */
begin = mt->EndAddress;
}
/*
* If we still have a range to verify, check with MM if the entire
* region is direct mapped.
*/
if (begin < end && !is_direct_mapped(begin, end)) {
LOG(4, "untracked end range [%p, %p) is not direct mapped",
begin, end);
retval = 0;
}
ReleaseSRWLockShared(&FileMappingQLock);
LOG(4, "returning %d", retval);
return retval;
}
/*
* pmem_map_register -- memory map file and register mapping
*/
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
/* there is no device dax on windows */
ASSERTeq(is_dev_dax, 0);
return util_map(fd, 0, len, MAP_SHARED, 0, 0, NULL);
}
/*
* pmem_os_init -- os-dependent part of pmem initialization
*/
void
pmem_os_init(is_pmem_func *func)
{
LOG(3, NULL);
*func = is_pmem_detect;
#if NTDDI_VERSION >= NTDDI_WIN10_RS1
Func_qvmi = (PQVM)GetProcAddress(
GetModuleHandle(TEXT("KernelBase.dll")),
"QueryVirtualMemoryInformation");
#endif
}
| 6,186 | 27.643519 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
#ifndef PMEM2_AUTO_FLUSH_WINDOWS_H
#define PMEM2_AUTO_FLUSH_WINDOWS_H 1
#define ACPI_SIGNATURE 0x41435049 /* hex value of ACPI signature */
#define NFIT_REV_SIGNATURE 0x5449464e /* hex value of htonl(NFIT) signature */
#define NFIT_STR_SIGNATURE "NFIT"
#define NFIT_SIGNATURE_LEN 4
#define NFIT_OEM_ID_LEN 6
#define NFIT_OEM_TABLE_ID_LEN 8
#define NFIT_MAX_STRUCTURES 8
#define PCS_RESERVED 3
#define PCS_RESERVED_2 4
#define PCS_TYPE_NUMBER 7
/* check if bit on 'bit' position in number 'num' is set */
#define CHECK_BIT(num, bit) (((num) >> (bit)) & 1)
/*
 * force 1-byte packing of structure members (no padding between fields)
*/
#pragma pack(1)
struct platform_capabilities
{
uint16_t type;
uint16_t length;
uint8_t highest_valid;
uint8_t reserved[PCS_RESERVED];
uint32_t capabilities;
uint8_t reserved2[PCS_RESERVED_2];
};
struct nfit_header
{
uint8_t signature[NFIT_SIGNATURE_LEN];
uint32_t length;
uint8_t revision;
uint8_t checksum;
uint8_t oem_id[NFIT_OEM_ID_LEN];
uint8_t oem_table_id[NFIT_OEM_TABLE_ID_LEN];
uint32_t oem_revision;
uint8_t creator_id[4];
uint32_t creator_revision;
uint32_t reserved;
};
#pragma pack()
#endif
| 1,215 | 22.843137 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush_linux.c -- deep_flush functionality
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "map.h"
#include "os.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
/*
* pmem2_deep_flush_write -- perform write to deep_flush file
* on given region_id
*/
int
pmem2_deep_flush_write(unsigned region_id)
{
LOG(3, "region_id %d", region_id);
char deep_flush_path[PATH_MAX];
int deep_flush_fd;
char rbuf[2];
if (util_snprintf(deep_flush_path, PATH_MAX,
"/sys/bus/nd/devices/region%u/deep_flush", region_id) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if ((deep_flush_fd = os_open(deep_flush_path, O_RDONLY)) < 0) {
LOG(1, "!os_open(\"%s\", O_RDONLY)", deep_flush_path);
return 0;
}
if (read(deep_flush_fd, rbuf, sizeof(rbuf)) != 2) {
LOG(1, "!read(%d)", deep_flush_fd);
goto end;
}
if (rbuf[0] == '0' && rbuf[1] == '\n') {
LOG(3, "Deep flushing not needed");
goto end;
}
os_close(deep_flush_fd);
if ((deep_flush_fd = os_open(deep_flush_path, O_WRONLY)) < 0) {
LOG(1, "Cannot open deep_flush file %s to write",
deep_flush_path);
return 0;
}
if (write(deep_flush_fd, "1", 1) != 1) {
LOG(1, "Cannot write to deep_flush file %d", deep_flush_fd);
goto end;
}
end:
os_close(deep_flush_fd);
return 0;
}
/*
* pmem2_deep_flush_dax -- reads file type for map and check
* if it is device dax or reg file, depend on file type
* performs proper flush operation
*/
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
int ret;
enum pmem2_file_type type = map->source.value.ftype;
if (type == PMEM2_FTYPE_REG) {
ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
if (ret) {
LOG(1, "cannot flush buffers addr %p len %zu",
ptr, size);
return ret;
}
} else if (type == PMEM2_FTYPE_DEVDAX) {
unsigned region_id;
int ret = pmem2_get_region_id(&map->source, ®ion_id);
if (ret < 0) {
LOG(1, "cannot find region id for dev %lu",
map->source.value.st_rdev);
return ret;
}
ret = pmem2_deep_flush_write(region_id);
if (ret) {
LOG(1, "cannot write to deep_flush file for region %d",
region_id);
return ret;
}
} else {
ASSERT(0);
}
return 0;
}
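/*
 * Illustrative sketch (not part of the original source): the functions
 * above are reached through the deep_flush_fn hook stored in struct
 * pmem2_map (see map.h). The map pointer and range are hypothetical.
 */
#if 0
static int
example_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
	int ret = map->deep_flush_fn(map, ptr, size);
	if (ret)
		LOG(1, "deep flush failed for map %p", map);
	return ret;
}
#endif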
| 2,395 | 20.392857 | 67 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.h -- internal definitions for pmem2_config
*/
#ifndef PMEM2_CONFIG_H
#define PMEM2_CONFIG_H
#include "libpmem2.h"
#define PMEM2_GRANULARITY_INVALID ((enum pmem2_granularity) (-1))
#define PMEM2_ADDRESS_ANY 0 /* default value of the address request type */
struct pmem2_config {
/* offset from the beginning of the file */
size_t offset;
size_t length; /* length of the mapping */
/* persistence granularity requested by user */
void *addr; /* address of the mapping */
int addr_request; /* address request type */
enum pmem2_granularity requested_max_granularity;
enum pmem2_sharing_type sharing; /* the way the file will be mapped */
unsigned protection_flag;
};
void pmem2_config_init(struct pmem2_config *cfg);
int pmem2_config_validate_length(const struct pmem2_config *cfg,
size_t file_len, size_t alignment);
int pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
const struct pmem2_source *src);
#endif /* PMEM2_CONFIG_H */
| 1,070 | 28.75 | 75 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map.h -- internal definitions for libpmem2
*/
#ifndef PMEM2_MAP_H
#define PMEM2_MAP_H
#include <stddef.h>
#include <stdbool.h>
#include "libpmem2.h"
#include "os.h"
#include "source.h"
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*pmem2_deep_flush_fn)(struct pmem2_map *map,
void *ptr, size_t size);
struct pmem2_map {
void *addr; /* base address */
size_t reserved_length; /* length of the mapping reservation */
size_t content_length; /* length of the mapped content */
/* effective persistence granularity */
enum pmem2_granularity effective_granularity;
pmem2_persist_fn persist_fn;
pmem2_flush_fn flush_fn;
pmem2_drain_fn drain_fn;
pmem2_deep_flush_fn deep_flush_fn;
pmem2_memmove_fn memmove_fn;
pmem2_memcpy_fn memcpy_fn;
pmem2_memset_fn memset_fn;
struct pmem2_source source;
};
enum pmem2_granularity get_min_granularity(bool eADR, bool is_pmem,
enum pmem2_sharing_type sharing);
struct pmem2_map *pmem2_map_find(const void *addr, size_t len);
int pmem2_register_mapping(struct pmem2_map *map);
int pmem2_unregister_mapping(struct pmem2_map *map);
void pmem2_map_init(void);
void pmem2_map_fini(void);
int pmem2_validate_offset(const struct pmem2_config *cfg,
size_t *offset, size_t alignment);
#ifdef __cplusplus
}
#endif
#endif /* map.h */
| 1,426 | 22.016129 | 67 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.h -- functions for deep flush functionality
*/
#ifndef PMEM2_DEEP_FLUSH_H
#define PMEM2_DEEP_FLUSH_H 1
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
int pmem2_deep_flush_write(unsigned region_id);
int pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size);
#ifdef __cplusplus
}
#endif
#endif
| 644 | 22.035714 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist.c -- pmem2_get_[persist|flush|drain]_fn
*/
#include <errno.h>
#include <stdlib.h>
#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "os.h"
#include "persist.h"
#include "deep_flush.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
static struct pmem2_arch_info Info;
/*
* memmove_nodrain_libc -- (internal) memmove to pmem using libc
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem using libc
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* pmem2_persist_init -- initialize persist module
*/
void
pmem2_persist_init(void)
{
Info.memmove_nodrain = NULL;
Info.memset_nodrain = NULL;
Info.memmove_nodrain_eadr = NULL;
Info.memset_nodrain_eadr = NULL;
Info.flush = NULL;
Info.fence = NULL;
Info.flush_has_builtin_fence = 0;
pmem2_arch_init(&Info);
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
long long no_generic = 0;
if (ptr)
no_generic = atoll(ptr);
if (Info.memmove_nodrain == NULL) {
if (no_generic) {
Info.memmove_nodrain = memmove_nodrain_libc;
Info.memmove_nodrain_eadr = memmove_nodrain_libc;
LOG(3, "using libc memmove");
} else {
Info.memmove_nodrain = memmove_nodrain_generic;
Info.memmove_nodrain_eadr = memmove_nodrain_generic;
LOG(3, "using generic memmove");
}
}
if (Info.memset_nodrain == NULL) {
if (no_generic) {
Info.memset_nodrain = memset_nodrain_libc;
Info.memset_nodrain_eadr = memset_nodrain_libc;
LOG(3, "using libc memset");
} else {
Info.memset_nodrain = memset_nodrain_generic;
Info.memset_nodrain_eadr = memset_nodrain_generic;
LOG(3, "using generic memset");
}
}
}
/*
* pmem2_drain -- wait for any PM stores to drain from HW buffers
*/
static void
pmem2_drain(void)
{
LOG(15, NULL);
Info.fence();
}
/*
* pmem2_log_flush -- log the flush attempt for the given range
*/
static inline void
pmem2_log_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
}
/*
* pmem2_flush_nop -- NOP version of the flush routine, used in cases where
* memory behind the mapping is already in persistence domain
*/
static void
pmem2_flush_nop(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
/* nothing more to do, other than telling pmemcheck about it */
VALGRIND_DO_FLUSH(addr, len);
}
/*
* pmem2_flush_cpu_cache -- flush processor cache for the given range
*/
static void
pmem2_flush_cpu_cache(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
Info.flush(addr, len);
}
/*
* pmem2_persist_noflush -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_noflush(const void *addr, size_t len)
{
pmem2_flush_nop(addr, len);
pmem2_drain();
}
/*
* pmem2_persist_cpu_cache -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_cpu_cache(const void *addr, size_t len)
{
pmem2_flush_cpu_cache(addr, len);
pmem2_drain();
}
/*
* pmem2_flush_file_buffers -- flush CPU and OS caches for the given range
*/
static int
pmem2_flush_file_buffers(const void *addr, size_t len, int autorestart)
{
int olderrno = errno;
pmem2_log_flush(addr, len);
/*
* Flushing using OS-provided mechanisms requires that the address
* be a multiple of the page size.
* Align address down and change len so that [addr, addr + len) still
* contains the initial range.
*/
/* round address down to page boundary */
uintptr_t new_addr = ALIGN_DOWN((uintptr_t)addr, Pagesize);
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr - new_addr;
addr = (const void *)new_addr;
int ret = 0;
/*
* Find all the mappings overlapping with the [addr, addr + len) range
* and flush them, one by one.
*/
do {
struct pmem2_map *map = pmem2_map_find(addr, len);
if (!map)
break;
size_t flush;
size_t remaining = map->reserved_length;
if (map->addr < addr) {
/*
* Addr is inside of the mapping, so we have to decrease
* the remaining length by an offset from the start
* of our mapping.
*/
remaining -= (uintptr_t)addr - (uintptr_t)map->addr;
} else if (map->addr == addr) {
/* perfect match, there's nothing to do in this case */
} else {
/*
* map->addr > addr, so we have to skip the hole
* between addr and map->addr.
*/
len -= (uintptr_t)map->addr - (uintptr_t)addr;
addr = map->addr;
}
if (len > remaining)
flush = remaining;
else
flush = len;
int ret1 = pmem2_flush_file_buffers_os(map, addr, flush,
autorestart);
if (ret1 != 0)
ret = ret1;
addr = ((const char *)addr) + flush;
len -= flush;
} while (len > 0);
errno = olderrno;
return ret;
}
/*
 * pmem2_persist_pages -- flush OS file buffers (msync) for the given range
*/
static void
pmem2_persist_pages(const void *addr, size_t len)
{
/*
* Restarting on EINTR in general is a bad idea, but we don't have
* any way to communicate the failure outside.
*/
const int autorestart = 1;
int ret = pmem2_flush_file_buffers(addr, len, autorestart);
if (ret) {
/*
* 1) There's no way to propagate this error. Silently ignoring
* it would lead to data corruption.
* 2) non-pmem code path shouldn't be used in production.
*
* The only sane thing to do is to crash the application. Sorry.
*/
abort();
}
}
/*
* pmem2_drain_nop -- variant of pmem2_drain for page granularity;
* it is a NOP because the flush part has built-in drain
*/
static void
pmem2_drain_nop(void)
{
LOG(15, NULL);
}
/*
* pmem2_deep_flush_page -- do nothing - pmem2_persist_fn already did msync
*/
int
pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
return 0;
}
/*
* pmem2_deep_flush_cache -- flush buffers for fsdax or write
* to deep_flush for DevDax
*/
int
pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
enum pmem2_file_type type = map->source.value.ftype;
/*
* XXX: this should be moved to pmem2_deep_flush_dax
* while refactoring abstraction
*/
if (type == PMEM2_FTYPE_DEVDAX)
pmem2_persist_cpu_cache(ptr, size);
int ret = pmem2_deep_flush_dax(map, ptr, size);
if (ret < 0) {
LOG(1, "cannot perform deep flush cache for map %p", map);
return ret;
}
return 0;
}
/*
* pmem2_deep_flush_byte -- flush cpu cache and perform deep flush for dax
*/
int
pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
if (map->source.type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support deep flush");
return PMEM2_E_NOSUPP;
}
ASSERT(map->source.type == PMEM2_SOURCE_FD ||
map->source.type == PMEM2_SOURCE_HANDLE);
enum pmem2_file_type type = map->source.value.ftype;
/*
* XXX: this should be moved to pmem2_deep_flush_dax
* while refactoring abstraction
*/
if (type == PMEM2_FTYPE_DEVDAX)
pmem2_persist_cpu_cache(ptr, size);
int ret = pmem2_deep_flush_dax(map, ptr, size);
if (ret < 0) {
LOG(1, "cannot perform deep flush byte for map %p", map);
return ret;
}
return 0;
}
/*
* pmem2_set_flush_fns -- set function pointers related to flushing
*/
void
pmem2_set_flush_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->persist_fn = pmem2_persist_pages;
map->flush_fn = pmem2_persist_pages;
map->drain_fn = pmem2_drain_nop;
map->deep_flush_fn = pmem2_deep_flush_page;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->persist_fn = pmem2_persist_cpu_cache;
map->flush_fn = pmem2_flush_cpu_cache;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_cache;
break;
case PMEM2_GRANULARITY_BYTE:
map->persist_fn = pmem2_persist_noflush;
map->flush_fn = pmem2_flush_nop;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_byte;
break;
default:
abort();
}
}
/*
* pmem2_get_persist_fn - return a pointer to a function responsible for
* persisting data in range owned by pmem2_map
*/
pmem2_persist_fn
pmem2_get_persist_fn(struct pmem2_map *map)
{
return map->persist_fn;
}
/*
* pmem2_get_flush_fn - return a pointer to a function responsible for
* flushing data in range owned by pmem2_map
*/
pmem2_flush_fn
pmem2_get_flush_fn(struct pmem2_map *map)
{
return map->flush_fn;
}
/*
* pmem2_get_drain_fn - return a pointer to a function responsible for
* draining flushes in range owned by pmem2_map
*/
pmem2_drain_fn
pmem2_get_drain_fn(struct pmem2_map *map)
{
return map->drain_fn;
}
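/*
 * Illustrative sketch (not part of the original source): the intended
 * calling pattern for the getters above -- fetch the function pointers
 * once per mapping and reuse them. The map and range are hypothetical.
 */
#if 0
static void
example_persist_via_map(struct pmem2_map *map, void *addr, size_t len)
{
	pmem2_flush_fn flush = pmem2_get_flush_fn(map);
	pmem2_drain_fn drain = pmem2_get_drain_fn(map);
	flush(addr, len);	/* flush + drain together are equivalent */
	drain();		/* to one call through the persist_fn */
}
#endif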
/*
* pmem2_memmove_nonpmem -- mem[move|cpy] followed by an msync
*/
static void *
pmem2_memmove_nonpmem(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_nonpmem -- memset followed by an msync
*/
static void *
pmem2_memset_nonpmem(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove -- mem[move|cpy] to pmem
*/
static void *
pmem2_memmove(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset -- memset to pmem
*/
static void *
pmem2_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove_eadr -- mem[move|cpy] to pmem, platform supports eADR
*/
static void *
pmem2_memmove_eadr(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain_eadr(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_eadr -- memset to pmem, platform supports eADR
*/
static void *
pmem2_memset_eadr(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain_eadr(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_set_mem_fns -- set function pointers related to mem[move|cpy|set]
*/
void
pmem2_set_mem_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->memmove_fn = pmem2_memmove_nonpmem;
map->memcpy_fn = pmem2_memmove_nonpmem;
map->memset_fn = pmem2_memset_nonpmem;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->memmove_fn = pmem2_memmove;
map->memcpy_fn = pmem2_memmove;
map->memset_fn = pmem2_memset;
break;
case PMEM2_GRANULARITY_BYTE:
map->memmove_fn = pmem2_memmove_eadr;
map->memcpy_fn = pmem2_memmove_eadr;
map->memset_fn = pmem2_memset_eadr;
break;
default:
abort();
}
}
/*
 * pmem2_get_memmove_fn - return a pointer to a function responsible for
 * moving data in range owned by pmem2_map
*/
pmem2_memmove_fn
pmem2_get_memmove_fn(struct pmem2_map *map)
{
return map->memmove_fn;
}
/*
 * pmem2_get_memcpy_fn - return a pointer to a function responsible for
 * copying data in range owned by pmem2_map
*/
pmem2_memcpy_fn
pmem2_get_memcpy_fn(struct pmem2_map *map)
{
return map->memcpy_fn;
}
/*
 * pmem2_get_memset_fn - return a pointer to a function responsible for
 * setting memory in range owned by pmem2_map
*/
pmem2_memset_fn
pmem2_get_memset_fn(struct pmem2_map *map)
{
return map->memset_fn;
}
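/*
 * Illustrative sketch (not part of the original source): using the
 * granularity-aware memory functions resolved by pmem2_set_mem_fns().
 * The map and buffers are hypothetical.
 */
#if 0
static void
example_mem_fns(struct pmem2_map *map, char *dst, const char *src, size_t len)
{
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	memset_fn(dst, 0, len, PMEM2_F_MEM_NODRAIN);
	memcpy_fn(dst, src, len, 0);	/* this call drains, covering both */
}
#endif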
#if VG_PMEMCHECK_ENABLED
/*
* pmem2_emit_log -- logs library and function names to pmemcheck store log
*/
void
pmem2_emit_log(const char *func, int order)
{
util_emit_log("libpmem2", func, order);
}
#endif
| 13,665 | 21.58843 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist_posix.c -- POSIX-specific part of persist implementation
*/
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
/*
* pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
* range
*/
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
int autorestart)
{
/*
* msync accepts addresses aligned to the page boundary, so we may sync
* more and part of it may have been marked as undefined/inaccessible.
* Msyncing such memory is not a bug, so as a workaround temporarily
* disable error reporting.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
int ret;
do {
ret = msync((void *)addr, len, MS_SYNC);
if (ret < 0) {
ERR("!msync");
} else {
/* full flush */
VALGRIND_DO_PERSIST((uintptr_t)addr, len);
}
} while (autorestart && ret < 0 && errno == EINTR);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
if (ret)
return PMEM2_E_ERRNO;
return 0;
}
| 1,126 | 21.098039 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "source.h"
/*
* pmem2_get_type_from_stat -- determine type of file based on output of stat
* syscall
*/
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
if (S_ISREG(st->st_mode)) {
*type = PMEM2_FTYPE_REG;
return 0;
}
if (S_ISDIR(st->st_mode)) {
*type = PMEM2_FTYPE_DIR;
return 0;
}
if (!S_ISCHR(st->st_mode)) {
ERR("file type 0%o not supported", st->st_mode & S_IFMT);
return PMEM2_E_INVALID_FILE_TYPE;
}
char spath[PATH_MAX];
int ret = util_snprintf(spath, PATH_MAX,
"/sys/dev/char/%u:%u/subsystem",
os_major(st->st_rdev), os_minor(st->st_rdev));
if (ret < 0) {
/* impossible */
ERR("!snprintf");
ASSERTinfo(0, "snprintf failed");
return PMEM2_E_ERRNO;
}
LOG(4, "device subsystem path \"%s\"", spath);
char npath[PATH_MAX];
char *rpath = realpath(spath, npath);
if (rpath == NULL) {
ERR("!realpath \"%s\"", spath);
return PMEM2_E_ERRNO;
}
char *basename = strrchr(rpath, '/');
if (!basename || strcmp("dax", basename + 1) != 0) {
LOG(3, "%s path does not match device dax prefix path", rpath);
return PMEM2_E_INVALID_FILE_TYPE;
}
*type = PMEM2_FTYPE_DEVDAX;
return 0;
}
| 1,507 | 20.239437 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/source_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* source_windows.c -- windows specific pmem2_source implementation
*/
#include <Windows.h>
#include "config.h"
#include "libpmem2.h"
#include "config.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
*src = NULL;
if (fd < 0)
return PMEM2_E_INVALID_FILE_HANDLE;
HANDLE handle = (HANDLE)_get_osfhandle(fd);
if (handle == INVALID_HANDLE_VALUE) {
/*
* _get_osfhandle aborts in an error case, so technically
		 * this is dead code. But according to MSDN it sets
		 * errno on failure, so we can return it in case some
		 * "windows magic" happens and this function "accidentally"
		 * does not abort.
*/
ERR("!_get_osfhandle");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
return pmem2_source_from_handle(src, handle);
}
/*
* pmem2_win_stat -- retrieve information about handle
*/
static int
pmem2_win_stat(HANDLE handle, BY_HANDLE_FILE_INFORMATION *info)
{
if (!GetFileInformationByHandle(handle, info)) {
ERR("!!GetFileInformationByHandle");
if (GetLastError() == ERROR_INVALID_HANDLE)
return PMEM2_E_INVALID_FILE_HANDLE;
else
return pmem2_lasterror_to_err();
}
if (info->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
ERR(
"using directory doesn't make any sense in context of pmem2");
return PMEM2_E_INVALID_FILE_TYPE;
}
return 0;
}
/*
 * pmem2_source_from_handle -- create a new data source instance from a handle
*/
int
pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle)
{
*src = NULL;
int ret;
if (handle == INVALID_HANDLE_VALUE)
return PMEM2_E_INVALID_FILE_HANDLE;
BY_HANDLE_FILE_INFORMATION file_info;
ret = pmem2_win_stat(handle, &file_info);
if (ret)
return ret;
/* XXX: winapi doesn't provide option to get open flags from HANDLE */
struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
if (ret)
return ret;
ASSERTne(srcp, NULL);
srcp->type = PMEM2_SOURCE_HANDLE;
srcp->value.handle = handle;
*src = srcp;
return 0;
}
/*
* pmem2_source_size -- get a size of the file handle stored in the provided
* source
*/
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
LOG(3, "type %d", src->type);
int ret;
if (src->type == PMEM2_SOURCE_ANON) {
*size = src->value.size;
return 0;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
BY_HANDLE_FILE_INFORMATION info;
ret = pmem2_win_stat(src->value.handle, &info);
if (ret)
return ret;
*size = ((size_t)info.nFileSizeHigh << 32) | info.nFileSizeLow;
LOG(4, "file length %zu", *size);
return 0;
}
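/*
 * Illustrative sketch (not part of the original source): creating a
 * source from a file handle and querying its size, which composes the
 * 64-bit value from nFileSizeHigh/nFileSizeLow as shown above. The file
 * name is hypothetical.
 */
#if 0
static void
example_source_size(void)
{
	HANDLE h = CreateFileA("example.pmem",
			GENERIC_READ | GENERIC_WRITE, 0, NULL,
			OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
	struct pmem2_source *src;
	if (pmem2_source_from_handle(&src, h) == 0) {
		size_t size;
		(void) pmem2_source_size(src, &size);
	}
}
#endif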
/*
* pmem2_source_alignment -- get alignment from the system info
*/
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
LOG(3, "type %d", src->type);
SYSTEM_INFO info;
GetSystemInfo(&info);
*alignment = (size_t)info.dwAllocationGranularity;
if (!util_is_pow2(*alignment)) {
ERR("alignment (%zu) has to be a power of two", *alignment);
return PMEM2_E_INVALID_ALIGNMENT_VALUE;
}
LOG(4, "alignment %zu", *alignment);
return 0;
}
| 3,248 | 20.235294 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_none.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include <errno.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
ERR("Cannot read Device Dax alignment - ndctl is not available");
return PMEM2_E_NOSUPP;
}
/*
* pmem2_device_dax_size -- checks the size of a given dax device from
* given source
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
ERR("Cannot read Device Dax size - ndctl is not available");
return PMEM2_E_NOSUPP;
}
| 727 | 20.411765 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* auto_flush_linux.c -- Linux auto flush detection
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "os.h"
#include "fs.h"
#include "auto_flush.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
#define PERSISTENCE_DOMAIN "persistence_domain"
#define DOMAIN_VALUE_LEN 32
/*
* check_cpu_cache -- (internal) check if file contains "cpu_cache" entry
*/
static int
check_cpu_cache(const char *domain_path)
{
LOG(3, "domain_path: %s", domain_path);
char domain_value[DOMAIN_VALUE_LEN];
int domain_fd;
int cpu_cache = 0;
if ((domain_fd = os_open(domain_path, O_RDONLY)) < 0) {
LOG(1, "!open(\"%s\", O_RDONLY)", domain_path);
goto end;
}
ssize_t len = read(domain_fd, domain_value,
DOMAIN_VALUE_LEN);
if (len < 0) {
ERR("!read(%d, %p, %d)", domain_fd,
domain_value, DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
} else if (len == 0) {
errno = EIO;
ERR("read(%d, %p, %d) empty string",
domain_fd, domain_value,
DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
} else if (domain_value[len - 1] != '\n') {
ERR("!read(%d, %p, %d) invalid format",
domain_fd, domain_value,
DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
}
domain_value[len - 1] = '\0';
LOG(15, "detected persistent_domain: %s", domain_value);
if (strcmp(domain_value, "cpu_cache") == 0) {
LOG(15, "cpu_cache in persistent_domain: %s", domain_path);
cpu_cache = 1;
} else {
LOG(15, "cpu_cache not in persistent_domain: %s", domain_path);
cpu_cache = 0;
}
end:
if (domain_fd >= 0)
os_close(domain_fd);
return cpu_cache;
}
/*
* check_domain_in_region -- (internal) check if region
* contains persistence_domain file
*/
static int
check_domain_in_region(const char *region_path)
{
LOG(3, "region_path: %s", region_path);
struct fs *reg = NULL;
struct fs_entry *reg_entry;
char domain_path[PATH_MAX];
int cpu_cache = 0;
reg = fs_new(region_path);
if (reg == NULL) {
ERR("!fs_new: \"%s\"", region_path);
cpu_cache = -1;
goto end;
}
while ((reg_entry = fs_read(reg)) != NULL) {
/*
* persistence_domain has to be a file type entry
* and it has to be first level child for region;
* there is no need to run into deeper levels
*/
if (reg_entry->type != FS_ENTRY_FILE ||
strcmp(reg_entry->name,
PERSISTENCE_DOMAIN) != 0 ||
reg_entry->level != 1)
continue;
int ret = util_snprintf(domain_path, PATH_MAX,
"%s/"PERSISTENCE_DOMAIN, region_path);
if (ret < 0) {
ERR("!snprintf");
cpu_cache = -1;
goto end;
}
cpu_cache = check_cpu_cache(domain_path);
}
end:
if (reg)
fs_delete(reg);
return cpu_cache;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush for all regions
*
* Traverse "/sys/bus/nd/devices" path to find all the nvdimm regions,
* then for each region checks if "persistence_domain" file exists and
* contains "cpu_cache" string.
* If for any region "persistence_domain" entry does not exists, or its
* context is not as expected, assume eADR is not available on this platform.
*/
int
pmem2_auto_flush(void)
{
LOG(15, NULL);
char *device_path;
int cpu_cache = 0;
device_path = BUS_DEVICE_PATH;
os_stat_t sdev;
if (os_stat(device_path, &sdev) != 0 ||
S_ISDIR(sdev.st_mode) == 0) {
LOG(3, "eADR not supported");
return cpu_cache;
}
struct fs *dev = fs_new(device_path);
if (dev == NULL) {
ERR("!fs_new: \"%s\"", device_path);
return -1;
}
struct fs_entry *dev_entry;
while ((dev_entry = fs_read(dev)) != NULL) {
/*
* Skip if not a symlink, because we expect that
* region on sysfs path is a symlink.
* Skip if depth is different than 1, because region
* we are interested in should be the first level
* child for device.
*/
if ((dev_entry->type != FS_ENTRY_SYMLINK) ||
!strstr(dev_entry->name, "region") ||
dev_entry->level != 1)
continue;
LOG(15, "Start traversing region: %s", dev_entry->path);
cpu_cache = check_domain_in_region(dev_entry->path);
if (cpu_cache != 1)
goto end;
}
end:
fs_delete(dev);
return cpu_cache;
}
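/*
 * Illustrative sketch (not part of the original source): interpreting the
 * tri-state result of pmem2_auto_flush(), as pmem_init() in libpmem does.
 */
#if 0
static void
example_interpret_auto_flush(void)
{
	int eadr = pmem2_auto_flush();
	if (eadr == 1) {
		/* eADR: stores persist once globally visible */
	} else if (eadr == 0) {
		/* ADR only: CPU caches must be flushed explicitly */
	} else {
		/* negative value: sysfs traversal failed */
	}
}
#endif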
| 4,214 | 21.783784 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.c -- pmem2_config implementation
*/
#include <unistd.h>
#include "alloc.h"
#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
/*
* pmem2_config_init -- initialize cfg structure.
*/
void
pmem2_config_init(struct pmem2_config *cfg)
{
cfg->offset = 0;
cfg->length = 0;
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
cfg->requested_max_granularity = PMEM2_GRANULARITY_INVALID;
cfg->sharing = PMEM2_SHARED;
cfg->protection_flag = PMEM2_PROT_READ | PMEM2_PROT_WRITE;
}
/*
* pmem2_config_new -- allocates and initialize cfg structure.
*/
int
pmem2_config_new(struct pmem2_config **cfg)
{
int ret;
*cfg = pmem2_malloc(sizeof(**cfg), &ret);
if (ret)
return ret;
ASSERTne(cfg, NULL);
pmem2_config_init(*cfg);
return 0;
}
/*
* pmem2_config_delete -- deallocate cfg structure.
*/
int
pmem2_config_delete(struct pmem2_config **cfg)
{
Free(*cfg);
*cfg = NULL;
return 0;
}
/*
* pmem2_config_set_required_store_granularity -- set granularity
* requested by user in the pmem2_config structure
*/
int
pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
enum pmem2_granularity g)
{
switch (g) {
case PMEM2_GRANULARITY_BYTE:
case PMEM2_GRANULARITY_CACHE_LINE:
case PMEM2_GRANULARITY_PAGE:
break;
default:
ERR("unknown granularity value %d", g);
return PMEM2_E_GRANULARITY_NOT_SUPPORTED;
}
cfg->requested_max_granularity = g;
return 0;
}
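/*
 * Illustrative sketch (not part of the original source): the typical
 * config-building sequence around the setter above. pmem2_map() itself
 * lives outside this file; the source and map objects are hypothetical.
 */
#if 0
static void
example_build_config(void)
{
	struct pmem2_config *cfg;
	if (pmem2_config_new(&cfg))
		return;
	(void) pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE);	/* weakest acceptable */
	/* ... pmem2_map(cfg, src, &map) would go here ... */
	(void) pmem2_config_delete(&cfg);
}
#endif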
/*
* pmem2_config_set_offset -- set offset in the pmem2_config structure
*/
int
pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset)
{
/* mmap func takes offset as a type of off_t */
if (offset > (size_t)INT64_MAX) {
ERR("offset is greater than INT64_MAX");
return PMEM2_E_OFFSET_OUT_OF_RANGE;
}
cfg->offset = offset;
return 0;
}
/*
* pmem2_config_set_length -- set length in the pmem2_config structure
*/
int
pmem2_config_set_length(struct pmem2_config *cfg, size_t length)
{
cfg->length = length;
return 0;
}
/*
* pmem2_config_validate_length -- validate that length in the pmem2_config
* structure is consistent with the file length
*/
int
pmem2_config_validate_length(const struct pmem2_config *cfg,
size_t file_len, size_t alignment)
{
ASSERTne(alignment, 0);
if (file_len == 0) {
ERR("file length is equal 0");
return PMEM2_E_SOURCE_EMPTY;
}
if (cfg->length % alignment) {
ERR("length is not a multiple of %lu", alignment);
return PMEM2_E_LENGTH_UNALIGNED;
}
/* overflow check */
const size_t end = cfg->offset + cfg->length;
if (end < cfg->offset) {
ERR("overflow of offset and length");
return PMEM2_E_MAP_RANGE;
}
/* let's align the file size */
size_t aligned_file_len = file_len;
if (file_len % alignment)
aligned_file_len = ALIGN_UP(file_len, alignment);
/* validate mapping fit into the file */
if (end > aligned_file_len) {
ERR("mapping larger than file size");
return PMEM2_E_MAP_RANGE;
}
return 0;
}
/*
* pmem2_config_set_sharing -- set the way pmem2_map will map the file
*/
int
pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type)
{
switch (type) {
case PMEM2_SHARED:
case PMEM2_PRIVATE:
cfg->sharing = type;
break;
default:
ERR("unknown sharing value %d", type);
return PMEM2_E_INVALID_SHARING_VALUE;
}
return 0;
}
/*
* pmem2_config_validate_addr_alignment -- validate that addr in the
* pmem2_config structure is a multiple of the alignment required for
* specific cfg
*/
int
pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
const struct pmem2_source *src)
{
/* cannot NULL % alignment, NULL is valid */
if (!cfg->addr)
return 0;
size_t alignment;
int ret = pmem2_source_alignment(src, &alignment);
if (ret)
return ret;
ASSERTne(alignment, 0);
if ((size_t)cfg->addr % alignment) {
ERR("address %p is not a multiple of %lu", cfg->addr,
alignment);
return PMEM2_E_ADDRESS_UNALIGNED;
}
return 0;
}
/*
* pmem2_config_set_address -- set addr and addr_request in the config
* struct
*/
int
pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
enum pmem2_address_request_type request_type)
{
if (request_type != PMEM2_ADDRESS_FIXED_NOREPLACE) {
ERR("invalid address request_type 0x%x", request_type);
return PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE;
}
if (request_type == PMEM2_ADDRESS_FIXED_NOREPLACE && !addr) {
ERR(
"cannot use address request type PMEM2_ADDRESS_FIXED_NOREPLACE with addr being NULL");
return PMEM2_E_ADDRESS_NULL;
}
cfg->addr = addr;
cfg->addr_request = (int)request_type;
return 0;
}
/*
* pmem2_config_set_vm_reservation -- set vm_reservation in the
* pmem2_config structure
*/
int
pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
struct pmem2_vm_reservation *rsv, size_t offset)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_config_clear_address -- reset addr and addr_request in the config
* to the default values
*/
void
pmem2_config_clear_address(struct pmem2_config *cfg)
{
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
}
/*
* pmem2_config_set_protection -- set protection flags
* in the config struct
*/
int
pmem2_config_set_protection(struct pmem2_config *cfg,
unsigned prot)
{
unsigned unknown_prot = prot & ~(PMEM2_PROT_READ | PMEM2_PROT_WRITE |
PMEM2_PROT_EXEC | PMEM2_PROT_NONE);
if (unknown_prot) {
ERR("invalid flag %u", prot);
return PMEM2_E_INVALID_PROT_FLAG;
}
cfg->protection_flag = prot;
return 0;
}
| 5,603 | 20.227273 | 89 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/ravl_interval.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.h -- internal definitions for ravl_interval
*/
#ifndef RAVL_INTERVAL_H
#define RAVL_INTERVAL_H
#include "libpmem2.h"
#include "os_thread.h"
#include "ravl.h"
struct ravl_interval;
struct ravl_interval_node;
typedef size_t ravl_interval_min(void *addr);
typedef size_t ravl_interval_max(void *addr);
struct ravl_interval *ravl_interval_new(ravl_interval_min *min,
	ravl_interval_max *max);
void ravl_interval_delete(struct ravl_interval *ri);
int ravl_interval_insert(struct ravl_interval *ri, void *addr);
int ravl_interval_remove(struct ravl_interval *ri,
struct ravl_interval_node *rin);
struct ravl_interval_node *ravl_interval_find_equal(struct ravl_interval *ri,
void *addr);
struct ravl_interval_node *ravl_interval_find(struct ravl_interval *ri,
void *addr);
void *ravl_interval_data(struct ravl_interval_node *rin);
#endif
| 947 | 27.727273 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/memops_generic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* memops_generic.c -- architecture-independent memmove & memset fallback
*
 * This fallback is needed to fulfill the guarantee that pmem_mem[cpy|set|move]
 * will use at least 8-byte stores (for 8-byte-aligned buffers and sizes),
 * even when an accelerated implementation is missing or disabled.
 * This guarantee is needed to maintain correctness, e.g. in pmemobj.
 * Libc may do the same, but that behavior is not documented, so we can't
 * rely on it.
*/
#include <stddef.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
* pmem2_flush_flags -- internal wrapper around pmem_flush
*/
static inline void
pmem2_flush_flags(const void *addr, size_t len, unsigned flags,
flush_func flush)
{
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(addr, len);
}
/*
* cpy128 -- (internal) copy 128 bytes from src to dst
*/
static force_inline void
cpy128(uint64_t *dst, const uint64_t *src)
{
/*
* We use atomics here just to be sure compiler will not split stores.
* Order of stores doesn't matter.
*/
uint64_t tmp[16];
util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
util_atomic_load_explicit64(&src[8], &tmp[8], memory_order_relaxed);
util_atomic_load_explicit64(&src[9], &tmp[9], memory_order_relaxed);
util_atomic_load_explicit64(&src[10], &tmp[10], memory_order_relaxed);
util_atomic_load_explicit64(&src[11], &tmp[11], memory_order_relaxed);
util_atomic_load_explicit64(&src[12], &tmp[12], memory_order_relaxed);
util_atomic_load_explicit64(&src[13], &tmp[13], memory_order_relaxed);
util_atomic_load_explicit64(&src[14], &tmp[14], memory_order_relaxed);
util_atomic_load_explicit64(&src[15], &tmp[15], memory_order_relaxed);
util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
util_atomic_store_explicit64(&dst[8], tmp[8], memory_order_relaxed);
util_atomic_store_explicit64(&dst[9], tmp[9], memory_order_relaxed);
util_atomic_store_explicit64(&dst[10], tmp[10], memory_order_relaxed);
util_atomic_store_explicit64(&dst[11], tmp[11], memory_order_relaxed);
util_atomic_store_explicit64(&dst[12], tmp[12], memory_order_relaxed);
util_atomic_store_explicit64(&dst[13], tmp[13], memory_order_relaxed);
util_atomic_store_explicit64(&dst[14], tmp[14], memory_order_relaxed);
util_atomic_store_explicit64(&dst[15], tmp[15], memory_order_relaxed);
}
/*
* cpy64 -- (internal) copy 64 bytes from src to dst
*/
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
/*
* We use atomics here just to be sure compiler will not split stores.
* Order of stores doesn't matter.
*/
uint64_t tmp[8];
util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
}
/*
* cpy8 -- (internal) copy 8 bytes from src to dst
*/
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
uint64_t tmp;
util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}
/*
* store8 -- (internal) store 8 bytes
*/
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}
/*
* memmove_nodrain_generic -- generic memmove to pmem without hw drain
*/
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
unsigned flags, flush_func flush)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
flags);
char *cdst = dst;
const char *csrc = src;
size_t remaining;
(void) flags;
if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = csrc[i];
pmem2_flush_flags(cdst, cnt, flags, flush);
cdst += cnt;
csrc += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 128 && CACHELINE_SIZE == 128) {
cpy128(dst8, src8);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
dst8 += 16;
src8 += 16;
}
while (len >= 64) {
cpy64(dst8, src8);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
dst8 += 8;
src8 += 8;
}
remaining = len;
while (len >= 8) {
cpy8(dst8, src8);
len -= 8;
dst8++;
src8++;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = 0; i < len; ++i)
*cdst++ = *csrc++;
if (remaining)
pmem2_flush_flags(cdst - remaining, remaining, flags,
flush);
} else {
cdst += len;
csrc += len;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
if (cnt > len)
cnt = len;
cdst -= cnt;
csrc -= cnt;
len -= cnt;
for (size_t i = cnt; i > 0; --i)
cdst[i - 1] = csrc[i - 1];
pmem2_flush_flags(cdst, cnt, flags, flush);
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 128 && CACHELINE_SIZE == 128) {
dst8 -= 16;
src8 -= 16;
cpy128(dst8, src8);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
}
while (len >= 64) {
dst8 -= 8;
src8 -= 8;
cpy64(dst8, src8);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
}
remaining = len;
while (len >= 8) {
--dst8;
--src8;
cpy8(dst8, src8);
len -= 8;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = len; i > 0; --i)
*--cdst = *--csrc;
if (remaining)
pmem2_flush_flags(cdst, remaining, flags, flush);
}
return dst;
}
/*
* memset_nodrain_generic -- generic memset to pmem without hw drain
*/
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags,
flush_func flush)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
flags);
(void) flags;
char *cdst = dst;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = (char)c;
pmem2_flush_flags(cdst, cnt, flags, flush);
cdst += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
uint64_t u = (unsigned char)c;
uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
(u << 24) | (u << 16) | (u << 8) | u;
while (len >= 128 && CACHELINE_SIZE == 128) {
store8(&dst8[0], tmp);
store8(&dst8[1], tmp);
store8(&dst8[2], tmp);
store8(&dst8[3], tmp);
store8(&dst8[4], tmp);
store8(&dst8[5], tmp);
store8(&dst8[6], tmp);
store8(&dst8[7], tmp);
store8(&dst8[8], tmp);
store8(&dst8[9], tmp);
store8(&dst8[10], tmp);
store8(&dst8[11], tmp);
store8(&dst8[12], tmp);
store8(&dst8[13], tmp);
store8(&dst8[14], tmp);
store8(&dst8[15], tmp);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
dst8 += 16;
}
while (len >= 64) {
store8(&dst8[0], tmp);
store8(&dst8[1], tmp);
store8(&dst8[2], tmp);
store8(&dst8[3], tmp);
store8(&dst8[4], tmp);
store8(&dst8[5], tmp);
store8(&dst8[6], tmp);
store8(&dst8[7], tmp);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
dst8 += 8;
}
size_t remaining = len;
while (len >= 8) {
store8(dst8, tmp);
len -= 8;
dst8++;
}
cdst = (char *)dst8;
for (size_t i = 0; i < len; ++i)
*cdst++ = (char)c;
if (remaining)
pmem2_flush_flags(cdst - remaining, remaining, flags, flush);
return dst;
}
| 9,345 | 26.488235 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_arch.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem2_arch.h -- core-arch interface
*/
#ifndef PMEM2_ARCH_H
#define PMEM2_ARCH_H
#include <stddef.h>
#include "libpmem2.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pmem2_arch_info;
typedef void (*fence_func)(void);
typedef void (*flush_func)(const void *, size_t);
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
size_t len, unsigned flags, flush_func flush);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
unsigned flags, flush_func flush);
struct pmem2_arch_info {
memmove_nodrain_func memmove_nodrain;
memmove_nodrain_func memmove_nodrain_eadr;
memset_nodrain_func memset_nodrain;
memset_nodrain_func memset_nodrain_eadr;
flush_func flush;
fence_func fence;
int flush_has_builtin_fence;
};
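/*
 * Sketch (illustrative): an architecture-specific pmem2_arch_init()
 * implementation is expected to fill this dispatch table roughly like
 * this, with my_flush()/my_fence() standing in for real primitives:
 *
 *	info->memmove_nodrain = memmove_nodrain_generic;
 *	info->memset_nodrain = memset_nodrain_generic;
 *	info->flush = my_flush;
 *	info->fence = my_fence;
 *	info->flush_has_builtin_fence = 0;
 */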
void pmem2_arch_init(struct pmem2_arch_info *info);
/*
* flush_empty_nolog -- (internal) do not flush the CPU cache
*/
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
/* NOP, but tell pmemcheck about it */
VALGRIND_DO_FLUSH(addr, len);
}
void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush);
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.8 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* region_namespace_ndctl.c -- common ndctl functions
*/
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "region_namespace_ndctl.h"
#include "region_namespace.h"
#include "out.h"
/*
* ndctl_match_devdax -- (internal) returns 0 if the devdax matches
* with the given file, 1 if it doesn't match,
* and a negative value in case of an error.
*/
static int
ndctl_match_devdax(dev_t st_rdev, const char *devname)
{
LOG(3, "st_rdev %lu devname %s", st_rdev, devname);
if (*devname == '\0')
return 1;
char path[PATH_MAX];
os_stat_t stat;
if (util_snprintf(path, PATH_MAX, "/dev/%s", devname) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if (os_stat(path, &stat)) {
ERR("!stat %s", path);
return PMEM2_E_ERRNO;
}
if (st_rdev != stat.st_rdev) {
LOG(10, "skipping not matching device: %s", path);
return 1;
}
LOG(4, "found matching device: %s", path);
return 0;
}
#define BUFF_LENGTH 64
/*
* ndctl_match_fsdax -- (internal) returns 0 if the device matches
* with the given file, 1 if it doesn't match,
* and a negative value in case of an error.
*/
static int
ndctl_match_fsdax(dev_t st_dev, const char *devname)
{
LOG(3, "st_dev %lu devname %s", st_dev, devname);
if (*devname == '\0')
return 1;
char path[PATH_MAX];
char dev_id[BUFF_LENGTH];
if (util_snprintf(path, PATH_MAX, "/sys/block/%s/dev", devname) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if (util_snprintf(dev_id, BUFF_LENGTH, "%d:%d",
major(st_dev), minor(st_dev)) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
int fd = os_open(path, O_RDONLY);
if (fd < 0) {
ERR("!open \"%s\"", path);
return PMEM2_E_ERRNO;
}
char buff[BUFF_LENGTH];
ssize_t nread = read(fd, buff, BUFF_LENGTH);
if (nread < 0) {
ERR("!read");
int oerrno = errno; /* save the errno */
os_close(fd);
errno = oerrno;
return PMEM2_E_ERRNO;
}
os_close(fd);
if (nread == 0) {
ERR("%s is empty", path);
return PMEM2_E_INVALID_DEV_FORMAT;
}
if (buff[nread - 1] != '\n') {
ERR("%s doesn't end with new line", path);
return PMEM2_E_INVALID_DEV_FORMAT;
}
buff[nread - 1] = '\0';
if (strcmp(buff, dev_id) != 0) {
LOG(10, "skipping not matching device: %s", path);
return 1;
}
LOG(4, "found matching device: %s", path);
return 0;
}
/*
* pmem2_region_namespace -- returns the region
* (and optionally the namespace)
* where the given file is located
*/
int
pmem2_region_namespace(struct ndctl_ctx *ctx,
const struct pmem2_source *src,
struct ndctl_region **pregion,
struct ndctl_namespace **pndns)
{
LOG(3, "ctx %p src %p pregion %p pnamespace %p",
ctx, src, pregion, pndns);
struct ndctl_bus *bus;
struct ndctl_region *region;
struct ndctl_namespace *ndns;
if (pregion)
*pregion = NULL;
if (pndns)
*pndns = NULL;
if (src->value.ftype == PMEM2_FTYPE_DIR) {
ERR("cannot check region or namespace of a directory");
return PMEM2_E_INVALID_FILE_TYPE;
}
FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
struct ndctl_btt *btt;
struct ndctl_dax *dax = NULL;
struct ndctl_pfn *pfn;
const char *devname;
if ((dax = ndctl_namespace_get_dax(ndns))) {
if (src->value.ftype == PMEM2_FTYPE_REG)
continue;
ASSERTeq(src->value.ftype, PMEM2_FTYPE_DEVDAX);
struct daxctl_region *dax_region;
dax_region = ndctl_dax_get_daxctl_region(dax);
if (!dax_region) {
ERR("!cannot find dax region");
return PMEM2_E_DAX_REGION_NOT_FOUND;
}
struct daxctl_dev *dev;
daxctl_dev_foreach(dax_region, dev) {
devname = daxctl_dev_get_devname(dev);
int ret = ndctl_match_devdax(src->value.st_rdev,
devname);
if (ret < 0)
return ret;
if (ret == 0) {
if (pregion)
*pregion = region;
if (pndns)
*pndns = ndns;
return 0;
}
}
} else {
if (src->value.ftype == PMEM2_FTYPE_DEVDAX)
continue;
ASSERTeq(src->value.ftype, PMEM2_FTYPE_REG);
if ((btt = ndctl_namespace_get_btt(ndns))) {
devname = ndctl_btt_get_block_device(btt);
} else if ((pfn = ndctl_namespace_get_pfn(ndns))) {
devname = ndctl_pfn_get_block_device(pfn);
} else {
devname =
ndctl_namespace_get_block_device(ndns);
}
int ret = ndctl_match_fsdax(src->value.st_dev, devname);
if (ret < 0)
return ret;
if (ret == 0) {
if (pregion)
*pregion = region;
if (pndns)
*pndns = ndns;
return 0;
}
}
}
LOG(10, "did not found any matching device");
return 0;
}
/*
 * pmem2_get_region_id -- returns the region id
*/
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
LOG(3, "src %p region_id %p", src, region_id);
struct ndctl_region *region;
struct ndctl_namespace *ndns;
struct ndctl_ctx *ctx;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
int rv = pmem2_region_namespace(ctx, src, &region, &ndns);
if (rv) {
LOG(1, "getting region and namespace failed");
goto end;
}
if (!region) {
ERR("unknown region");
rv = PMEM2_E_DAX_REGION_NOT_FOUND;
goto end;
}
*region_id = ndctl_region_get_id(region);
end:
ndctl_unref(ctx);
return rv;
}
| 5,467 | 20.111969 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_other.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <sys/stat.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#ifdef _WIN32
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
if (S_ISREG(st->st_mode)) {
*type = PMEM2_FTYPE_REG;
return 0;
}
if (S_ISDIR(st->st_mode)) {
*type = PMEM2_FTYPE_DIR;
return 0;
}
ERR("file type 0%o not supported", st->st_mode & S_IFMT);
return PMEM2_E_INVALID_FILE_TYPE;
}
/*
* pmem2_device_dax_size -- checks the size of a given
* dax device from given source structure
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
const char *err =
"BUG: pmem2_device_dax_size should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
return PMEM2_E_NOSUPP;
}
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
const char *err =
"BUG: pmem2_device_dax_alignment should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
return PMEM2_E_NOSUPP;
}
| 1,301 | 20.7 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.c -- pmem2_deep_flush implementation
*/
#include <stdlib.h>
#include "libpmem2.h"
#include "deep_flush.h"
#include "out.h"
/*
* pmem2_deep_flush -- performs deep flush operation
*/
int
pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
uintptr_t map_addr = (uintptr_t)map->addr;
uintptr_t map_end = map_addr + map->content_length;
uintptr_t flush_addr = (uintptr_t)ptr;
uintptr_t flush_end = flush_addr + size;
if (flush_addr < map_addr || flush_end > map_end) {
ERR("requested deep flush rage ptr %p size %zu"
"exceeds map range %p", ptr, size, map);
return PMEM2_E_DEEP_FLUSH_RANGE;
}
int ret = map->deep_flush_fn(map, ptr, size);
if (ret) {
LOG(1, "cannot perform deep flush operation for map %p", map);
return ret;
}
return 0;
}
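/*
 * Usage sketch (illustrative, error handling elided): flush a page of an
 * existing mapping all the way down to the persistence domain:
 *
 *	void *addr = pmem2_map_get_address(map);
 *	...
 *	(void) pmem2_deep_flush(map, addr, 4096);
 */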
| 929 | 21.682927 | 64 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_posix.c -- pmem2_map (POSIX)
*/
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "file.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "valgrind_internal.h"
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
#ifdef __linux__
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because fd doesn't point to DAX-enabled file " \
"or kernel doesn't support MAP_SYNC flag (Linux >= 4.15)"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#else
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"the operating system doesn't provide a method of detecting granularity"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG \
"the operating system doesn't provide a method of detecting whether the platform supports eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#endif
/*
* get_map_alignment -- (internal) choose the desired mapping alignment
*
* The smallest supported alignment is 2 megabytes because of the object
* alignment requirements. Changing this value to 4 kilobytes constitutes a
* layout change.
*
* Use 1GB page alignment only if the mapping length is at least
* twice as big as the page size.
*/
static inline size_t
get_map_alignment(size_t len, size_t req_align)
{
size_t align = 2 * MEGABYTE;
if (req_align)
align = req_align;
else if (len >= 2 * GIGABYTE)
align = GIGABYTE;
return align;
}
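/*
 * Examples of the rule above (illustrative):
 *	get_map_alignment(1 << 20, 0) == 2 * MEGABYTE
 *	get_map_alignment(2 * GIGABYTE, 0) == GIGABYTE
 *	get_map_alignment(2 * GIGABYTE, 4096) == 4096
 */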
/*
* map_reserve -- (internal) reserve an address for mmap()
*
 * ASLR in the 64-bit Linux kernel uses 28 bits of randomness for mmap
* (bit positions 12-39), which means the base mapping address is randomized
* within [0..1024GB] range, with 4KB granularity. Assuming additional
* 1GB alignment, it results in 1024 possible locations.
*/
static int
map_reserve(size_t len, size_t alignment, void **reserv, size_t *reslen,
const struct pmem2_config *cfg)
{
ASSERTne(reserv, NULL);
/* let's get addr from the cfg */
void *mmap_addr = cfg->addr;
int mmap_addr_flag = 0;
size_t dlength; /* dummy length */
/* if addr is initialized, dlength == len */
if (mmap_addr)
dlength = len;
else
dlength = len + alignment; /* dummy length */
/* "translate" pmem2 addr request type into linux flag */
if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
/*
 * the Linux kernel started supporting this flag in version 4.17, but we
 * can still imitate it even if it is not supported by libc or the kernel
*/
#ifdef MAP_FIXED_NOREPLACE
mmap_addr_flag = MAP_FIXED_NOREPLACE;
#else
mmap_addr_flag = 0;
#endif
}
/*
* Create dummy mapping to find an unused region of given size.
* Request for increased size for later address alignment.
* Use MAP_PRIVATE with read-only access to simulate
* zero cost for overcommit accounting. Note: MAP_NORESERVE
* flag is ignored if overcommit is disabled (mode 2).
*/
char *daddr = mmap(mmap_addr, dlength, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | mmap_addr_flag, -1, 0);
if (daddr == MAP_FAILED) {
if (errno == EEXIST) {
ERR("!mmap MAP_FIXED_NOREPLACE");
return PMEM2_E_MAPPING_EXISTS;
}
ERR("!mmap MAP_ANONYMOUS");
return PMEM2_E_ERRNO;
}
/*
 * When the kernel does not support the MAP_FIXED_NOREPLACE flag, we
 * imitate it. If the kernel does not support the flag and the given addr
 * is occupied, the kernel chooses a new addr randomly and returns it.
 * We do not want that behavior, so we validate it and fail when the
 * addresses do not match.
*/
if (mmap_addr && cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
/* mmap succeeded but returned a different addr than requested */
if (daddr != mmap_addr) {
munmap(daddr, dlength);
ERR("mapping exists in the given address");
return PMEM2_E_MAPPING_EXISTS;
}
}
LOG(4, "system choice %p", daddr);
*reserv = (void *)roundup((uintptr_t)daddr, alignment);
/*
* since the last part of the reservation from (reserv + reslen == end)
* will be unmapped, the 'end' address has to be page-aligned.
* 'reserv' is already page-aligned (or even aligned to multiple of page
* size) so it is enough to page-align the 'reslen' value.
*/
*reslen = roundup(len, Pagesize);
LOG(4, "hint %p", *reserv);
/*
* The placeholder mapping is divided into few parts:
*
* 1 2 3 4 5
* |......|uuuuuuuuu|rrr|.................|
*
* Addresses:
* 1 == daddr
* 2 == reserv
* 3 == reserv + len
* 4 == reserv + reslen == end (has to be page-aligned)
* 5 == daddr + dlength
*
* Key:
* - '.' is an unused part of the placeholder
* - 'u' is where the actual mapping lies
* - 'r' is what reserved as padding
*/
/* unmap the placeholder before the actual mapping */
const size_t before = (uintptr_t)(*reserv) - (uintptr_t)daddr;
if (before) {
if (munmap(daddr, before)) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
}
/* unmap the placeholder after the actual mapping */
const size_t after = dlength - *reslen - before;
void *end = (void *)((uintptr_t)(*reserv) + (uintptr_t)*reslen);
if (after)
if (munmap(end, after)) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* file_map -- (internal) memory map given file into memory
* If (flags & MAP_PRIVATE) it uses just mmap. Otherwise, it tries to mmap with
* (flags | MAP_SHARED_VALIDATE | MAP_SYNC) which allows flushing from the
 * user-space. If MAP_SYNC fails and the user did not request it explicitly,
 * it falls back to mmap with the user-provided flags.
*/
static int
file_map(void *reserv, size_t len, int proto, int flags,
int fd, off_t offset, bool *map_sync, void **base)
{
LOG(15, "reserve %p len %zu proto %x flags %x fd %d offset %ld "
"map_sync %p", reserv, len, proto, flags, fd, offset,
map_sync);
ASSERTne(map_sync, NULL);
ASSERTne(base, NULL);
/*
* MAP_PRIVATE and MAP_SHARED are mutually exclusive, therefore mmap
* with MAP_PRIVATE is executed separately.
*/
if (flags & MAP_PRIVATE) {
*base = mmap(reserv, len, proto, flags, fd, offset);
if (*base == MAP_FAILED) {
ERR("!mmap");
return PMEM2_E_ERRNO;
}
LOG(4, "mmap with MAP_PRIVATE succeeded");
*map_sync = false;
return 0;
}
/* try to mmap with MAP_SYNC flag */
const int sync_flags = MAP_SHARED_VALIDATE | MAP_SYNC;
*base = mmap(reserv, len, proto, flags | sync_flags, fd, offset);
if (*base != MAP_FAILED) {
LOG(4, "mmap with MAP_SYNC succeeded");
*map_sync = true;
return 0;
}
/* try to mmap with MAP_SHARED flag (without MAP_SYNC) */
if (errno == EINVAL || errno == ENOTSUP) {
LOG(4, "mmap with MAP_SYNC not supported");
*base = mmap(reserv, len, proto, flags | MAP_SHARED, fd,
offset);
if (*base != MAP_FAILED) {
*map_sync = false;
return 0;
}
}
ERR("!mmap");
return PMEM2_E_ERRNO;
}
/*
* unmap -- (internal) unmap a memory range
*/
static int
unmap(void *addr, size_t len)
{
int retval = munmap(addr, len);
if (retval < 0) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr)
{
LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
int ret = 0;
struct pmem2_map *map;
size_t file_len;
*map_ptr = NULL;
if (cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
ERR(
"please define the max granularity requested for the mapping");
return PMEM2_E_GRANULARITY_NOT_SET;
}
size_t src_alignment;
ret = pmem2_source_alignment(src, &src_alignment);
if (ret)
return ret;
/* get file size */
ret = pmem2_source_size(src, &file_len);
if (ret)
return ret;
/* get offset */
size_t effective_offset;
ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
if (ret)
return ret;
ASSERTeq(effective_offset, cfg->offset);
if (src->type == PMEM2_SOURCE_ANON)
effective_offset = 0;
os_off_t off = (os_off_t)effective_offset;
/* map input and output variables */
bool map_sync = false;
/*
 * MAP_SHARED - required to map the underlying hardware directly
 * MAP_FIXED - required to mmap at the exact address pointed to by the hint
*/
int flags = MAP_FIXED;
void *addr;
/* "translate" pmem2 protection flags into linux flags */
int proto = 0;
if (cfg->protection_flag == PMEM2_PROT_NONE)
proto = PROT_NONE;
if (cfg->protection_flag & PMEM2_PROT_EXEC)
proto |= PROT_EXEC;
if (cfg->protection_flag & PMEM2_PROT_READ)
proto |= PROT_READ;
if (cfg->protection_flag & PMEM2_PROT_WRITE)
proto |= PROT_WRITE;
if (src->type == PMEM2_SOURCE_FD) {
if (src->value.ftype == PMEM2_FTYPE_DIR) {
ERR("the directory is not a supported file type");
return PMEM2_E_INVALID_FILE_TYPE;
}
ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
src->value.ftype == PMEM2_FTYPE_DEVDAX);
if (cfg->sharing == PMEM2_PRIVATE &&
src->value.ftype == PMEM2_FTYPE_DEVDAX) {
ERR(
"device DAX does not support mapping with MAP_PRIVATE");
return PMEM2_E_SRC_DEVDAX_PRIVATE;
}
}
size_t content_length, reserved_length = 0;
ret = pmem2_config_validate_length(cfg, file_len, src_alignment);
if (ret)
return ret;
/* without user-provided length, map to the end of the file */
if (cfg->length)
content_length = cfg->length;
else
content_length = file_len - effective_offset;
size_t alignment = get_map_alignment(content_length,
src_alignment);
ret = pmem2_config_validate_addr_alignment(cfg, src);
if (ret)
return ret;
/* find a hint for the mapping */
void *reserv = NULL;
ret = map_reserve(content_length, alignment, &reserv, &reserved_length,
cfg);
if (ret != 0) {
if (ret == PMEM2_E_MAPPING_EXISTS)
LOG(1, "given mapping region is already occupied");
else
LOG(1, "cannot find a contiguous region of given size");
return ret;
}
ASSERTne(reserv, NULL);
if (cfg->sharing == PMEM2_PRIVATE) {
flags |= MAP_PRIVATE;
}
int map_fd = INVALID_FD;
if (src->type == PMEM2_SOURCE_FD) {
map_fd = src->value.fd;
} else if (src->type == PMEM2_SOURCE_ANON) {
flags |= MAP_ANONYMOUS;
} else {
ASSERT(0);
}
ret = file_map(reserv, content_length, proto, flags, map_fd, off,
&map_sync, &addr);
if (ret) {
/* unmap the reservation mapping */
munmap(reserv, reserved_length);
if (ret == -EACCES)
return PMEM2_E_NO_ACCESS;
else if (ret == -ENOTSUP)
return PMEM2_E_NOSUPP;
else
return ret;
}
LOG(3, "mapped at %p", addr);
bool eADR = (pmem2_auto_flush() == 1);
enum pmem2_granularity available_min_granularity =
src->type == PMEM2_SOURCE_ANON ? PMEM2_GRANULARITY_BYTE :
get_min_granularity(eADR, map_sync, cfg->sharing);
if (available_min_granularity > cfg->requested_max_granularity) {
const char *err = granularity_err_msg
[cfg->requested_max_granularity]
[available_min_granularity];
if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
FATAL(
"unhandled granularity error: available_min_granularity: %d" \
"requested_max_granularity: %d",
available_min_granularity,
cfg->requested_max_granularity);
ERR("%s", err);
ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
goto err;
}
/* prepare pmem2_map structure */
map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
if (!map)
goto err;
map->addr = addr;
map->reserved_length = reserved_length;
map->content_length = content_length;
map->effective_granularity = available_min_granularity;
pmem2_set_flush_fns(map);
pmem2_set_mem_fns(map);
map->source = *src;
map->source.value.fd = INVALID_FD; /* fd should not be used after map */
ret = pmem2_register_mapping(map);
if (ret)
goto err_register;
*map_ptr = map;
if (src->type == PMEM2_SOURCE_FD) {
VALGRIND_REGISTER_PMEM_MAPPING(map->addr, map->content_length);
VALGRIND_REGISTER_PMEM_FILE(src->value.fd,
map->addr, map->content_length, 0);
}
return 0;
err_register:
free(map);
err:
unmap(addr, reserved_length);
return ret;
}
/*
* pmem2_unmap -- unmap the specified mapping
*/
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
LOG(3, "map_ptr %p", map_ptr);
int ret = 0;
struct pmem2_map *map = *map_ptr;
ret = pmem2_unregister_mapping(map);
if (ret)
return ret;
ret = unmap(map->addr, map->reserved_length);
if (ret)
return ret;
VALGRIND_REMOVE_PMEM_MAPPING(map->addr, map->content_length);
Free(map);
*map_ptr = NULL;
return ret;
}
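/*
 * Usage sketch (illustrative, error handling elided): the full mapping
 * lifecycle built on the two entry points above:
 *
 *	struct pmem2_map *map;
 *	if (pmem2_map(cfg, src, &map) == 0) {
 *		void *addr = pmem2_map_get_address(map);
 *		... access addr up to pmem2_map_get_size(map) bytes ...
 *		(void) pmem2_unmap(&map);
 *	}
 */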
| 13,869 | 25.879845 | 96 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* auto_flush_windows.c -- Windows auto flush detection
*/
#include <windows.h>
#include <inttypes.h>
#include "alloc.h"
#include "out.h"
#include "os.h"
#include "endian.h"
#include "auto_flush_windows.h"
/*
* is_nfit_available -- (internal) check if platform supports NFIT table.
*/
static int
is_nfit_available()
{
LOG(3, "is_nfit_available()");
DWORD signatures_size;
char *signatures = NULL;
int is_nfit = 0;
DWORD offset = 0;
signatures_size = EnumSystemFirmwareTables(ACPI_SIGNATURE, NULL, 0);
if (signatures_size == 0) {
ERR("!EnumSystemFirmwareTables");
return -1;
}
signatures = (char *)Malloc(signatures_size + 1);
if (signatures == NULL) {
ERR("!malloc");
return -1;
}
int ret = EnumSystemFirmwareTables(ACPI_SIGNATURE,
signatures, signatures_size);
signatures[signatures_size] = '\0';
if (ret != signatures_size) {
ERR("!EnumSystemFirmwareTables");
goto err;
}
while (offset <= signatures_size) {
int nfit_sig = strncmp(signatures + offset,
NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
if (nfit_sig == 0) {
is_nfit = 1;
break;
}
offset += NFIT_SIGNATURE_LEN;
}
Free(signatures);
return is_nfit;
err:
Free(signatures);
return -1;
}
/*
* is_auto_flush_cap_set -- (internal) check if specific
* capabilities bits are set.
*
* ACPI 6.2A Specification:
* Bit[0] - CPU Cache Flush to NVDIMM Durability on
* Power Loss Capable. If set to 1, indicates that platform
* ensures the entire CPU store data path is flushed to
* persistent memory on system power loss.
* Bit[1] - Memory Controller Flush to NVDIMM Durability on Power Loss Capable.
* If set to 1, indicates that platform provides mechanisms to automatically
* flush outstanding write data from the memory controller to persistent memory
* in the event of platform power loss. Note: If bit 0 is set to 1 then this bit
* shall be set to 1 as well.
*/
static int
is_auto_flush_cap_set(uint32_t capabilities)
{
LOG(3, "is_auto_flush_cap_set capabilities 0x%" PRIx32, capabilities);
int CPU_cache_flush = CHECK_BIT(capabilities, 0);
int memory_controller_flush = CHECK_BIT(capabilities, 1);
LOG(15, "CPU_cache_flush %d, memory_controller_flush %d",
CPU_cache_flush, memory_controller_flush);
if (memory_controller_flush == 1 && CPU_cache_flush == 1)
return 1;
return 0;
}
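/*
 * Worked example (illustrative): capabilities == 0x3 has both bit 0 and
 * bit 1 set, so eADR is reported (returns 1); capabilities == 0x2 leaves
 * the CPU cache flush bit clear, so the function returns 0.
 */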
/*
* parse_nfit_buffer -- (internal) parse nfit buffer
* if platform_capabilities struct is available return pcs structure.
*/
static struct platform_capabilities
parse_nfit_buffer(const unsigned char *nfit_buffer, unsigned long buffer_size)
{
LOG(3, "parse_nfit_buffer nfit_buffer %s, buffer_size %lu",
nfit_buffer, buffer_size);
uint16_t type;
uint16_t length;
size_t offset = sizeof(struct nfit_header);
struct platform_capabilities pcs = {0};
while (offset < buffer_size) {
type = *(nfit_buffer + offset);
length = *(nfit_buffer + offset + 2);
if (type == PCS_TYPE_NUMBER) {
if (length == sizeof(struct platform_capabilities)) {
memmove(&pcs, nfit_buffer + offset, length);
return pcs;
}
}
offset += length;
}
return pcs;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush.
*/
int
pmem2_auto_flush(void)
{
LOG(3, NULL);
DWORD nfit_buffer_size = 0;
DWORD nfit_written = 0;
PVOID nfit_buffer = NULL;
struct nfit_header *nfit_data;
struct platform_capabilities *pc = NULL;
int eADR = 0;
int is_nfit = is_nfit_available();
if (is_nfit == 0) {
LOG(15, "ACPI NFIT table not available");
return 0;
}
if (is_nfit < 0) {
LOG(1, "!is_nfit_available");
return -1;
}
/* get the entire nfit size */
nfit_buffer_size = GetSystemFirmwareTable(
(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, NULL, 0);
if (nfit_buffer_size == 0) {
ERR("!GetSystemFirmwareTable");
return -1;
}
/* reserve buffer */
nfit_buffer = (unsigned char *)Malloc(nfit_buffer_size);
if (nfit_buffer == NULL) {
ERR("!malloc");
goto err;
}
/* write actual nfit to buffer */
nfit_written = GetSystemFirmwareTable(
(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE,
nfit_buffer, nfit_buffer_size);
if (nfit_written == 0) {
ERR("!GetSystemFirmwareTable");
goto err;
}
if (nfit_buffer_size != nfit_written) {
errno = ERROR_INVALID_DATA;
ERR("!GetSystemFirmwareTable invalid data");
goto err;
}
nfit_data = (struct nfit_header *)nfit_buffer;
int nfit_sig = strncmp(nfit_data->signature,
NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
if (nfit_sig != 0) {
ERR("!NFIT buffer has invalid data");
goto err;
}
struct platform_capabilities pcs = parse_nfit_buffer(
nfit_buffer, nfit_buffer_size);
eADR = is_auto_flush_cap_set(pcs.capabilities);
Free(nfit_buffer);
return eADR;
err:
Free(nfit_buffer);
return -1;
}
| 4,857 | 23.535354 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/badblocks_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"
#include "file.h"
#include "out.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "extent.h"
typedef int pmem2_badblock_next_type(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
typedef void *pmem2_badblock_get_next_type(
struct pmem2_badblock_context *bbctx);
struct pmem2_badblock_context {
/* file descriptor */
int fd;
/* pmem2 file type */
enum pmem2_file_type file_type;
/* ndctl context */
struct ndctl_ctx *ctx;
/*
* Function pointer to:
* - pmem2_badblock_next_namespace() or
* - pmem2_badblock_next_region()
*/
pmem2_badblock_next_type *pmem2_badblock_next_func;
/*
* Function pointer to:
* - pmem2_namespace_get_first_badblock() or
* - pmem2_namespace_get_next_badblock() or
* - pmem2_region_get_first_badblock() or
* - pmem2_region_get_next_badblock()
*/
pmem2_badblock_get_next_type *pmem2_badblock_get_next_func;
/* needed only by the ndctl namespace badblock iterator */
struct ndctl_namespace *ndns;
/* needed only by the ndctl region badblock iterator */
struct {
struct ndctl_bus *bus;
struct ndctl_region *region;
unsigned long long ns_res; /* address of the namespace */
unsigned long long ns_beg; /* the beginning of the namespace */
unsigned long long ns_end; /* the end of the namespace */
} rgn;
/* file's extents */
struct extents *exts;
unsigned first_extent;
struct pmem2_badblock last_bb;
};
/* forward declarations */
static int pmem2_badblock_next_namespace(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static int pmem2_badblock_next_region(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static void *pmem2_namespace_get_first_badblock(
struct pmem2_badblock_context *bbctx);
static void *pmem2_region_get_first_badblock(
struct pmem2_badblock_context *bbctx);
/*
* badblocks_get_namespace_bounds -- (internal) returns the bounds
* (offset and size) of the given namespace
* relative to the beginning of its region
*/
static int
badblocks_get_namespace_bounds(struct ndctl_region *region,
struct ndctl_namespace *ndns,
unsigned long long *ns_offset,
unsigned long long *ns_size)
{
LOG(3, "region %p namespace %p ns_offset %p ns_size %p",
region, ndns, ns_offset, ns_size);
struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns);
struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);
ASSERTne(ns_offset, NULL);
ASSERTne(ns_size, NULL);
if (pfn) {
*ns_offset = ndctl_pfn_get_resource(pfn);
if (*ns_offset == ULLONG_MAX) {
ERR("(pfn) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_pfn_get_size(pfn);
if (*ns_size == ULLONG_MAX) {
ERR("(pfn) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
} else if (dax) {
*ns_offset = ndctl_dax_get_resource(dax);
if (*ns_offset == ULLONG_MAX) {
ERR("(dax) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_dax_get_size(dax);
if (*ns_size == ULLONG_MAX) {
ERR("(dax) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(dax) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
} else { /* raw or btt */
*ns_offset = ndctl_namespace_get_resource(ndns);
if (*ns_offset == ULLONG_MAX) {
ERR("(raw/btt) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_namespace_get_size(ndns);
if (*ns_size == ULLONG_MAX) {
ERR("(raw/btt) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
}
unsigned long long region_offset = ndctl_region_get_resource(region);
if (region_offset == ULLONG_MAX) {
ERR("!cannot read offset of the region");
return PMEM2_E_ERRNO;
}
LOG(10, "region_offset 0x%llx", region_offset);
*ns_offset -= region_offset;
return 0;
}
/*
* badblocks_devdax_clear_one_badblock -- (internal) clear one bad block
* in the dax device
*/
static int
badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus,
unsigned long long address,
unsigned long long length)
{
LOG(3, "bus %p address 0x%llx length %llu (bytes)",
bus, address, length);
int ret;
struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus,
address, length);
if (cmd_ars_cap == NULL) {
ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')",
ndctl_bus_get_provider(bus));
return PMEM2_E_ERRNO;
}
ret = ndctl_cmd_submit(cmd_ars_cap);
if (ret) {
ERR("ndctl_cmd_submit() failed (bus '%s')",
ndctl_bus_get_provider(bus));
/* ndctl_cmd_submit() returns -errno */
goto out_ars_cap;
}
struct ndctl_range range;
ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range);
if (ret) {
ERR("ndctl_cmd_ars_cap_get_range() failed");
/* ndctl_cmd_ars_cap_get_range() returns -errno */
goto out_ars_cap;
}
struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error(
range.address, range.length, cmd_ars_cap);
if (cmd_clear_error == NULL) {
ERR("ndctl_bus_cmd_new_clear_error() failed (bus '%s')",
ndctl_bus_get_provider(bus));
ret = PMEM2_E_ERRNO;
goto out_ars_cap;
}
ret = ndctl_cmd_submit(cmd_clear_error);
if (ret) {
ERR("ndctl_cmd_submit() failed (bus '%s')",
ndctl_bus_get_provider(bus));
/* ndctl_cmd_submit() returns -errno */
goto out_clear_error;
}
size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error);
LOG(4, "cleared %zu out of %llu bad blocks", cleared, length);
ASSERT(cleared <= length);
if (cleared < length) {
ERR("failed to clear %llu out of %llu bad blocks",
length - cleared, length);
errno = ENXIO; /* ndctl handles such error in this way */
ret = PMEM2_E_ERRNO;
} else {
ret = 0;
}
out_clear_error:
ndctl_cmd_unref(cmd_clear_error);
out_ars_cap:
ndctl_cmd_unref(cmd_ars_cap);
return ret;
}
/*
* pmem2_badblock_context_new -- allocate and create a new bad block context
*/
int
pmem2_badblock_context_new(const struct pmem2_source *src,
struct pmem2_badblock_context **bbctx)
{
LOG(3, "src %p bbctx %p", src, bbctx);
ASSERTne(bbctx, NULL);
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support bad blocks");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_FD);
struct ndctl_ctx *ctx;
struct ndctl_region *region;
struct ndctl_namespace *ndns;
struct pmem2_badblock_context *tbbctx = NULL;
enum pmem2_file_type pmem2_type;
int ret = PMEM2_E_UNKNOWN;
*bbctx = NULL;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
pmem2_type = src->value.ftype;
ret = pmem2_region_namespace(ctx, src, &region, &ndns);
if (ret) {
LOG(1, "getting region and namespace failed");
goto exit_ndctl_unref;
}
tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret);
if (ret)
goto exit_ndctl_unref;
tbbctx->fd = src->value.fd;
tbbctx->file_type = pmem2_type;
tbbctx->ctx = ctx;
if (region == NULL || ndns == NULL) {
/* did not find any matching device */
*bbctx = tbbctx;
return 0;
}
if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) {
tbbctx->ndns = ndns;
tbbctx->pmem2_badblock_next_func =
pmem2_badblock_next_namespace;
tbbctx->pmem2_badblock_get_next_func =
pmem2_namespace_get_first_badblock;
} else {
unsigned long long ns_beg, ns_size, ns_end;
ret = badblocks_get_namespace_bounds(
region, ndns,
&ns_beg, &ns_size);
if (ret) {
LOG(1, "cannot read namespace's bounds");
goto error_free_all;
}
ns_end = ns_beg + ns_size - 1;
LOG(10,
"namespace: begin %llu, end %llu size %llu (in 512B sectors)",
B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size));
tbbctx->rgn.bus = ndctl_region_get_bus(region);
tbbctx->rgn.region = region;
tbbctx->rgn.ns_beg = ns_beg;
tbbctx->rgn.ns_end = ns_end;
tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region);
tbbctx->pmem2_badblock_next_func =
pmem2_badblock_next_region;
tbbctx->pmem2_badblock_get_next_func =
pmem2_region_get_first_badblock;
}
if (pmem2_type == PMEM2_FTYPE_REG) {
/* only regular files have extents */
ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts);
if (ret) {
LOG(1, "getting extents of fd %i failed",
src->value.fd);
goto error_free_all;
}
}
/* set the context */
*bbctx = tbbctx;
return 0;
error_free_all:
pmem2_extents_destroy(&tbbctx->exts);
Free(tbbctx);
exit_ndctl_unref:
ndctl_unref(ctx);
return ret;
}
/*
* pmem2_badblock_context_delete -- delete and free the bad block context
*/
void
pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx)
{
LOG(3, "bbctx %p", bbctx);
ASSERTne(bbctx, NULL);
if (*bbctx == NULL)
return;
struct pmem2_badblock_context *tbbctx = *bbctx;
pmem2_extents_destroy(&tbbctx->exts);
ndctl_unref(tbbctx->ctx);
Free(tbbctx);
*bbctx = NULL;
}
/*
* pmem2_namespace_get_next_badblock -- (internal) wrapper for
* ndctl_namespace_get_next_badblock
*/
static void *
pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
return ndctl_namespace_get_next_badblock(bbctx->ndns);
}
/*
* pmem2_namespace_get_first_badblock -- (internal) wrapper for
* ndctl_namespace_get_first_badblock
*/
static void *
pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock;
return ndctl_namespace_get_first_badblock(bbctx->ndns);
}
/*
* pmem2_region_get_next_badblock -- (internal) wrapper for
* ndctl_region_get_next_badblock
*/
static void *
pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
return ndctl_region_get_next_badblock(bbctx->rgn.region);
}
/*
* pmem2_region_get_first_badblock -- (internal) wrapper for
* ndctl_region_get_first_badblock
*/
static void *
pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock;
return ndctl_region_get_first_badblock(bbctx->rgn.region);
}
/*
* pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next()
* called for ndctl with namespace badblock
* iterator
*
* This function works only for fsdax, but does not require any special
* permissions.
*/
static int
pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
struct badblock *bbn;
bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
if (bbn == NULL)
return PMEM2_E_NO_BAD_BLOCK_FOUND;
/*
* libndctl returns offset and length of a bad block
* both expressed in 512B sectors. Offset is relative
* to the beginning of the namespace.
*/
bb->offset = SEC2B(bbn->offset);
bb->length = SEC2B(bbn->len);
return 0;
}
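/*
 * Worked example (illustrative): a libndctl bad block with offset 8 and
 * len 2 (512B sectors) is returned as offset 4096 and length 1024 bytes.
 */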
/*
* pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next()
* called for ndctl with region badblock iterator
*
* This function works for all types of namespaces, but requires read access to
* privileged device information.
*/
static int
pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
unsigned long long bb_beg, bb_end;
unsigned long long beg, end;
struct badblock *bbn;
unsigned long long ns_beg = bbctx->rgn.ns_beg;
unsigned long long ns_end = bbctx->rgn.ns_end;
do {
bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
if (bbn == NULL)
return PMEM2_E_NO_BAD_BLOCK_FOUND;
LOG(10,
"region bad block: begin %llu end %llu length %u (in 512B sectors)",
bbn->offset, bbn->offset + bbn->len - 1, bbn->len);
/*
* libndctl returns offset and length of a bad block
* both expressed in 512B sectors. Offset is relative
* to the beginning of the region.
*/
bb_beg = SEC2B(bbn->offset);
bb_end = bb_beg + SEC2B(bbn->len) - 1;
} while (bb_beg > ns_end || ns_beg > bb_end);
beg = (bb_beg > ns_beg) ? bb_beg : ns_beg;
end = (bb_end < ns_end) ? bb_end : ns_end;
/*
* Form a new bad block structure with offset and length
* expressed in bytes and offset relative to the beginning
* of the namespace.
*/
bb->offset = beg - ns_beg;
bb->length = end - beg + 1;
LOG(4,
"namespace bad block: begin %llu end %llu length %llu (in 512B sectors)",
B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1);
return 0;
}
/*
* pmem2_badblock_next -- get the next bad block
*/
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
struct pmem2_badblock bbn;
unsigned long long bb_beg;
unsigned long long bb_end;
unsigned long long bb_len;
unsigned long long bb_off;
unsigned long long ext_beg;
unsigned long long ext_end;
unsigned e;
int ret;
if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) {
/* did not find any matching device */
return PMEM2_E_NO_BAD_BLOCK_FOUND;
}
struct extents *exts = bbctx->exts;
/* DAX devices have no extents */
if (!exts) {
ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
*bb = bbn;
return ret;
}
/*
* There is at least one extent.
* Loop until:
* 1) a bad block overlaps with an extent or
* 2) there are no more bad blocks.
*/
int bb_overlaps_with_extent = 0;
do {
if (bbctx->last_bb.length) {
/*
* We have saved the last bad block to check it
* with the next extent saved
* in bbctx->first_extent.
*/
ASSERTne(bbctx->first_extent, 0);
bbn = bbctx->last_bb;
bbctx->last_bb.offset = 0;
bbctx->last_bb.length = 0;
} else {
ASSERTeq(bbctx->first_extent, 0);
/* look for the next bad block */
ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
if (ret)
return ret;
}
bb_beg = bbn.offset;
bb_end = bb_beg + bbn.length - 1;
for (e = bbctx->first_extent;
e < exts->extents_count;
e++) {
ext_beg = exts->extents[e].offset_physical;
ext_end = ext_beg + exts->extents[e].length - 1;
/* check if the bad block overlaps with the extent */
if (bb_beg <= ext_end && ext_beg <= bb_end) {
/* bad block overlaps with the extent */
bb_overlaps_with_extent = 1;
if (bb_end > ext_end &&
e + 1 < exts->extents_count) {
/*
* The bad block is longer than
* the extent and there are
* more extents.
* Save the current bad block
* to check it with the next extent.
*/
bbctx->first_extent = e + 1;
bbctx->last_bb = bbn;
} else {
/*
* All extents were checked
* with the current bad block.
*/
bbctx->first_extent = 0;
bbctx->last_bb.length = 0;
bbctx->last_bb.offset = 0;
}
break;
}
}
/* check all extents with the next bad block */
if (bb_overlaps_with_extent == 0) {
bbctx->first_extent = 0;
bbctx->last_bb.length = 0;
bbctx->last_bb.offset = 0;
}
} while (bb_overlaps_with_extent == 0);
/* bad block overlaps with an extent */
bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg;
bb_end = (bb_end < ext_end) ? bb_end : ext_end;
bb_len = bb_end - bb_beg + 1;
bb_off = bb_beg + exts->extents[e].offset_logical
- exts->extents[e].offset_physical;
LOG(10, "bad block found: physical offset: %llu, length: %llu",
bb_beg, bb_len);
/* make sure the offset is block-aligned */
unsigned long long not_block_aligned = bb_off & (exts->blksize - 1);
if (not_block_aligned) {
bb_off -= not_block_aligned;
bb_len += not_block_aligned;
}
/* make sure the length is block-aligned */
bb_len = ALIGN_UP(bb_len, exts->blksize);
LOG(4, "bad block found: logical offset: %llu, length: %llu",
bb_off, bb_len);
/*
* Return the bad block with offset and length
* expressed in bytes and offset relative
* to the beginning of the file.
*/
bb->offset = bb_off;
bb->length = bb_len;
return 0;
}
/*
* pmem2_badblock_clear_fsdax -- (internal) clear one bad block
* in a FSDAX device
*/
static int
pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb)
{
LOG(3, "fd %i badblock %p", fd, bb);
ASSERTne(bb, NULL);
LOG(10,
"clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)",
fd, B2SEC(bb->offset), B2SEC(bb->length));
/* fallocate() takes offset as the off_t type */
if (bb->offset > (size_t)INT64_MAX) {
ERR("bad block's offset is greater than INT64_MAX");
return PMEM2_E_OFFSET_OUT_OF_RANGE;
}
/* fallocate() takes length as the off_t type */
if (bb->length > (size_t)INT64_MAX) {
ERR("bad block's length is greater than INT64_MAX");
return PMEM2_E_LENGTH_OUT_OF_RANGE;
}
off_t offset = (off_t)bb->offset;
off_t length = (off_t)bb->length;
/* deallocate bad blocks */
if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, length)) {
ERR("!fallocate");
return PMEM2_E_ERRNO;
}
/* allocate new blocks */
if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length)) {
ERR("!fallocate");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* pmem2_badblock_clear_devdax -- (internal) clear one bad block
* in a DAX device
*/
static int
pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bb, NULL);
ASSERTne(bbctx, NULL);
ASSERTne(bbctx->rgn.bus, NULL);
ASSERTne(bbctx->rgn.ns_res, 0);
LOG(4,
"clearing a bad block: offset %zu length %zu (in 512B sectors)",
B2SEC(bb->offset), B2SEC(bb->length));
int ret = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus,
bb->offset + bbctx->rgn.ns_res,
bb->length);
if (ret) {
LOG(1,
"failed to clear a bad block: offset %zu length %zu (in 512B sectors)",
B2SEC(bb->offset),
B2SEC(bb->length));
return ret;
}
return 0;
}
/*
* pmem2_badblock_clear -- clear one bad block
*/
int
pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p badblock %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
if (bbctx->file_type == PMEM2_FTYPE_DEVDAX)
return pmem2_badblock_clear_devdax(bbctx, bb);
ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG);
return pmem2_badblock_clear_fsdax(bbctx->fd, bb);
}
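/*
 * Usage sketch (illustrative, error handling elided): iterate and clear
 * all bad blocks of a source created with pmem2_source_from_fd():
 *
 *	struct pmem2_badblock_context *bbctx;
 *	struct pmem2_badblock bb;
 *	if (pmem2_badblock_context_new(src, &bbctx) == 0) {
 *		while (pmem2_badblock_next(bbctx, &bb) == 0)
 *			(void) pmem2_badblock_clear(bbctx, &bb);
 *		pmem2_badblock_context_delete(&bbctx);
 *	}
 */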
| 19,316 | 24.218016 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace_ndctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* region_namespace_ndctl.h -- internal definitions for libpmem2
* common ndctl functions
*/
#ifndef PMDK_REGION_NAMESPACE_NDCTL_H
#define PMDK_REGION_NAMESPACE_NDCTL_H 1
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \
ndctl_bus_foreach(ctx, bus) \
ndctl_region_foreach(bus, region) \
ndctl_namespace_foreach(region, ndns)
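/*
 * Usage sketch (illustrative): walk every namespace visible to ndctl,
 * assuming 'ctx' was obtained from ndctl_new():
 *
 *	struct ndctl_bus *bus;
 *	struct ndctl_region *region;
 *	struct ndctl_namespace *ndns;
 *	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
 *		... inspect region and ndns ...
 *	}
 */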
int pmem2_region_namespace(struct ndctl_ctx *ctx,
const struct pmem2_source *src,
struct ndctl_region **pregion,
struct ndctl_namespace **pndns);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
| 754 | 21.878788 | 64 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/vm_reservation.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vm_reservation.c -- implementation of virtual memory allocation API
*/
#include "libpmem2.h"
/*
* pmem2_vm_reservation_new -- creates new virtual memory reservation
*/
int
pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
size_t size, void *address)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_vm_reservation_delete -- deletes reservation bound to
* structure pmem2_vm_reservation
*/
int
pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv)
{
return PMEM2_E_NOSUPP;
}
| 614 | 20.206897 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/usc_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_windows.c -- pmem2 usc function for windows
*/
#include "alloc.h"
#include "source.h"
#include "out.h"
#include "libpmem2.h"
#include "pmem2_utils.h"
#define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
#define VOLUME_PATH_SIZE sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */)
/*
* get_volume_handle -- returns volume handle
*/
static int
get_volume_handle(HANDLE handle, HANDLE *volume_handle)
{
wchar_t *volume;
wchar_t tmp[10];
DWORD len =
GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID);
if (len == 0) {
ERR("!!GetFinalPathNameByHandleW");
return pmem2_lasterror_to_err();
}
len *= sizeof(wchar_t);
int err;
volume = pmem2_malloc(len, &err);
if (volume == NULL)
return err;
if (!GetFinalPathNameByHandleW(handle, volume, len,
VOLUME_NAME_GUID)) {
Free(volume);
ERR("!!GetFinalPathNameByHandleW");
return pmem2_lasterror_to_err();
}
ASSERTeq(volume[VOLUME_PATH_SIZE], '\\');
volume[VOLUME_PATH_SIZE] = '\0';
*volume_handle = CreateFileW(volume, /* path to the file */
/* request access to send ioctl to the file */
FILE_READ_ATTRIBUTES,
/* do not block access to the file */
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, /* security attributes */
OPEN_EXISTING, /* open only if it exists */
FILE_ATTRIBUTE_NORMAL, /* no attributes */
NULL); /* used only for new files */
Free(volume);
if (*volume_handle == INVALID_HANDLE_VALUE) {
ERR("!!CreateFileW");
return pmem2_lasterror_to_err();
}
return 0;
}
static int
get_device_guid(HANDLE handle, GUID *guid)
{
HANDLE vHandle;
int ret = get_volume_handle(handle, &vHandle);
if (vHandle == INVALID_HANDLE_VALUE)
return ret;
STORAGE_DEVICE_NUMBER_EX sdn;
sdn.DeviceNumber = -1;
DWORD dwBytesReturned = 0;
if (!DeviceIoControl(vHandle,
IOCTL_STORAGE_GET_DEVICE_NUMBER_EX,
NULL, 0,
&sdn, sizeof(sdn),
&dwBytesReturned, NULL)) {
/*
* IOCTL_STORAGE_GET_DEVICE_NUMBER_EX is not supported
* on this server
*/
ERR(
"Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system");
CloseHandle(vHandle);
return PMEM2_E_NOSUPP;
}
*guid = sdn.DeviceGuid;
CloseHandle(vHandle);
return 0;
}
int
pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id,
size_t *len)
{
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not have device id");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
if (id == NULL) {
*len = GUID_SIZE * sizeof(*id);
return 0;
}
if (*len < GUID_SIZE * sizeof(*id)) {
ERR("id buffer is to small");
return PMEM2_E_BUFFER_TOO_SMALL;
}
GUID guid;
int ret = get_device_guid(src->value.handle, &guid);
if (ret)
return ret;
_snwprintf(id, GUID_SIZE,
L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
guid.Data4[1], guid.Data4[2], guid.Data4[3],
guid.Data4[4], guid.Data4[5], guid.Data4[6],
guid.Data4[7]);
return 0;
}
int
pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len)
{
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not have device id");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
if (id == NULL) {
*len = GUID_SIZE * sizeof(*id);
return 0;
}
if (*len < GUID_SIZE * sizeof(*id)) {
		ERR("id buffer is too small");
return PMEM2_E_BUFFER_TOO_SMALL;
}
GUID guid;
int ret = get_device_guid(src->value.handle, &guid);
if (ret)
return ret;
if (util_snprintf(id, GUID_SIZE,
"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
guid.Data4[1], guid.Data4[2], guid.Data4[3],
guid.Data4[4], guid.Data4[5], guid.Data4[6],
guid.Data4[7]) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
return 0;
}
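/*
 * Typical usage of the two-call pattern above (illustrative sketch,
 * not part of the library; error handling omitted): first call with
 * id == NULL to learn the required buffer size, then call again:
 *
 *	size_t len;
 *	pmem2_source_device_idU(src, NULL, &len);
 *	char *id = malloc(len);
 *	pmem2_source_device_idU(src, id, &len);
 */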
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
	LOG(3, "src %p usc %p", src, usc);
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support unsafe shutdown count");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
*usc = 0;
HANDLE vHandle;
int err = get_volume_handle(src->value.handle, &vHandle);
if (vHandle == INVALID_HANDLE_VALUE)
return err;
STORAGE_PROPERTY_QUERY prop;
DWORD dwSize;
prop.PropertyId = StorageDeviceUnsafeShutdownCount;
prop.QueryType = PropertyExistsQuery;
prop.AdditionalParameters[0] = 0;
STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret;
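	/* first, check whether the property is supported at all */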
BOOL bResult = DeviceIoControl(vHandle,
IOCTL_STORAGE_QUERY_PROPERTY,
&prop, sizeof(prop),
&ret, sizeof(ret),
(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
if (!bResult) {
ERR(
"Getting unsafe shutdown count is not supported on this system");
CloseHandle(vHandle);
return PMEM2_E_NOSUPP;
}
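	/* the property is supported -- now query its actual value */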
prop.QueryType = PropertyStandardQuery;
bResult = DeviceIoControl(vHandle,
IOCTL_STORAGE_QUERY_PROPERTY,
&prop, sizeof(prop),
&ret, sizeof(ret),
(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
CloseHandle(vHandle);
if (!bResult) {
ERR("!!DeviceIoControl");
return pmem2_lasterror_to_err();
}
*usc = ret.UnsafeShutdownCount;
return 0;
}
| 5,261 | 22.283186 | 93 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/ravl_interval.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.c -- ravl_interval implementation
*/
#include "alloc.h"
#include "map.h"
#include "ravl_interval.h"
#include "pmem2_utils.h"
#include "sys_util.h"
#include "os_thread.h"
#include "ravl.h"
/*
 * ravl_interval - handle to an interval tree; holds the tree and the
 * callbacks that define each element's boundaries on the number line
*/
struct ravl_interval {
struct ravl *tree;
ravl_interval_min *get_min;
ravl_interval_max *get_max;
};
/*
* ravl_interval_node - structure holding min, max functions and address
*/
struct ravl_interval_node {
void *addr;
ravl_interval_min *get_min;
ravl_interval_max *get_max;
};
/*
* ravl_interval_compare -- compare intervals by its boundaries,
* no overlapping allowed
*/
static int
ravl_interval_compare(const void *lhs, const void *rhs)
{
const struct ravl_interval_node *left = lhs;
const struct ravl_interval_node *right = rhs;
if (left->get_min(left->addr) < right->get_min(right->addr) &&
left->get_max(left->addr) <= right->get_min(right->addr))
return -1;
if (left->get_min(left->addr) > right->get_min(right->addr) &&
left->get_max(left->addr) >= right->get_min(right->addr))
return 1;
return 0;
}
/*
* ravl_interval_delete - finalize the ravl interval module
*/
void
ravl_interval_delete(struct ravl_interval *ri)
{
ravl_delete(ri->tree);
ri->tree = NULL;
Free(ri);
}
/*
* ravl_interval_new -- initialize the ravl interval module
*/
struct ravl_interval *
ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max)
{
int ret;
struct ravl_interval *interval = pmem2_malloc(sizeof(*interval), &ret);
if (ret)
goto ret_null;
interval->tree = ravl_new_sized(ravl_interval_compare,
sizeof(struct ravl_interval_node));
if (!(interval->tree))
goto free_alloc;
interval->get_min = get_min;
interval->get_max = get_max;
return interval;
free_alloc:
Free(interval);
ret_null:
return NULL;
}
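/*
 * Example wiring of the callbacks (illustrative sketch with a
 * hypothetical element type, not part of this module):
 *
 *	struct range { size_t addr; size_t size; };
 *	static size_t range_min(void *r)
 *	{ return ((struct range *)r)->addr; }
 *	static size_t range_max(void *r)
 *	{ return ((struct range *)r)->addr + ((struct range *)r)->size; }
 *
 *	struct ravl_interval *ri = ravl_interval_new(range_min, range_max);
 *	ravl_interval_insert(ri, &my_range);
 *	struct ravl_interval_node *n = ravl_interval_find(ri, &my_range);
 */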
/*
* ravl_interval_insert -- insert interval entry into the tree
*/
int
ravl_interval_insert(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node rin;
rin.addr = addr;
rin.get_min = ri->get_min;
rin.get_max = ri->get_max;
if (ravl_emplace_copy(ri->tree, &rin))
return PMEM2_E_ERRNO;
return 0;
}
/*
* ravl_interval_remove -- remove interval entry from the tree
*/
int
ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin)
{
struct ravl_node *node = ravl_find(ri->tree, rin,
RAVL_PREDICATE_EQUAL);
if (!node)
return PMEM2_E_MAPPING_NOT_FOUND;
ravl_remove(ri->tree, node);
return 0;
}
/*
* ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to
* the current one or at the same place
*/
static struct ravl_interval_node *
ravl_interval_find_prior_or_eq(struct ravl *tree,
struct ravl_interval_node *rin)
{
struct ravl_node *node;
struct ravl_interval_node *cur;
node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL);
if (!node)
return NULL;
cur = ravl_data(node);
/*
* If the end of the found interval is below the searched boundary, then
* this is not our interval.
*/
if (cur->get_max(cur->addr) <= rin->get_min(rin->addr))
return NULL;
return cur;
}
/*
* ravl_interval_find_later -- find overlapping interval starting later than
* the current one
*/
static struct ravl_interval_node *
ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin)
{
struct ravl_node *node;
struct ravl_interval_node *cur;
node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER);
if (!node)
return NULL;
cur = ravl_data(node);
/*
* If the beginning of the found interval is above the end of
* the searched range, then this is not our interval.
*/
if (cur->get_min(cur->addr) >= rin->get_max(rin->addr))
return NULL;
return cur;
}
/*
* ravl_interval_find_equal -- find the interval with exact (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find_equal(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_node *node;
node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL);
if (!node)
return NULL;
return ravl_data(node);
}
/*
* ravl_interval_find -- find the earliest interval within (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_interval_node *cur;
cur = ravl_interval_find_prior_or_eq(ri->tree, &range);
if (!cur)
cur = ravl_interval_find_later(ri->tree, &range);
return cur;
}
/*
* ravl_interval_data -- returns the data contained within interval node
*/
void *
ravl_interval_data(struct ravl_interval_node *rin)
{
return (void *)rin->addr;
}
| 4,963 | 21.26009 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_windows.c -- pmem2_map (Windows)
*/
#include <stdbool.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because specified volume is not a direct access (DAX) volume"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
/*
* create_mapping -- creates file mapping object for a file
*/
static HANDLE
create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect,
unsigned long *err)
{
size_t max_size = length + offset;
SetLastError(0);
HANDLE mh = CreateFileMapping(hfile,
NULL, /* security attributes */
protect,
HIDWORD(max_size),
LODWORD(max_size),
NULL);
*err = GetLastError();
if (!mh) {
ERR("!!CreateFileMapping");
return NULL;
}
if (*err == ERROR_ALREADY_EXISTS) {
ERR("!!CreateFileMapping");
CloseHandle(mh);
return NULL;
}
/* if the handle is valid the last error is undefined */
*err = 0;
return mh;
}
/*
* is_direct_access -- check if the specified volume is a
* direct access (DAX) volume
*/
static int
is_direct_access(HANDLE fh)
{
DWORD filesystemFlags;
if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL,
NULL, &filesystemFlags, NULL, 0)) {
ERR("!!GetVolumeInformationByHandleW");
/* always return a negative value */
return pmem2_lasterror_to_err();
}
if (filesystemFlags & FILE_DAX_VOLUME)
return 1;
return 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr)
{
LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
int ret = 0;
unsigned long err = 0;
size_t file_size;
*map_ptr = NULL;
if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
ERR(
"please define the max granularity requested for the mapping");
return PMEM2_E_GRANULARITY_NOT_SET;
}
ret = pmem2_source_size(src, &file_size);
if (ret)
return ret;
size_t src_alignment;
ret = pmem2_source_alignment(src, &src_alignment);
if (ret)
return ret;
size_t length;
ret = pmem2_config_validate_length(cfg, file_size, src_alignment);
if (ret)
return ret;
size_t effective_offset;
ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
if (ret)
return ret;
if (src->type == PMEM2_SOURCE_ANON)
effective_offset = 0;
/* without user-provided length, map to the end of the file */
if (cfg->length)
length = cfg->length;
else
length = file_size - effective_offset;
HANDLE map_handle = INVALID_HANDLE_VALUE;
if (src->type == PMEM2_SOURCE_HANDLE) {
map_handle = src->value.handle;
} else if (src->type == PMEM2_SOURCE_ANON) {
/* no extra settings */
} else {
ASSERT(0);
}
DWORD proto = PAGE_READWRITE;
DWORD access = FILE_MAP_ALL_ACCESS;
/* Unsupported flag combinations */
if ((cfg->protection_flag == PMEM2_PROT_NONE) ||
(cfg->protection_flag == PMEM2_PROT_WRITE) ||
(cfg->protection_flag == PMEM2_PROT_EXEC) ||
(cfg->protection_flag == (PMEM2_PROT_WRITE |
PMEM2_PROT_EXEC))) {
ERR("Windows does not support "
"this protection flag combination.");
return PMEM2_E_NOSUPP;
}
/* Translate protection flags into Windows flags */
if (cfg->protection_flag & PMEM2_PROT_WRITE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE |
FILE_MAP_EXECUTE;
} else {
/*
* Due to the already done exclusion
* of incorrect combinations, PROT_WRITE
* implies PROT_READ
*/
proto = PAGE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE;
}
} else if (cfg->protection_flag & PMEM2_PROT_READ) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READ;
access = FILE_MAP_READ | FILE_MAP_EXECUTE;
} else {
proto = PAGE_READONLY;
access = FILE_MAP_READ;
}
}
if (cfg->sharing == PMEM2_PRIVATE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_WRITECOPY;
access = FILE_MAP_EXECUTE | FILE_MAP_COPY;
} else {
/*
* If FILE_MAP_COPY is set,
* protection is changed to read/write
*/
proto = PAGE_READONLY;
access = FILE_MAP_COPY;
}
}
/* create a file mapping handle */
HANDLE mh = create_mapping(map_handle, effective_offset, length,
proto, &err);
if (!mh) {
if (err == ERROR_ALREADY_EXISTS) {
ERR("mapping already exists");
return PMEM2_E_MAPPING_EXISTS;
} else if (err == ERROR_ACCESS_DENIED) {
return PMEM2_E_NO_ACCESS;
}
return pmem2_lasterror_to_err();
}
ret = pmem2_config_validate_addr_alignment(cfg, src);
if (ret)
return ret;
/* let's get addr from cfg struct */
LPVOID addr_hint = cfg->addr;
/* obtain a pointer to the mapping view */
void *base = MapViewOfFileEx(mh,
access,
HIDWORD(effective_offset),
LODWORD(effective_offset),
length,
addr_hint); /* hint address */
if (base == NULL) {
ERR("!!MapViewOfFileEx");
if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
DWORD ret_windows = GetLastError();
if (ret_windows == ERROR_INVALID_ADDRESS)
ret = PMEM2_E_MAPPING_EXISTS;
else
ret = pmem2_lasterror_to_err();
		} else {
			ret = pmem2_lasterror_to_err();
		}
goto err_close_mapping_handle;
}
if (!CloseHandle(mh)) {
ERR("!!CloseHandle");
ret = pmem2_lasterror_to_err();
goto err_unmap_base;
}
enum pmem2_granularity available_min_granularity =
PMEM2_GRANULARITY_PAGE;
if (src->type == PMEM2_SOURCE_HANDLE) {
int direct_access = is_direct_access(src->value.handle);
if (direct_access < 0) {
ret = direct_access;
goto err_unmap_base;
}
bool eADR = (pmem2_auto_flush() == 1);
available_min_granularity =
get_min_granularity(eADR, direct_access, cfg->sharing);
} else if (src->type == PMEM2_SOURCE_ANON) {
available_min_granularity = PMEM2_GRANULARITY_BYTE;
} else {
ASSERT(0);
}
if (available_min_granularity > cfg->requested_max_granularity) {
		const char *msg = granularity_err_msg
			[cfg->requested_max_granularity]
			[available_min_granularity];
		if (strcmp(msg, GRAN_IMPOSSIBLE) == 0)
			FATAL(
				"unhandled granularity error: available_min_granularity: %d, " \
				"requested_max_granularity: %d",
				available_min_granularity,
				cfg->requested_max_granularity);
		ERR("%s", msg);
ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
goto err_unmap_base;
}
/* prepare pmem2_map structure */
struct pmem2_map *map;
map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
if (!map)
goto err_unmap_base;
map->addr = base;
/*
	 * XXX in some cases the reserved length is probably greater than
	 * the content length; it may be worth investigating.
*/
map->reserved_length = length;
map->content_length = length;
map->effective_granularity = available_min_granularity;
map->source = *src;
pmem2_set_flush_fns(map);
pmem2_set_mem_fns(map);
ret = pmem2_register_mapping(map);
if (ret)
goto err_register;
/* return a pointer to the pmem2_map structure */
*map_ptr = map;
return ret;
err_register:
	Free(map);
err_unmap_base:
UnmapViewOfFile(base);
return ret;
err_close_mapping_handle:
CloseHandle(mh);
return ret;
}
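/*
 * Illustrative calling sequence for the function above (sketch only,
 * error handling omitted; see the libpmem2 man pages):
 *
 *	struct pmem2_config *cfg;
 *	struct pmem2_source *src;
 *	struct pmem2_map *map;
 *
 *	pmem2_config_new(&cfg);
 *	pmem2_source_from_handle(&src, handle);
 *	pmem2_config_set_required_store_granularity(cfg,
 *			PMEM2_GRANULARITY_PAGE);
 *	pmem2_map(cfg, src, &map);
 */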
/*
* pmem2_unmap -- unmap the specified region
*/
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "map_ptr %p", map_ptr);
struct pmem2_map *map = *map_ptr;
int ret = pmem2_unregister_mapping(map);
if (ret)
return ret;
if (!UnmapViewOfFile(map->addr)) {
ERR("!!UnmapViewOfFile");
return pmem2_lasterror_to_err();
}
Free(map);
*map_ptr = NULL;
return 0;
}
| 8,611 | 23.123249 | 99 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/extent_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent_linux.c - implementation of the linux fs extent query API
*/
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "file.h"
#include "out.h"
#include "extent.h"
#include "alloc.h"
/*
* pmem2_extents_create_get -- allocate extents structure and get extents
* of the given file
*/
int
pmem2_extents_create_get(int fd, struct extents **exts)
{
LOG(3, "fd %i extents %p", fd, exts);
ASSERT(fd > 2);
ASSERTne(exts, NULL);
enum pmem2_file_type pmem2_type;
struct extents *pexts = NULL;
struct fiemap *fmap = NULL;
os_stat_t st;
if (os_fstat(fd, &st) < 0) {
ERR("!fstat %d", fd);
return PMEM2_E_ERRNO;
}
int ret = pmem2_get_type_from_stat(&st, &pmem2_type);
if (ret)
return ret;
/* directories do not have any extents */
if (pmem2_type == PMEM2_FTYPE_DIR) {
		ERR(
		"querying extents is not supported for directories");
return PMEM2_E_INVALID_FILE_TYPE;
}
/* allocate extents structure */
pexts = pmem2_zalloc(sizeof(struct extents), &ret);
if (ret)
return ret;
/* save block size */
LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize);
pexts->blksize = (uint64_t)st.st_blksize;
/* DAX device does not have any extents */
if (pmem2_type == PMEM2_FTYPE_DEVDAX) {
*exts = pexts;
return 0;
}
ASSERTeq(pmem2_type, PMEM2_FTYPE_REG);
fmap = pmem2_zalloc(sizeof(struct fiemap), &ret);
if (ret)
goto error_free;
fmap->fm_start = 0;
fmap->fm_length = (size_t)st.st_size;
fmap->fm_flags = 0;
fmap->fm_extent_count = 0;
fmap->fm_mapped_extents = 0;
if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
ERR("!fiemap ioctl() for fd=%d failed", fd);
ret = PMEM2_E_ERRNO;
goto error_free;
}
size_t newsize = sizeof(struct fiemap) +
fmap->fm_mapped_extents * sizeof(struct fiemap_extent);
struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret);
if (ret)
goto error_free;
fmap = newfmap;
memset(fmap->fm_extents, 0, fmap->fm_mapped_extents *
sizeof(struct fiemap_extent));
fmap->fm_extent_count = fmap->fm_mapped_extents;
fmap->fm_mapped_extents = 0;
if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
ERR("!fiemap ioctl() for fd=%d failed", fd);
ret = PMEM2_E_ERRNO;
goto error_free;
}
LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents);
/* save number of extents */
pexts->extents_count = fmap->fm_mapped_extents;
pexts->extents = pmem2_malloc(
pexts->extents_count * sizeof(struct extent),
&ret);
if (ret)
goto error_free;
/* save extents */
unsigned e;
for (e = 0; e < fmap->fm_mapped_extents; e++) {
pexts->extents[e].offset_physical =
fmap->fm_extents[e].fe_physical;
pexts->extents[e].offset_logical =
fmap->fm_extents[e].fe_logical;
pexts->extents[e].length =
fmap->fm_extents[e].fe_length;
LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu",
e,
pexts->extents[e].offset_physical,
pexts->extents[e].offset_logical,
pexts->extents[e].length);
}
*exts = pexts;
Free(fmap);
return 0;
error_free:
Free(pexts->extents);
Free(pexts);
Free(fmap);
return ret;
}
/*
* pmem2_extents_destroy -- free extents structure
*/
void
pmem2_extents_destroy(struct extents **exts)
{
LOG(3, "extents %p", exts);
ASSERTne(exts, NULL);
if (*exts) {
Free((*exts)->extents);
Free(*exts);
*exts = NULL;
}
}
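/*
 * The two functions above are meant to be used together, e.g.
 * (illustrative sketch; handle_extent is hypothetical):
 *
 *	struct extents *exts;
 *	if (pmem2_extents_create_get(fd, &exts) == 0) {
 *		for (unsigned e = 0; e < exts->extents_count; e++)
 *			handle_extent(&exts->extents[e]);
 *		pmem2_extents_destroy(&exts);
 *	}
 */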
| 3,519 | 20.333333 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef X86_64_FLUSH_H
#define X86_64_FLUSH_H
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#include "valgrind_internal.h"
#define FLUSH_ALIGN ((uintptr_t)64)
static force_inline void
pmem_clflush(const void *addr)
{
_mm_clflush(addr);
}
#ifdef _MSC_VER
static force_inline void
pmem_clflushopt(const void *addr)
{
_mm_clflushopt(addr);
}
static force_inline void
pmem_clwb(const void *addr)
{
_mm_clwb(addr);
}
#else
/*
 * The x86 memory instructions are new enough that the compiler
 * intrinsic functions are not always available. The intrinsic
 * functions are defined here in terms of asm statements for now:
 * the 0x66 prefix turns CLFLUSH into CLFLUSHOPT, and XSAVEOPT
 * into CLWB, which is why the asm below spells out the older
 * mnemonics prefixed with ".byte 0x66".
 */
static force_inline void
pmem_clflushopt(const void *addr)
{
asm volatile(".byte 0x66; clflush %0" : "+m" \
(*(volatile char *)(addr)));
}
static force_inline void
pmem_clwb(const void *addr)
{
asm volatile(".byte 0x66; xsaveopt %0" : "+m" \
(*(volatile char *)(addr)));
}
#endif /* _MSC_VER */
typedef void flush_fn(const void *, size_t);
/*
* flush_clflush_nolog -- flush the CPU cache, using clflush
*/
static force_inline void
flush_clflush_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN)
_mm_clflush((char *)uptr);
}
/*
* flush_clflushopt_nolog -- flush the CPU cache, using clflushopt
*/
static force_inline void
flush_clflushopt_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clflushopt((char *)uptr);
}
}
/*
* flush_clwb_nolog -- flush the CPU cache, using clwb
*/
static force_inline void
flush_clwb_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clwb((char *)uptr);
}
}
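/*
 * Note: clflushopt and clwb are weakly ordered, so callers of the
 * _nolog variants above are expected to issue a fence afterwards,
 * e.g. (illustrative):
 *
 *	flush_clwb_nolog(addr, len);
 *	_mm_sfence();
 */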
/*
* flush64b_empty -- (internal) do not flush the CPU cache
*/
static force_inline void
flush64b_empty(const void *addr)
{
/* NOP, but tell pmemcheck about it */
VALGRIND_DO_FLUSH(addr, 64);
}
#endif
| 2,521 | 20.193277 | 66 | h |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/init.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <string.h>
#include <xmmintrin.h>
#include "auto_flush.h"
#include "cpu.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "os.h"
#include "out.h"
#include "pmem2_arch.h"
#include "valgrind_internal.h"
#define MOVNT_THRESHOLD 256
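/*
 * Movnt_threshold -- copy size above which the non-temporal (movnt)
 * variants are used by default; tunable via the PMEM_MOVNT_THRESHOLD
 * environment variable (see pmem2_arch_init below).
 */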
size_t Movnt_threshold = MOVNT_THRESHOLD;
/*
* memory_barrier -- (internal) issue the fence instruction
*/
static void
memory_barrier(void)
{
LOG(15, NULL);
_mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */
}
/*
* flush_clflush -- (internal) flush the CPU cache, using clflush
*/
static void
flush_clflush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflush_nolog(addr, len);
}
/*
* flush_clflushopt -- (internal) flush the CPU cache, using clflushopt
*/
static void
flush_clflushopt(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflushopt_nolog(addr, len);
}
/*
* flush_clwb -- (internal) flush the CPU cache, using clwb
*/
static void
flush_clwb(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clwb_nolog(addr, len);
}
#if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE
#define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL)
#define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL)
#define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \
static void *\
memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memmove_mov_##isa##_noflush(dest, src, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memmove_mov_##isa##_##flush(dest, src, len);\
else if (len < Movnt_threshold)\
memmove_mov_##isa##_##flush(dest, src, len);\
else\
memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\
\
return dest;\
}
#define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memmove_mov_##isa##_noflush(dest, src, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\
else\
memmove_mov_##isa##_empty(dest, src, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE(isa, flush, perfbarrier)\
static void *\
memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memset_mov_##isa##_noflush(dest, c, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memset_mov_##isa##_##flush(dest, c, len);\
else if (len < Movnt_threshold)\
memset_mov_##isa##_##flush(dest, c, len);\
else\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memset_mov_##isa##_noflush(dest, c, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\
else\
memset_mov_##isa##_empty(dest, c, len);\
\
return dest;\
}
#endif
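/*
 * Each template above expands into one dispatch function per
 * (ISA, flush, barrier) combination, e.g. (illustrative):
 *
 *	memmove_nodrain_sse2_clwb_nobarrier(dest, src, len, flags, flushf);
 *
 * which picks the temporal (mov) or non-temporal (movnt) variant
 * based on the flags and on Movnt_threshold.
 */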
#if SSE2_AVAILABLE
MEMCPY_TEMPLATE(sse2, clflush, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(sse2, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(sse2, _nobarrier)
MEMSET_TEMPLATE(sse2, clflush, _nobarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMSET_TEMPLATE(sse2, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(sse2, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflush, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(sse2, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(sse2, _wcbarrier)
#endif
#if AVX_AVAILABLE
MEMCPY_TEMPLATE(avx, clflush, _nobarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(avx, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(avx, _nobarrier)
MEMSET_TEMPLATE(avx, clflush, _nobarrier)
MEMSET_TEMPLATE(avx, clflushopt, _nobarrier)
MEMSET_TEMPLATE(avx, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(avx, _nobarrier)
MEMCPY_TEMPLATE(avx, clflush, _wcbarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(avx, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(avx, _wcbarrier)
MEMSET_TEMPLATE(avx, clflush, _wcbarrier)
MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(avx, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(avx, _wcbarrier)
#endif
#if AVX512F_AVAILABLE
MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle wa */)
MEMCPY_TEMPLATE(avx512f, clflushopt, /* */)
MEMCPY_TEMPLATE(avx512f, clwb, /* */)
MEMCPY_TEMPLATE_EADR(avx512f, /* */)
MEMSET_TEMPLATE(avx512f, clflush, /* */)
MEMSET_TEMPLATE(avx512f, clflushopt, /* */)
MEMSET_TEMPLATE(avx512f, clwb, /* */)
MEMSET_TEMPLATE_EADR(avx512f, /* */)
#endif
enum memcpy_impl {
MEMCPY_INVALID,
MEMCPY_SSE2,
MEMCPY_AVX,
MEMCPY_AVX512F
};
/*
* use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible
*/
static void
use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
int wc_workaround)
{
#if SSE2_AVAILABLE
*impl = MEMCPY_SSE2;
if (wc_workaround) {
info->memmove_nodrain_eadr =
memmove_nodrain_sse2_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_sse2_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_sse2_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_sse2_clwb_wcbarrier;
else
ASSERT(0);
info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_sse2_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_sse2_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_sse2_clwb_wcbarrier;
else
ASSERT(0);
} else {
info->memmove_nodrain_eadr =
memmove_nodrain_sse2_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_sse2_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_sse2_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_sse2_clwb_nobarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_sse2_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_sse2_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_sse2_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_sse2_clwb_nobarrier;
else
ASSERT(0);
}
#else
LOG(3, "sse2 disabled at build time");
#endif
}
/*
* use_avx_memcpy_memset -- (internal) AVX detected, use it if possible
*/
static void
use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
int wc_workaround)
{
#if AVX_AVAILABLE
LOG(3, "avx supported");
char *e = os_getenv("PMEM_AVX");
if (e != NULL && strcmp(e, "0") == 0) {
LOG(3, "PMEM_AVX set to 0");
return;
}
LOG(3, "PMEM_AVX enabled");
*impl = MEMCPY_AVX;
if (wc_workaround) {
info->memmove_nodrain_eadr =
memmove_nodrain_avx_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_avx_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_avx_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_avx_clwb_wcbarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_avx_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_avx_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_avx_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_avx_clwb_wcbarrier;
else
ASSERT(0);
} else {
info->memmove_nodrain_eadr =
memmove_nodrain_avx_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_avx_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_avx_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_avx_clwb_nobarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_avx_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_avx_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_avx_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_avx_clwb_nobarrier;
else
ASSERT(0);
}
#else
LOG(3, "avx supported, but disabled at build time");
#endif
}
/*
* use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible
*/
static void
use_avx512f_memcpy_memset(struct pmem2_arch_info *info,
enum memcpy_impl *impl)
{
#if AVX512F_AVAILABLE
LOG(3, "avx512f supported");
char *e = os_getenv("PMEM_AVX512F");
if (e != NULL && strcmp(e, "0") == 0) {
LOG(3, "PMEM_AVX512F set to 0");
return;
}
LOG(3, "PMEM_AVX512F enabled");
*impl = MEMCPY_AVX512F;
info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr;
if (info->flush == flush_clflush)
info->memmove_nodrain = memmove_nodrain_avx512f_clflush;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt;
else if (info->flush == flush_clwb)
info->memmove_nodrain = memmove_nodrain_avx512f_clwb;
else
ASSERT(0);
info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr;
if (info->flush == flush_clflush)
info->memset_nodrain = memset_nodrain_avx512f_clflush;
else if (info->flush == flush_clflushopt)
info->memset_nodrain = memset_nodrain_avx512f_clflushopt;
else if (info->flush == flush_clwb)
info->memset_nodrain = memset_nodrain_avx512f_clwb;
else
ASSERT(0);
#else
LOG(3, "avx512f supported, but disabled at build time");
#endif
}
/*
 * pmem_cpuinfo_to_funcs -- choose flush and memcpy functions based on CPUID
*/
static void
pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl)
{
LOG(3, NULL);
if (is_cpu_clflush_present()) {
LOG(3, "clflush supported");
info->flush = flush_clflush;
info->flush_has_builtin_fence = 1;
info->fence = memory_barrier;
}
if (is_cpu_clflushopt_present()) {
LOG(3, "clflushopt supported");
char *e = os_getenv("PMEM_NO_CLFLUSHOPT");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt");
} else {
info->flush = flush_clflushopt;
info->flush_has_builtin_fence = 0;
info->fence = memory_barrier;
}
}
if (is_cpu_clwb_present()) {
LOG(3, "clwb supported");
char *e = os_getenv("PMEM_NO_CLWB");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLWB forced no clwb");
} else {
info->flush = flush_clwb;
info->flush_has_builtin_fence = 0;
info->fence = memory_barrier;
}
}
/*
	 * XXX Disable this workaround for Intel CPUs with optimized
* WC eviction.
*/
int wc_workaround = is_cpu_genuine_intel();
char *ptr = os_getenv("PMEM_WC_WORKAROUND");
if (ptr) {
if (strcmp(ptr, "1") == 0) {
LOG(3, "WC workaround forced to 1");
wc_workaround = 1;
} else if (strcmp(ptr, "0") == 0) {
LOG(3, "WC workaround forced to 0");
wc_workaround = 0;
} else {
LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)",
ptr);
}
}
LOG(3, "WC workaround = %d", wc_workaround);
ptr = os_getenv("PMEM_NO_MOVNT");
if (ptr && strcmp(ptr, "1") == 0) {
LOG(3, "PMEM_NO_MOVNT forced no movnt");
} else {
use_sse2_memcpy_memset(info, impl, wc_workaround);
if (is_cpu_avx_present())
use_avx_memcpy_memset(info, impl, wc_workaround);
if (is_cpu_avx512f_present())
use_avx512f_memcpy_memset(info, impl);
}
}
/*
* pmem2_arch_init -- initialize architecture-specific list of pmem operations
*/
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
LOG(3, NULL);
enum memcpy_impl impl = MEMCPY_INVALID;
pmem_cpuinfo_to_funcs(info, &impl);
/*
* For testing, allow overriding the default threshold
* for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*()
* and pmem_memset_*().
* It has no effect if movnt is not supported or disabled.
*/
const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD");
if (ptr) {
long long val = atoll(ptr);
if (val < 0) {
LOG(3, "Invalid PMEM_MOVNT_THRESHOLD");
} else {
LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val);
Movnt_threshold = (size_t)val;
}
}
if (info->flush == flush_clwb)
LOG(3, "using clwb");
else if (info->flush == flush_clflushopt)
LOG(3, "using clflushopt");
else if (info->flush == flush_clflush)
LOG(3, "using clflush");
else
FATAL("invalid deep flush function address");
if (impl == MEMCPY_AVX512F)
LOG(3, "using movnt AVX512F");
else if (impl == MEMCPY_AVX)
LOG(3, "using movnt AVX");
else if (impl == MEMCPY_SSE2)
LOG(3, "using movnt SSE2");
}
| 13,899 | 25.275992 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/avx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
#ifndef PMEM_AVX_H
#define PMEM_AVX_H
#include <immintrin.h>
#include "util.h"
/*
* avx_zeroupper -- _mm256_zeroupper wrapper
*
* _mm256_zeroupper clears upper parts of avx registers.
*
* It's needed for 2 reasons:
* - it improves performance of non-avx code after avx
* - it works around problem discovered by Valgrind
*
* In optimized builds gcc inserts VZEROUPPER automatically before
* calling non-avx code (or at the end of the function). But in release
* builds it doesn't, so if we don't do this by ourselves, then when
* someone memcpy'ies uninitialized data, Valgrind complains whenever
* someone reads those registers.
*
 * One notable example is the loader, which tries to detect whether it
* needs to save whole ymm registers by looking at their current
* (possibly uninitialized) value.
*
* Valgrind complains like that:
* Conditional jump or move depends on uninitialised value(s)
* at 0x4015CC9: _dl_runtime_resolve_avx_slow
* (in /lib/x86_64-linux-gnu/ld-2.24.so)
* by 0x10B531: test_realloc_api (obj_basic_integration.c:185)
* by 0x10F1EE: main (obj_basic_integration.c:594)
*
* Note: We have to be careful to not read AVX registers after this
* intrinsic, because of this stupid gcc bug:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735
*/
static force_inline void
avx_zeroupper(void)
{
_mm256_zeroupper();
}
static force_inline __m128i
m256_get16b(__m256i ymm)
{
return _mm256_extractf128_si256(ymm, 0);
}
#ifdef _MSC_VER
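/*
 * Presumably this MSVC-specific path exists because MSVC does not
 * provide the composite _mm256_extract_epi* intrinsics used in the
 * other branch, so extract the low 128 bits and use the SSE variants.
 */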
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)m256_get8b(ymm);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)m256_get8b(ymm);
}
#else
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm256_extract_epi64(ymm, 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)_mm256_extract_epi32(ymm, 0);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)_mm256_extract_epi16(ymm, 0);
}
#endif
#endif
| 2,238 | 24.735632 | 72 | h |