repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_bad_blocks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_bad_blocks.c -- pre-check bad_blocks
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#include "set_badblocks.h"
#include "badblocks.h"
/*
* check_bad_blocks -- check poolset for bad_blocks
*/
/*
 * check_bad_blocks -- check poolset (or a single pool file) for bad blocks
 * and record the verdict in ppc->result. Bad blocks are only detected,
 * never repaired here.
 */
void
check_bad_blocks(PMEMpoolcheck *ppc)
{
	LOG(3, "ppc %p", ppc);

	int ret;

	/* the check runs only when the CHECK_BAD_BLOCKS compat flag is set */
	if (!(ppc->pool->params.features.compat & POOL_FEAT_CHECK_BAD_BLOCKS)) {
		/* skipping checking poolset for bad blocks */
		ppc->result = CHECK_RESULT_CONSISTENT;
		return;
	}

	/* a poolset and a standalone pool file are scanned differently */
	if (ppc->pool->set_file->poolset) {
		ret = badblocks_check_poolset(ppc->pool->set_file->poolset, 0);
	} else {
		ret = badblocks_check_file(ppc->pool->set_file->fname);
	}

	if (ret < 0) {
		/* ENOTSUP: platform cannot report bad blocks at all */
		if (errno == ENOTSUP) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			CHECK_ERR(ppc, BB_NOT_SUPP);
			return;
		}
		ppc->result = CHECK_RESULT_ERROR;
		CHECK_ERR(ppc, "checking poolset for bad blocks failed -- '%s'",
			ppc->path);
		return;
	}

	/* ret > 0 means bad blocks were found; they cannot be fixed here */
	if (ret > 0) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_ERR(ppc,
			"poolset contains bad blocks, use 'pmempool info --bad-blocks=yes' to print or 'pmempool sync --bad-blocks' to clear them");
	}
}
| 1,329 | 20.803279 | 127 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/feature.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* feature.c -- implementation of pmempool_feature_(enable|disable|query)()
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include "libpmempool.h"
#include "util_pmem.h"
#include "pool_hdr.h"
#include "pool.h"
#define RW 0
#define RDONLY 1
#define FEATURE_INCOMPAT(X) \
(features_t)FEAT_INCOMPAT(X)
static const features_t f_singlehdr = FEAT_INCOMPAT(SINGLEHDR);
static const features_t f_cksum_2k = FEAT_INCOMPAT(CKSUM_2K);
static const features_t f_sds = FEAT_INCOMPAT(SDS);
static const features_t f_chkbb = FEAT_COMPAT(CHECK_BAD_BLOCKS);
#define FEAT_INVALID \
{UINT32_MAX, UINT32_MAX, UINT32_MAX};
static const features_t f_invalid = FEAT_INVALID;
#define FEATURE_MAXPRINT ((size_t)1024)
/*
* buff_concat -- (internal) concat formatted string to string buffer
*/
/*
 * buff_concat -- (internal) concat formatted string to string buffer
 *
 * Appends the formatted output at *pos inside a FEATURE_MAXPRINT-sized
 * buffer and advances *pos past the written text. Returns 0 on success,
 * negative on a format error, -1 on truncation or when the buffer is
 * already full.
 */
static int
buff_concat(char *buff, size_t *pos, const char *fmt, ...)
{
	/*
	 * Guard against size_t underflow below: if the buffer is already
	 * full, FEATURE_MAXPRINT - *pos - 1 would wrap to a huge value and
	 * vsnprintf could be told the buffer is larger than it is.
	 */
	if (*pos + 1 >= FEATURE_MAXPRINT) {
		ERR("buffer truncated %zu >= %zu", *pos, FEATURE_MAXPRINT);
		return -1;
	}

	va_list ap;
	va_start(ap, fmt);
	const size_t size = FEATURE_MAXPRINT - *pos - 1;
	int ret = vsnprintf(buff + *pos, size, fmt, ap);
	va_end(ap);

	if (ret < 0) {
		/* was "vsprintf" -- the function actually called is vsnprintf */
		ERR("vsnprintf");
		return ret;
	}

	/* vsnprintf returns the would-be length; >= size means truncation */
	if ((size_t)ret >= size) {
		ERR("buffer truncated %d >= %zu", ret, size);
		return -1;
	}

	*pos += (size_t)ret;
	return 0;
}
/*
* buff_concat_features -- (internal) concat features string to string buffer
*/
/*
 * buff_concat_features -- (internal) append a textual dump of the three
 * feature-flag words to the string buffer
 */
static int
buff_concat_features(char *buff, size_t *pos, features_t f)
{
	const char *fmt = "{compat 0x%x, incompat 0x%x, ro_compat 0x%x}";

	return buff_concat(buff, pos, fmt, f.compat, f.incompat, f.ro_compat);
}
/*
* poolset_close -- (internal) close pool set
*/
/*
 * poolset_close -- (internal) unmap every part header of every replica and
 * close the pool set without deleting its parts
 */
static void
poolset_close(struct pool_set *set)
{
	for (unsigned repn = 0; repn < set->nreplicas; ++repn) {
		struct pool_replica *replica = REP(set, repn);
		ASSERT(!replica->remote);
		for (unsigned partn = 0; partn < replica->nparts; ++partn)
			util_unmap_hdr(PART(replica, partn));
	}

	util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
/*
* features_check -- (internal) check if features are correct
*/
/*
 * features_check -- (internal) check if features are correct
 *
 * On the first header seen, *features equals f_invalid and the header's
 * flags are recorded into *features (provided all bits are known).
 * On subsequent headers, the flags must match the recorded value exactly.
 * Returns 0 on success, -1 on mismatch or unknown feature bits.
 */
static int
features_check(features_t *features, struct pool_hdr *hdrp)
{
	/* NOTE: static buffer -- this function is not reentrant */
	static char msg[FEATURE_MAXPRINT];
	struct pool_hdr hdr;
	memcpy(&hdr, hdrp, sizeof(hdr));
	util_convert2h_hdr_nocheck(&hdr);

	/* (features != f_invalid) <=> features is set */
	if (!util_feature_cmp(*features, f_invalid)) {
		/* features from current and previous headers have to match */
		if (!util_feature_cmp(*features, hdr.features)) {
			size_t pos = 0;
			if (buff_concat_features(msg, &pos, hdr.features))
				goto err;
			if (buff_concat(msg, &pos, "%s", " != "))
				goto err;
			if (buff_concat_features(msg, &pos, *features))
				goto err;
			ERR("features mismatch detected: %s", msg);
			return -1;
		} else {
			return 0;
		}
	}

	/* first header: reject any bits outside POOL_FEAT_VALID */
	features_t unknown = util_get_unknown_features(
		hdr.features, (features_t)POOL_FEAT_VALID);

	/* all features are known */
	if (util_feature_is_zero(unknown)) {
		memcpy(features, &hdr.features, sizeof(*features));
		return 0;
	}

	/* unknown features detected - print error message */
	size_t pos = 0;
	if (buff_concat_features(msg, &pos, unknown))
		goto err;
	ERR("invalid features detected: %s", msg);
err:
	return -1;
}
/*
* get_pool_open_flags -- (internal) generate pool open flags
*/
/*
 * get_pool_open_flags -- (internal) generate pool open flags
 *
 * Bad blocks are always ignored here; copy-on-write is requested only for
 * read-only opens of poolsets without device-dax parts.
 */
static inline unsigned
get_pool_open_flags(struct pool_set *set, int rdonly)
{
	unsigned open_flags = POOL_OPEN_IGNORE_BAD_BLOCKS;

	if (rdonly == RDONLY && !util_pool_has_device_dax(set))
		open_flags |= POOL_OPEN_COW;

	return open_flags;
}
/*
* get_mmap_flags -- (internal) generate mmap flags
*/
/*
 * get_mmap_flags -- (internal) generate mmap flags
 *
 * Device-dax parts must always be mapped shared; regular files are mapped
 * private for read-only opens so changes never reach the medium.
 */
static inline int
get_mmap_flags(struct pool_set_part *part, int rdonly)
{
	if (!part->is_dev_dax && rdonly)
		return MAP_PRIVATE;

	return MAP_SHARED;
}
/*
* poolset_open -- (internal) open pool set
*/
/*
 * poolset_open -- (internal) open pool set, map all part headers and verify
 * that every header carries the same, known set of feature flags.
 * Returns the opened set, or NULL (with errno set by a callee) on failure.
 */
static struct pool_set *
poolset_open(const char *path, int rdonly)
{
	struct pool_set *set;
	features_t features = FEAT_INVALID;

	/* read poolset */
	int ret = util_poolset_create_set(&set, path, 0, 0, true);
	if (ret < 0) {
		ERR("cannot open pool set -- '%s'", path);
		goto err_poolset;
	}
	if (set->remote) {
		ERR("poolsets with remote replicas are not supported");
		errno = EINVAL;
		goto err_open;
	}

	/* open a memory pool */
	unsigned flags = get_pool_open_flags(set, rdonly);
	if (util_pool_open_nocheck(set, flags))
		goto err_open;

	/* map all headers and check features */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		ASSERT(!rep->remote);
		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct pool_set_part *part = PART(rep, p);
			int mmap_flags = get_mmap_flags(part, rdonly);
			if (util_map_hdr(part, mmap_flags, rdonly)) {
				part->hdr = NULL;
				goto err_map_hdr;
			}
			/*
			 * NOTE(review): a feature mismatch jumps to err_open,
			 * which skips the explicit header-unmapping loop --
			 * confirm util_poolset_close also unmaps headers.
			 */
			if (features_check(&features, HDR(rep, p))) {
				ERR(
					"invalid features - replica #%d part #%d",
					r, p);
				goto err_open;
			}
		}
	}
	return set;

err_map_hdr:
	/* unmap all headers */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		ASSERT(!rep->remote);
		for (unsigned p = 0; p < rep->nparts; ++p) {
			util_unmap_hdr(PART(rep, p));
		}
	}
err_open:
	/* close the memory pool and release pool set structure */
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_poolset:
	return NULL;
}

/*
 * get_hdr -- (internal) read header in host byte order
 *
 * NOTE: returns a pointer to a static buffer -- each call overwrites the
 * previous result and the function is not reentrant.
 */
static struct pool_hdr *
get_hdr(struct pool_set *set, unsigned rep, unsigned part)
{
	static struct pool_hdr hdr;

	/* copy header */
	struct pool_hdr *hdrp = HDR(REP(set, rep), part);
	memcpy(&hdr, hdrp, sizeof(hdr));

	/* convert to host byte order and return */
	util_convert2h_hdr_nocheck(&hdr);
	return &hdr;
}

/*
 * set_hdr -- (internal) convert header to little-endian, checksum and write
 * it back to the given part, persisting the write on device-dax.
 */
static void
set_hdr(struct pool_set *set, unsigned rep, unsigned part, struct pool_hdr *src)
{
	/* convert to little-endian and set new checksum */
	const size_t skip_off = POOL_HDR_CSUM_END_OFF(src);
	util_convert2le_hdr(src);
	util_checksum(src, sizeof(*src), &src->checksum, 1, skip_off);

	/* write header */
	struct pool_replica *replica = REP(set, rep);
	struct pool_hdr *dst = HDR(replica, part);
	memcpy(dst, src, sizeof(*src));
	util_persist_auto(PART(replica, part)->is_dev_dax, dst, sizeof(*src));
}

/* observed state of a single feature flag */
typedef enum {
	DISABLED,
	ENABLED
} fstate_t;

#define FEATURE_IS_ENABLED_STR "feature already enabled: %s"
#define FEATURE_IS_DISABLED_STR "feature already disabled: %s"
/*
* require_feature_is -- (internal) check if required feature is enabled
* (or disabled)
*/
/*
 * require_feature_is -- (internal) check whether the feature is currently
 * in the required state (enabled or disabled); the verdict is based on the
 * first header of the first replica.
 * Returns 1 when the requirement holds, 0 otherwise (with a LOG note).
 */
static int
require_feature_is(struct pool_set *set, features_t feature, fstate_t req_state)
{
	struct pool_hdr *hdrp = get_hdr(set, 0, 0);

	fstate_t state;
	if (util_feature_is_set(hdrp->features, feature))
		state = ENABLED;
	else
		state = DISABLED;

	if (state == req_state)
		return 1;

	if (state == ENABLED)
		LOG(3, FEATURE_IS_ENABLED_STR, util_feature2str(feature, NULL));
	else
		LOG(3, FEATURE_IS_DISABLED_STR,
			util_feature2str(feature, NULL));
	return 0;
}
#define FEATURE_IS_NOT_ENABLED_PRIOR_STR "enable %s prior to %s %s"
#define FEATURE_IS_NOT_DISABLED_PRIOR_STR "disable %s prior to %s %s"
/*
* require_other_feature_is -- (internal) check if other feature is enabled
* (or disabled) in case the other feature has to be enabled (or disabled)
* prior to the main one
*/
/*
 * require_other_feature_is -- (internal) check if another feature is in the
 * required state when that state is a prerequisite for changing the main
 * feature; logs an error naming the prerequisite when it is not met.
 * Returns 1 when the prerequisite holds, 0 otherwise.
 */
static int
require_other_feature_is(struct pool_set *set, features_t other,
	fstate_t req_state, features_t feature, const char *cause)
{
	struct pool_hdr *hdrp = get_hdr(set, 0, 0);

	fstate_t state;
	if (util_feature_is_set(hdrp->features, other))
		state = ENABLED;
	else
		state = DISABLED;

	if (state == req_state)
		return 1;

	const char *fmt;
	if (req_state == ENABLED)
		fmt = FEATURE_IS_NOT_ENABLED_PRIOR_STR;
	else
		fmt = FEATURE_IS_NOT_DISABLED_PRIOR_STR;
	ERR(fmt, util_feature2str(other, NULL),
		cause, util_feature2str(feature, NULL));
	return 0;
}
/*
* feature_set -- (internal) enable (or disable) feature
*/
/*
 * feature_set -- (internal) enable (value == ENABLED) or disable the given
 * feature flag in every part header of every replica, rewriting each header
 * with a fresh checksum
 */
static void
feature_set(struct pool_set *set, features_t feature, int value)
{
	for (unsigned repn = 0; repn < set->nreplicas; ++repn) {
		struct pool_replica *replica = REP(set, repn);
		for (unsigned partn = 0; partn < replica->nparts; ++partn) {
			struct pool_hdr *hdrp = get_hdr(set, repn, partn);

			if (value == ENABLED)
				util_feature_enable(&hdrp->features, feature);
			else
				util_feature_disable(&hdrp->features, feature);

			set_hdr(set, repn, partn, hdrp);
		}
	}
}
/*
* query_feature -- (internal) query feature value
*/
/*
 * query_feature -- (internal) query feature value
 *
 * Opens the poolset read-only and reads the flag from the first header.
 * Returns 1/0 for set/unset, or -1 when the poolset cannot be opened.
 */
static int
query_feature(const char *path, features_t feature)
{
	struct pool_set *set = poolset_open(path, RDONLY);
	if (set == NULL)
		return -1;

	struct pool_hdr *hdrp = get_hdr(set, 0, 0);
	int query = util_feature_is_set(hdrp->features, feature);

	poolset_close(set);
	return query;
}
/*
* unsupported_feature -- (internal) report unsupported feature
*/
/*
 * unsupported_feature -- (internal) report unsupported feature
 * (always fails with EINVAL)
 */
static inline int
unsupported_feature(features_t feature)
{
	const char *name = util_feature2str(feature, NULL);
	ERR("unsupported feature: %s", name);
	errno = EINVAL;
	return -1;
}
/*
* enable_singlehdr -- (internal) enable POOL_FEAT_SINGLEHDR
*/
/*
 * enable_singlehdr -- (internal) enable POOL_FEAT_SINGLEHDR
 * (SINGLEHDR cannot be toggled on an existing pool -- always fails)
 */
static int
enable_singlehdr(const char *path)
{
	return unsupported_feature(f_singlehdr);
}

/*
 * disable_singlehdr -- (internal) disable POOL_FEAT_SINGLEHDR
 * (SINGLEHDR cannot be toggled on an existing pool -- always fails)
 */
static int
disable_singlehdr(const char *path)
{
	return unsupported_feature(f_singlehdr);
}

/*
 * query_singlehdr -- (internal) query POOL_FEAT_SINGLEHDR
 * (querying is allowed even though toggling is not)
 */
static int
query_singlehdr(const char *path)
{
	return query_feature(path, f_singlehdr);
}
/*
* enable_checksum_2k -- (internal) enable POOL_FEAT_CKSUM_2K
*/
static int
enable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_cksum_2k, DISABLED))
feature_set(set, f_cksum_2k, ENABLED);
poolset_close(set);
return 0;
}
/*
* disable_checksum_2k -- (internal) disable POOL_FEAT_CKSUM_2K
*/
static int
disable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_cksum_2k, ENABLED))
goto exit;
/* check if POOL_FEAT_SDS is disabled */
if (!require_other_feature_is(set, f_sds, DISABLED,
f_cksum_2k, "disabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_cksum_2k, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_checksum_2k -- (internal) query POOL_FEAT_CKSUM_2K
*/
/*
 * query_checksum_2k -- (internal) query POOL_FEAT_CKSUM_2K
 * (returns 1/0 for enabled/disabled, -1 on open failure)
 */
static int
query_checksum_2k(const char *path)
{
	return query_feature(path, f_cksum_2k);
}
/*
* enable_shutdown_state -- (internal) enable POOL_FEAT_SDS
*/
static int
enable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_sds, DISABLED))
goto exit;
/* check if POOL_FEAT_CKSUM_2K is enabled */
if (!require_other_feature_is(set, f_cksum_2k, ENABLED,
f_sds, "enabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_sds, ENABLED);
exit:
poolset_close(set);
return ret;
}
/*
* reset_shutdown_state -- zero all shutdown structures
*/
/*
 * reset_shutdown_state -- zero all shutdown structures
 *
 * Re-initializes the SDS area in every part header of every replica;
 * called when the SDS feature is being disabled so no stale state remains.
 */
static void
reset_shutdown_state(struct pool_set *set)
{
	for (unsigned rep = 0; rep < set->nreplicas; ++rep) {
		for (unsigned part = 0; part < REP(set, rep)->nparts; ++part) {
			struct pool_hdr *hdrp = HDR(REP(set, rep), part);
			shutdown_state_init(&hdrp->sds, REP(set, rep));
		}
	}
}
/*
* disable_shutdown_state -- (internal) disable POOL_FEAT_SDS
*/
static int
disable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_sds, ENABLED)) {
feature_set(set, f_sds, DISABLED);
reset_shutdown_state(set);
}
poolset_close(set);
return 0;
}
/*
* query_shutdown_state -- (internal) query POOL_FEAT_SDS
*/
/*
 * query_shutdown_state -- (internal) query POOL_FEAT_SDS
 * (returns 1/0 for enabled/disabled, -1 on open failure)
 */
static int
query_shutdown_state(const char *path)
{
	return query_feature(path, f_sds);
}
/*
* enable_badblocks_checking -- (internal) enable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
enable_badblocks_checking(const char *path)
{
#ifdef _WIN32
ERR("bad blocks checking is not supported on Windows");
return -1;
#else
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_chkbb, DISABLED))
feature_set(set, f_chkbb, ENABLED);
poolset_close(set);
return 0;
#endif
}
/*
* disable_badblocks_checking -- (internal) disable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
disable_badblocks_checking(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_chkbb, ENABLED))
goto exit;
feature_set(set, f_chkbb, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_badblocks_checking -- (internal) query POOL_FEAT_CHECK_BAD_BLOCKS
*/
/*
 * query_badblocks_checking -- (internal) query POOL_FEAT_CHECK_BAD_BLOCKS
 * (returns 1/0 for enabled/disabled, -1 on open failure)
 */
static int
query_badblocks_checking(const char *path)
{
	return query_feature(path, f_chkbb);
}

/* per-feature dispatch table entry: enable / disable / query handlers */
struct feature_funcs {
	int (*enable)(const char *);
	int (*disable)(const char *);
	int (*query)(const char *);
};

/*
 * Dispatch table indexed by enum pmempool_feature; the order must match
 * the enum (see the CHECK_INCOMPAT_MAPPING checks in
 * pmempool_feature_queryU).
 */
static struct feature_funcs features[] = {
	{
		.enable = enable_singlehdr,
		.disable = disable_singlehdr,
		.query = query_singlehdr
	},
	{
		.enable = enable_checksum_2k,
		.disable = disable_checksum_2k,
		.query = query_checksum_2k
	},
	{
		.enable = enable_shutdown_state,
		.disable = disable_shutdown_state,
		.query = query_shutdown_state
	},
	{
		.enable = enable_badblocks_checking,
		.disable = disable_badblocks_checking,
		.query = query_badblocks_checking
	},
};

#define FEATURE_FUNCS_MAX ARRAY_SIZE(features)
/*
* are_flags_valid -- (internal) check if flags are valid
*/
/*
 * are_flags_valid -- (internal) check if flags are valid
 * (no flags are defined yet, so only 0 is accepted)
 */
static inline int
are_flags_valid(unsigned flags)
{
	if (flags == 0)
		return 1;

	ERR("invalid flags: 0x%x", flags);
	errno = EINVAL;
	return 0;
}
/*
* is_feature_valid -- (internal) check if feature is valid
*/
/*
 * is_feature_valid -- (internal) check if the feature index falls inside
 * the dispatch table
 */
static inline int
is_feature_valid(uint32_t feature)
{
	if (feature < FEATURE_FUNCS_MAX)
		return 1;

	ERR("invalid feature: 0x%x", feature);
	errno = EINVAL;
	return 0;
}
/*
* pmempool_feature_enableU -- enable pool set feature
*/
/*
 * pmempool_feature_enableU -- enable pool set feature
 *
 * Validates the feature index and flags, then dispatches to the
 * per-feature enable handler. Returns the handler's result, or -1 with
 * errno = EINVAL on invalid arguments.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);
	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].enable(path);
}

/*
 * pmempool_feature_disableU -- disable pool set feature
 *
 * Validates the feature index and flags, then dispatches to the
 * per-feature disable handler.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);
	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].disable(path);
}

/*
 * pmempool_feature_queryU -- query pool set feature
 *
 * Compile-time asserts that the feature-flag <-> public-enum mapping
 * matches the order of the dispatch table, then dispatches to the
 * per-feature query handler.
 */
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	LOG(3, "path %s feature %x flags %x", path, feature, flags);

	/*
	 * XXX: Windows does not allow function call in a constant expressions
	 */
#ifndef _WIN32
#define CHECK_INCOMPAT_MAPPING(FEAT, ENUM) \
	COMPILE_ERROR_ON( \
		util_feature2pmempool_feature(FEATURE_INCOMPAT(FEAT)) != ENUM)

	CHECK_INCOMPAT_MAPPING(SINGLEHDR, PMEMPOOL_FEAT_SINGLEHDR);
	CHECK_INCOMPAT_MAPPING(CKSUM_2K, PMEMPOOL_FEAT_CKSUM_2K);
	CHECK_INCOMPAT_MAPPING(SDS, PMEMPOOL_FEAT_SHUTDOWN_STATE);

#undef CHECK_INCOMPAT_MAPPING
#endif

	if (!is_feature_valid(feature))
		return -1;
	if (!are_flags_valid(flags))
		return -1;
	return features[feature].query(path);
}
#ifndef _WIN32
/*
 * pmempool_feature_enable -- enable pool set feature
 * (thin public wrapper over pmempool_feature_enableU)
 */
int
pmempool_feature_enable(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	return pmempool_feature_enableU(path, feature, flags);
}
#else
/*
 * pmempool_feature_enableW -- enable pool set feature as widechar
 * (converts the path to UTF-8 and forwards to pmempool_feature_enableU)
 */
int
pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature,
	unsigned flags)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL) {
		/* fixed typo in the error message: was "poolest" */
		ERR("Invalid poolset/pool file path.");
		return -1;
	}

	int ret = pmempool_feature_enableU(upath, feature, flags);
	util_free_UTF8(upath);
	return ret;
}
#endif
#ifndef _WIN32
/*
 * pmempool_feature_disable -- disable pool set feature
 * (thin public wrapper over pmempool_feature_disableU)
 */
int
pmempool_feature_disable(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	return pmempool_feature_disableU(path, feature, flags);
}
#else
/*
 * pmempool_feature_disableW -- disable pool set feature as widechar
 * (converts the path to UTF-8 and forwards to pmempool_feature_disableU)
 */
int
pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature,
	unsigned flags)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL) {
		/* fixed typo in the error message: was "poolest" */
		ERR("Invalid poolset/pool file path.");
		return -1;
	}

	int ret = pmempool_feature_disableU(upath, feature, flags);
	util_free_UTF8(upath);
	return ret;
}
#endif
#ifndef _WIN32
/*
 * pmempool_feature_query -- query pool set feature
 * (thin public wrapper over pmempool_feature_queryU)
 */
int
pmempool_feature_query(const char *path, enum pmempool_feature feature,
	unsigned flags)
{
	return pmempool_feature_queryU(path, feature, flags);
}
#else
/*
 * pmempool_feature_queryW -- query pool set feature as widechar
 * (converts the path to UTF-8 and forwards to pmempool_feature_queryU)
 */
int
pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature,
	unsigned flags)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL) {
		/* fixed typo in the error message: was "poolest" */
		ERR("Invalid poolset/pool file path.");
		return -1;
	}

	int ret = pmempool_feature_queryU(upath, feature, flags);
	util_free_UTF8(upath);
	return ret;
}
#endif
| 17,344 | 20.955696 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/check_btt_map_flog.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* check_btt_map_flog.c -- check BTT Map and Flog
*/
#include <stdint.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum questions {
Q_REPAIR_MAP,
Q_REPAIR_FLOG,
};
/*
* flog_read -- (internal) read and convert flog from file
*/
/*
 * flog_read -- (internal) read and convert flog from file
 *
 * Allocates arenap->flog, reads the arena's flog area from the pool and
 * converts every flog pair to host byte order in place.
 * Returns 0 on success, -1 on allocation or read failure (flog left NULL).
 */
static int
flog_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
	uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff;
	arenap->flogsize = btt_flog_size(arenap->btt_info.nfree);

	arenap->flog = malloc(arenap->flogsize);
	if (!arenap->flog) {
		ERR("!malloc");
		goto error_malloc;
	}

	if (pool_read(ppc->pool, arenap->flog, arenap->flogsize, flogoff))
		goto error_read;

	/* each flog slot holds a pair of btt_flog structures */
	uint8_t *ptr = arenap->flog;
	uint32_t i;
	for (i = 0; i < arenap->btt_info.nfree; i++) {
		struct btt_flog *flog = (struct btt_flog *)ptr;
		btt_flog_convert2h(&flog[0]);
		btt_flog_convert2h(&flog[1]);
		ptr += BTT_FLOG_PAIR_ALIGN;
	}

	return 0;

error_read:
	free(arenap->flog);
	arenap->flog = NULL;
error_malloc:
	return -1;
}

/*
 * map_read -- (internal) read and convert map from file
 *
 * Allocates arenap->map, reads the arena's BTT map from the pool and
 * converts every entry from little-endian to host byte order.
 * Returns 0 on success, -1 on allocation or read failure (map left NULL).
 */
static int
map_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
	uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff;
	arenap->mapsize = btt_map_size(arenap->btt_info.external_nlba);

	ASSERT(arenap->mapsize != 0);
	arenap->map = malloc(arenap->mapsize);
	if (!arenap->map) {
		ERR("!malloc");
		goto error_malloc;
	}

	if (pool_read(ppc->pool, arenap->map, arenap->mapsize, mapoff)) {
		goto error_read;
	}

	uint32_t i;
	for (i = 0; i < arenap->btt_info.external_nlba; i++)
		arenap->map[i] = le32toh(arenap->map[i]);

	return 0;

error_read:
	free(arenap->map);
	arenap->map = NULL;
error_malloc:
	return -1;
}
/*
* list_item -- item for simple list
*/
/* single node of the simple list below; holds one 32-bit value */
struct list_item {
	PMDK_LIST_ENTRY(list_item) next;
	uint32_t val;
};

/*
 * list -- simple list for storing numbers
 * (singly-linked, LIFO; count tracks the number of items)
 */
struct list {
	PMDK_LIST_HEAD(listhead, list_item) head;
	uint32_t count;
};
/*
* list_alloc -- (internal) allocate an empty list
*/
/*
 * list_alloc -- (internal) allocate an empty list
 * (returns NULL on allocation failure)
 */
static struct list *
list_alloc(void)
{
	struct list *l = malloc(sizeof *l);
	if (l == NULL) {
		ERR("!malloc");
		return NULL;
	}

	PMDK_LIST_INIT(&l->head);
	l->count = 0;
	return l;
}
/*
* list_push -- (internal) insert new element to the list
*/
/*
 * list_push -- (internal) insert a new element at the head of the list
 * (returns the new item, or NULL on allocation failure)
 */
static struct list_item *
list_push(struct list *list, uint32_t val)
{
	struct list_item *node = malloc(sizeof *node);
	if (node == NULL) {
		ERR("!malloc");
		return NULL;
	}

	node->val = val;
	PMDK_LIST_INSERT_HEAD(&list->head, node, next);
	list->count++;
	return node;
}
/*
* list_pop -- (internal) pop element from list head
*/
/*
 * list_pop -- (internal) pop element from list head
 *
 * Stores the popped value in *valp (when valp is not NULL) and frees the
 * node. Returns 1 when an element was popped, 0 when the list was empty.
 */
static int
list_pop(struct list *list, uint32_t *valp)
{
	if (PMDK_LIST_EMPTY(&list->head))
		return 0;

	struct list_item *node = PMDK_LIST_FIRST(&list->head);
	PMDK_LIST_REMOVE(node, next);
	if (valp != NULL)
		*valp = node->val;
	free(node);
	list->count--;
	return 1;
}
/*
* list_free -- (internal) free the list
*/
/*
 * list_free -- (internal) drain all remaining items, then release the
 * list structure itself
 */
static void
list_free(struct list *list)
{
	while (list_pop(list, NULL) != 0)
		;
	free(list);
}
/*
* cleanup -- (internal) prepare resources for map and flog check
*/
/*
 * cleanup -- (internal) release all per-arena resources allocated by init
 * (safe to call when some of them were never allocated; always returns 0
 * so it can serve as a check step)
 */
static int
cleanup(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);

	if (loc->list_unmap)
		list_free(loc->list_unmap);
	if (loc->list_flog_inval)
		list_free(loc->list_flog_inval);
	if (loc->list_inval)
		list_free(loc->list_inval);

	/* free(NULL) is a no-op, so no guards are needed here */
	free(loc->fbitmap);
	free(loc->bitmap);
	free(loc->dup_bitmap);

	return 0;
}
/*
* init -- (internal) initialize map and flog check
*/
/*
 * init -- (internal) initialize map and flog check
 *
 * Reads the arena's flog and map, then allocates three block bitmaps
 * (used, duplicated, freed-by-flog) and three lists of suspect entries.
 * On failure sets ppc->result to CHECK_RESULT_ERROR, releases everything
 * already allocated and returns -1.
 */
static int
init(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);

	struct arena *arenap = loc->arenap;

	/* read flog and map entries */
	if (flog_read(ppc, arenap)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Flog", arenap->id);
		goto error;
	}

	if (map_read(ppc, arenap)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Map", arenap->id);
		goto error;
	}

	/* create bitmaps for checking duplicated blocks */
	uint32_t bitmapsize = howmany(arenap->btt_info.internal_nlba, 8);
	loc->bitmap = calloc(bitmapsize, 1);
	if (!loc->bitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for blocks "
			"bitmap", arenap->id);
		goto error;
	}

	/* postmap LBAs seen more than once in the map */
	loc->dup_bitmap = calloc(bitmapsize, 1);
	if (!loc->dup_bitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for "
			"duplicated blocks bitmap", arenap->id);
		goto error;
	}

	/* blocks claimed as free by valid flog entries */
	loc->fbitmap = calloc(bitmapsize, 1);
	if (!loc->fbitmap) {
		ERR("!calloc");
		CHECK_ERR(ppc, "arena %u: cannot allocate memory for BTT Flog "
			"bitmap", arenap->id);
		goto error;
	}

	/* list of invalid map entries */
	loc->list_inval = list_alloc();
	if (!loc->list_inval) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for invalid BTT map "
			"entries list", arenap->id);
		goto error;
	}

	/* list of invalid flog entries */
	loc->list_flog_inval = list_alloc();
	if (!loc->list_flog_inval) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for invalid BTT Flog "
			"entries list", arenap->id);
		goto error;
	}

	/* list of unmapped blocks */
	loc->list_unmap = list_alloc();
	if (!loc->list_unmap) {
		CHECK_ERR(ppc,
			"arena %u: cannot allocate memory for unmaped blocks "
			"list", arenap->id);
		goto error;
	}

	return 0;

error:
	ppc->result = CHECK_RESULT_ERROR;
	cleanup(ppc, loc);
	return -1;
}
/*
* map_get_postmap_lba -- extract postmap LBA from map entry
*/
/*
 * map_get_postmap_lba -- extract postmap LBA from map entry
 *
 * An entry still in its initial state (flags == 0b00) implicitly maps the
 * premap LBA to itself; otherwise the LBA is stored in the entry's low bits.
 */
static inline uint32_t
map_get_postmap_lba(struct arena *arenap, uint32_t i)
{
	uint32_t entry = arenap->map[i];

	if (map_entry_is_initial(entry))
		return i;

	return entry & BTT_MAP_ENTRY_LBA_MASK;
}
/*
* map_entry_check -- (internal) check single map entry
*/
/*
 * map_entry_check -- (internal) check single map entry
 *
 * Marks the entry's postmap LBA as used in loc->bitmap; an LBA seen twice
 * is also flagged in loc->dup_bitmap and the entry is queued on
 * loc->list_inval, as is any entry whose LBA is out of range.
 * Returns 0 on success, -1 when a list node cannot be allocated.
 */
static int
map_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i)
{
	struct arena *arenap = loc->arenap;
	uint32_t lba = map_get_postmap_lba(arenap, i);

	/* add duplicated and invalid entries to list */
	if (lba < arenap->btt_info.internal_nlba) {
		if (util_isset(loc->bitmap, lba)) {
			CHECK_INFO(ppc, "arena %u: BTT Map entry %u duplicated "
				"at %u", arenap->id, lba, i);
			util_setbit(loc->dup_bitmap, lba);
			if (!list_push(loc->list_inval, i))
				return -1;
		} else
			util_setbit(loc->bitmap, lba);
	} else {
		CHECK_INFO(ppc, "arena %u: invalid BTT Map entry at %u",
			arenap->id, i);
		if (!list_push(loc->list_inval, i))
			return -1;
	}

	return 0;
}

/*
 * flog_entry_check -- (internal) check single flog entry
 *
 * Validates one flog pair: picks the current (valid) half, range-checks
 * its lba/old_map/new_map fields and cross-checks them against the map
 * bitmaps. Invalid or duplicated entries are queued on
 * loc->list_flog_inval. *ptr is always advanced to the next flog pair.
 * Returns 0 on success, -1 when a list node cannot be allocated.
 */
static int
flog_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i,
	uint8_t **ptr)
{
	struct arena *arenap = loc->arenap;

	/* flog entry consists of two btt_flog structures */
	struct btt_flog *flog = (struct btt_flog *)*ptr;
	int next;
	struct btt_flog *flog_cur = btt_flog_get_valid(flog, &next);

	/* insert invalid and duplicated indexes to list */
	if (!flog_cur) {
		CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
		goto next;
	}
	uint32_t entry = flog_cur->old_map & BTT_MAP_ENTRY_LBA_MASK;
	uint32_t new_entry = flog_cur->new_map & BTT_MAP_ENTRY_LBA_MASK;

	/*
	 * Check if lba is in extranal_nlba range, and check if both old_map and
	 * new_map are in internal_nlba range.
	 */
	if (flog_cur->lba >= arenap->btt_info.external_nlba ||
		entry >= arenap->btt_info.internal_nlba ||
		new_entry >= arenap->btt_info.internal_nlba) {
		CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
		goto next;
	}

	if (util_isset(loc->fbitmap, entry)) {
		/*
		 * here we have two flog entries which holds the same free block
		 */
		CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry at %u\n",
			arenap->id, i);
		if (!list_push(loc->list_flog_inval, i))
			return -1;
	} else if (util_isset(loc->bitmap, entry)) {
		/* here we have probably an unfinished write */
		if (util_isset(loc->bitmap, new_entry)) {
			/* Both old_map and new_map are already used in map. */
			CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry "
				"at %u", arenap->id, i);
			util_setbit(loc->dup_bitmap, new_entry);
			if (!list_push(loc->list_flog_inval, i))
				return -1;
		} else {
			/*
			 * Unfinished write. Next time pool is opened, the map
			 * will be updated to new_map.
			 */
			util_setbit(loc->bitmap, new_entry);
			util_setbit(loc->fbitmap, entry);
		}
	} else {
		int flog_valid = 1;
		/*
		 * Either flog entry is in its initial state:
		 * - current_btt_flog entry is first one in pair and
		 * - current_btt_flog.old_map == current_btt_flog.new_map and
		 * - current_btt_flog.seq == 0b01 and
		 * - second flog entry in pair is zeroed
		 * or
		 * current_btt_flog.old_map != current_btt_flog.new_map
		 */
		if (entry == new_entry)
			flog_valid = (next == 1) && (flog_cur->seq == 1) &&
				util_is_zeroed((const void *)&flog[1],
				sizeof(flog[1]));

		if (flog_valid) {
			/* totally fine case */
			util_setbit(loc->bitmap, entry);
			util_setbit(loc->fbitmap, entry);
		} else {
			CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at "
				"%u", arenap->id, i);
			if (!list_push(loc->list_flog_inval, i))
				return -1;
		}
	}

next:
	*ptr += BTT_FLOG_PAIR_ALIGN;
	return 0;
}
/*
* arena_map_flog_check -- (internal) check map and flog
*/
/*
 * arena_map_flog_check -- (internal) check map and flog
 *
 * Walks all map entries, flog entries and the used-block bitmap, collects
 * invalid/duplicated/unmapped items, and decides whether the arena is
 * repairable (repair is possible only when the number of unmapped blocks
 * matches the number of invalid map + flog entries). Queues the repair
 * questions when it is. Returns the question-sequence validation result,
 * or -1 when checking stops here.
 */
static int
arena_map_flog_check(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);

	struct arena *arenap = loc->arenap;

	/* check map entries */
	uint32_t i;
	for (i = 0; i < arenap->btt_info.external_nlba; i++) {
		if (map_entry_check(ppc, loc, i))
			goto error_push;
	}

	/* check flog entries */
	uint8_t *ptr = arenap->flog;
	for (i = 0; i < arenap->btt_info.nfree; i++) {
		if (flog_entry_check(ppc, loc, i, &ptr))
			goto error_push;
	}

	/* check unmapped blocks and insert to list */
	for (i = 0; i < arenap->btt_info.internal_nlba; i++) {
		if (!util_isset(loc->bitmap, i)) {
			CHECK_INFO(ppc, "arena %u: unmapped block %u",
				arenap->id, i);
			if (!list_push(loc->list_unmap, i))
				goto error_push;
		}
	}

	if (loc->list_unmap->count)
		CHECK_INFO(ppc, "arena %u: number of unmapped blocks: %u",
			arenap->id, loc->list_unmap->count);
	if (loc->list_inval->count)
		CHECK_INFO(ppc, "arena %u: number of invalid BTT Map entries: "
			"%u", arenap->id, loc->list_inval->count);
	if (loc->list_flog_inval->count)
		CHECK_INFO(ppc, "arena %u: number of invalid BTT Flog entries: "
			"%u", arenap->id, loc->list_flog_inval->count);

	if (CHECK_IS_NOT(ppc, REPAIR) && loc->list_unmap->count > 0) {
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto cleanup;
	}

	/*
	 * We are able to repair if and only if number of unmapped blocks is
	 * equal to sum of invalid map and flog entries.
	 */
	if (loc->list_unmap->count != (loc->list_inval->count +
			loc->list_flog_inval->count)) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_ERR(ppc, "arena %u: cannot repair BTT Map and Flog",
			arenap->id);
		goto cleanup;
	}

	if (CHECK_IS_NOT(ppc, ADVANCED) && loc->list_inval->count +
			loc->list_flog_inval->count > 0) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_INFO(ppc, REQUIRE_ADVANCED);
		CHECK_ERR(ppc, "BTT Map and / or BTT Flog contain invalid "
			"entries");
		check_end(ppc->data);
		goto cleanup;
	}

	if (loc->list_inval->count > 0) {
		CHECK_ASK(ppc, Q_REPAIR_MAP, "Do you want to repair invalid "
			"BTT Map entries?");
	}

	if (loc->list_flog_inval->count > 0) {
		CHECK_ASK(ppc, Q_REPAIR_FLOG, "Do you want to repair invalid "
			"BTT Flog entries?");
	}

	return check_questions_sequence_validate(ppc);

error_push:
	/* fixed typo in the error message: was "momory" */
	CHECK_ERR(ppc, "arena %u: cannot allocate memory for list item",
		arenap->id);
	ppc->result = CHECK_RESULT_ERROR;
cleanup:
	cleanup(ppc, loc);
	return -1;
}
/*
* arena_map_flog_fix -- (internal) fix map and flog
*/
static int
arena_map_flog_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
struct arena *arenap = loc->arenap;
uint32_t inval;
uint32_t unmap;
switch (question) {
case Q_REPAIR_MAP:
/*
* Cause first of duplicated map entries seems valid till we
* find second of them we must find all first map entries
* pointing to the postmap LBA's we know are duplicated to mark
* them with error flag.
*/
for (uint32_t i = 0; i < arenap->btt_info.external_nlba; i++) {
uint32_t lba = map_get_postmap_lba(arenap, i);
if (lba >= arenap->btt_info.internal_nlba)
continue;
if (!util_isset(loc->dup_bitmap, lba))
continue;
arenap->map[i] = BTT_MAP_ENTRY_ERROR | lba;
util_clrbit(loc->dup_bitmap, lba);
CHECK_INFO(ppc,
"arena %u: storing 0x%x at %u BTT Map entry",
arenap->id, arenap->map[i], i);
}
/*
* repair invalid or duplicated map entries by using unmapped
* blocks
*/
while (list_pop(loc->list_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
arenap->map[inval] = unmap | BTT_MAP_ENTRY_ERROR;
CHECK_INFO(ppc, "arena %u: storing 0x%x at %u BTT Map "
"entry", arenap->id, arenap->map[inval], inval);
}
break;
case Q_REPAIR_FLOG:
/* repair invalid flog entries using unmapped blocks */
while (list_pop(loc->list_flog_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
struct btt_flog *flog = (struct btt_flog *)
(arenap->flog + inval * BTT_FLOG_PAIR_ALIGN);
memset(&flog[1], 0, sizeof(flog[1]));
uint32_t entry = unmap | BTT_MAP_ENTRY_ERROR;
flog[0].lba = inval;
flog[0].new_map = entry;
flog[0].old_map = entry;
flog[0].seq = 1;
CHECK_INFO(ppc, "arena %u: repairing BTT Flog at %u "
"with free block entry 0x%x", loc->arenap->id,
inval, entry);
}
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = init,
},
{
.check = arena_map_flog_check,
},
{
.fix = arena_map_flog_fix,
},
{
.check = cleanup,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Advances loc->step and dispatches either the check or the fix callback.
 * Returns non-zero when the caller should stop processing this arena.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (!step->fix)
		return step->check(ppc, loc);
	/* fix step: drain queued answers; zero means all fixes succeeded */
	if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
		return 0;
	/* fixing failed -- release per-arena resources before bailing out */
	cleanup(ppc, loc);
	return -1;
}
/*
 * check_btt_map_flog -- perform check and fixing of map and flog
 *
 * Iterates over all arenas cached in ppc->pool->arenas and runs the full
 * step sequence on each.  A BLK pool without a written BTT layout
 * (blk_no_layout) has nothing to verify and returns immediately.
 */
void
check_btt_map_flog(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	if (ppc->pool->blk_no_layout)
		return;
	/* initialize check */
	if (!loc->arenap && loc->narena == 0 &&
		ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
		CHECK_INFO(ppc, "checking BTT Map and Flog");
		loc->arenap = PMDK_TAILQ_FIRST(&ppc->pool->arenas);
		loc->narena = 0;
	}
	while (loc->arenap != NULL) {
		/* add info about checking next arena */
		if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS &&
			loc->step == 0) {
			CHECK_INFO(ppc, "arena %u: checking BTT Map and Flog",
				loc->narena);
		}
		/* do all checks */
		while (CHECK_NOT_COMPLETE(loc, steps)) {
			if (step_exe(ppc, loc))
				return;
		}
		/* jump to next arena */
		loc->arenap = PMDK_TAILQ_NEXT(loc->arenap, next);
		loc->narena++;
		loc->step = 0;
	}
}
/* ===== src/libpmempool/check_backup.c ===== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_backup.c -- pre-check backup
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "file.h"
#include "os.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* interactive questions the backup pre-check may ask the user */
enum question {
	Q_OVERWRITE_EXISTING_FILE,	/* single-file backup target exists */
	Q_OVERWRITE_EXISTING_PARTS	/* poolset backup part files exist */
};
/*
 * location_release -- (internal) free the poolset parsed for the backup
 * target and clear the pointer so the release is idempotent
 */
static void
location_release(location *loc)
{
	if (loc->set == NULL)
		return;
	util_poolset_free(loc->set);
	loc->set = NULL;
}
/*
 * backup_nonpoolset_requirements -- (internal) check backup requirements
 *
 * A single-file backup destination must either not exist, or have exactly
 * the size of the source pool file so it can be overwritten in place.
 */
static int
backup_nonpoolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	int exists = util_file_exists(ppc->backup_path);
	if (exists < 0) {
		return CHECK_ERR(ppc,
			"unable to access the backup destination: %s",
			ppc->backup_path);
	}
	if (!exists) {
		/* fresh destination -- clear errno set by the stat probe */
		errno = 0;
		return 0;
	}
	/* destination exists -- sizes must match before offering overwrite */
	if ((size_t)util_file_get_size(ppc->backup_path) !=
			ppc->pool->set_file->size) {
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc,
			"destination of the backup does not match the size of the source pool file: %s",
			ppc->backup_path);
	}
	/* dry run -- no question needed, skip the whole backup sequence */
	if (CHECK_WITHOUT_FIXING(ppc)) {
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}
	CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_FILE,
		"destination of the backup already exists.|Do you want to overwrite it?");
	return check_questions_sequence_validate(ppc);
}
/*
 * backup_nonpoolset_overwrite -- (internal) overwrite pool
 *
 * Answer handler for Q_OVERWRITE_EXISTING_FILE: copies the pool over the
 * existing destination file and marks the backup sequence complete.
 */
static int
backup_nonpoolset_overwrite(PMEMpoolcheck *ppc, location *loc,
	uint32_t question, void *context)
{
	LOG(3, NULL);
	ASSERTne(loc, NULL);
	if (question != Q_OVERWRITE_EXISTING_FILE) {
		ERR("not implemented question id: %u", question);
		return 0;
	}
	if (pool_copy(ppc->pool, ppc->backup_path, 1 /* overwrite */)) {
		location_release(loc);
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	location_release(loc);
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * backup_nonpoolset_create -- (internal) create backup
 *
 * Copies the pool into a fresh destination file (no overwrite) and marks
 * the backup sequence complete.
 */
static int
backup_nonpoolset_create(PMEMpoolcheck *ppc, location *loc)
{
	CHECK_INFO(ppc, "creating backup file: %s", ppc->backup_path);
	int failed = pool_copy(ppc->pool, ppc->backup_path, 0);
	if (failed) {
		location_release(loc);
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	location_release(loc);
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * backup_poolset_requirements -- (internal) check backup requirements
 *
 * Parses the destination poolset file and verifies it is compatible with
 * the source: single replica on both sides, matching part count and part
 * sizes.  Existing destination parts must also match on-disk size and
 * trigger an overwrite question.
 */
static int
backup_poolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	if (ppc->pool->set_file->poolset->nreplicas > 1) {
		CHECK_INFO(ppc,
			"backup of a poolset with multiple replicas is not supported");
		goto err;
	}
	if (pool_set_parse(&loc->set, ppc->backup_path)) {
		CHECK_INFO_ERRNO(ppc, "invalid poolset backup file: %s",
			ppc->backup_path);
		goto err;
	}
	if (loc->set->nreplicas > 1) {
		CHECK_INFO(ppc,
			"backup to a poolset with multiple replicas is not supported");
		goto err_poolset;
	}
	ASSERTeq(loc->set->nreplicas, 1);
	struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
	struct pool_replica *drep = loc->set->replica[0];
	if (srep->nparts != drep->nparts) {
		CHECK_INFO(ppc,
			"number of part files in the backup poolset must match number of part files in the source poolset");
		goto err_poolset;
	}
	/* any pre-existing destination part forces the overwrite question */
	int overwrite_required = 0;
	for (unsigned p = 0; p < srep->nparts; p++) {
		int exists = util_file_exists(drep->part[p].path);
		if (exists < 0) {
			CHECK_INFO(ppc,
				"unable to access the part of the destination poolset: %s",
				ppc->backup_path);
			goto err_poolset;
		}
		if (srep->part[p].filesize != drep->part[p].filesize) {
			CHECK_INFO(ppc,
				"size of the part %u of the backup poolset does not match source poolset",
				p);
			goto err_poolset;
		}
		if (!exists) {
			/* clear errno left behind by the existence probe */
			errno = 0;
			continue;
		}
		overwrite_required = true;
		if ((size_t)util_file_get_size(drep->part[p].path) !=
				srep->part[p].filesize) {
			CHECK_INFO(ppc,
				"destination of the backup part does not match size of the source part file: %s",
				drep->part[p].path);
			goto err_poolset;
		}
	}
	/* dry run -- skip the whole backup sequence */
	if (CHECK_WITHOUT_FIXING(ppc)) {
		location_release(loc);
		loc->step = CHECK_STEP_COMPLETE;
		return 0;
	}
	if (overwrite_required) {
		CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_PARTS,
			"part files of the destination poolset of the backup already exist.|"
			"Do you want to overwrite them?");
	}
	return check_questions_sequence_validate(ppc);
err_poolset:
	location_release(loc);
err:
	ppc->result = CHECK_RESULT_ERROR;
	return CHECK_ERR(ppc, "unable to backup poolset");
}
/*
 * backup_poolset -- (internal) copy every source part into the destination
 * poolset
 *
 * When overwrite is zero an informational "creating backup file" message is
 * emitted per part and existing destination files make the copy fail.
 */
static int
backup_poolset(PMEMpoolcheck *ppc, location *loc, int overwrite)
{
	struct pool_replica *src = ppc->pool->set_file->poolset->replica[0];
	struct pool_replica *dst = loc->set->replica[0];
	for (unsigned i = 0; i < src->nparts; i++) {
		if (!overwrite) {
			CHECK_INFO(ppc, "creating backup file: %s",
				dst->part[i].path);
		}
		if (pool_set_part_copy(&dst->part[i], &src->part[i],
				overwrite)) {
			location_release(loc);
			ppc->result = CHECK_RESULT_ERROR;
			CHECK_INFO(ppc, "unable to create backup file");
			return CHECK_ERR(ppc, "unable to backup poolset");
		}
	}
	return 0;
}
/*
 * backup_poolset_overwrite -- (internal) backup poolset with overwrite
 *
 * Answer handler for Q_OVERWRITE_EXISTING_PARTS: re-runs the part copy in
 * overwrite mode and marks the backup sequence complete.
 */
static int
backup_poolset_overwrite(PMEMpoolcheck *ppc, location *loc,
	uint32_t question, void *context)
{
	LOG(3, NULL);
	ASSERTne(loc, NULL);
	if (question != Q_OVERWRITE_EXISTING_PARTS) {
		ERR("not implemented question id: %u", question);
		return 0;
	}
	if (backup_poolset(ppc, loc, 1 /* overwrite */)) {
		location_release(loc);
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	location_release(loc);
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * backup_poolset_create -- (internal) backup poolset into fresh part files
 */
static int
backup_poolset_create(PMEMpoolcheck *ppc, location *loc)
{
	int failed = backup_poolset(ppc, loc, 0 /* no overwrite */);
	if (failed) {
		location_release(loc);
		ppc->result = CHECK_RESULT_ERROR;
		return CHECK_ERR(ppc, "cannot perform backup");
	}
	location_release(loc);
	loc->step = CHECK_STEP_COMPLETE;
	return 0;
}
/*
 * step -- (internal) single step of the backup; exactly one of check/fix is
 * set and 'poolset' selects whether the step applies to poolset pools
 */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
	int poolset;
};
/* backup steps; the {NULL, NULL} entry terminates the list */
static const struct step steps[] = {
	{
		.check = backup_nonpoolset_requirements,
		.poolset = false,
	},
	{
		.fix = backup_nonpoolset_overwrite,
		.poolset = false,
	},
	{
		.check = backup_nonpoolset_create,
		.poolset = false
	},
	{
		.check = backup_poolset_requirements,
		.poolset = true,
	},
	{
		.fix = backup_poolset_overwrite,
		.poolset = true,
	},
	{
		.check = backup_poolset_create,
		.poolset = true
	},
	{
		.check = NULL,
		.fix = NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Non-poolset steps are skipped for poolset pools; the non-poolset create
 * step sets CHECK_STEP_COMPLETE so the poolset steps never run for plain
 * pool files.
 */
static int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (step->poolset == 0 && ppc->pool->params.is_poolset == 1)
		return 0;
	if (!step->fix)
		return step->check(ppc, loc);
	/* fix steps are no-ops until the user has answered the question */
	if (!check_has_answer(ppc->data))
		return 0;
	if (check_answer_loop(ppc, loc, NULL, 1, step->fix))
		return -1;
	ppc->result = CHECK_RESULT_CONSISTENT;
	return 0;
}
/*
 * check_backup -- perform backup if requested and needed
 *
 * Entry point of the backup pre-check; does nothing unless the caller
 * configured a backup path.
 */
void
check_backup(PMEMpoolcheck *ppc)
{
	LOG(3, "backup_path %s", ppc->backup_path);
	if (ppc->backup_path == NULL)
		return;
	location *loc = check_get_step_data(ppc->data);
	/* do all checks */
	while (CHECK_NOT_COMPLETE(loc, steps)) {
		if (step_exe(ppc, loc))
			break;
	}
}
/* ===== src/libpmempool/check_btt_info.c ===== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_btt_info.c -- check BTT Info
*/
#include <stdlib.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "util.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* interactive questions the BTT Info check may ask the user */
enum question {
	Q_RESTORE_FROM_BACKUP,	/* restore header from its backup copy */
	Q_REGENERATE,		/* rebuild header from another valid arena */
	Q_REGENERATE_CHECKSUM,	/* recompute the header checksum only */
	Q_RESTORE_FROM_HEADER	/* rewrite backup copy from the header */
};
/*
 * location_release -- (internal) release check_btt_info_loc allocations
 */
static void
location_release(location *loc)
{
	/* arenap is allocated in btt_info_checksum(); free(NULL) is a no-op */
	free(loc->arenap);
	loc->arenap = NULL;
}
/*
 * btt_info_checksum -- (internal) check BTT Info checksum
 *
 * Allocates the per-arena state, reads the BTT Info header at loc->offset
 * and verifies its checksum.  For BLK pools an all-zero header is legal --
 * it means no BTT layout was written yet and the whole check ends.
 */
static int
btt_info_checksum(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	loc->arenap = calloc(1, sizeof(struct arena));
	if (!loc->arenap) {
		ERR("!calloc");
		ppc->result = CHECK_RESULT_INTERNAL_ERROR;
		CHECK_ERR(ppc, "cannot allocate memory for arena");
		goto error_cleanup;
	}
	/* read the BTT Info header at well known offset */
	if (pool_read(ppc->pool, &loc->arenap->btt_info,
			sizeof(loc->arenap->btt_info), loc->offset)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Info header",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_ERROR;
		goto error_cleanup;
	}
	loc->arenap->id = ppc->pool->narenas;
	/* BLK is consistent even without BTT Layout */
	if (ppc->pool->params.type == POOL_TYPE_BLK) {
		int is_zeroed = util_is_zeroed((const void *)
			&loc->arenap->btt_info, sizeof(loc->arenap->btt_info));
		if (is_zeroed) {
			/* no layout yet -- nothing to verify, end the check */
			CHECK_INFO(ppc, "BTT Layout not written");
			loc->step = CHECK_STEP_COMPLETE;
			ppc->pool->blk_no_layout = 1;
			location_release(loc);
			check_end(ppc->data);
			return 0;
		}
	}
	/* check consistency of BTT Info */
	if (pool_btt_info_valid(&loc->arenap->btt_info)) {
		CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
			loc->arenap->id);
		loc->valid.btti_header = 1;
	} else if (CHECK_IS_NOT(ppc, REPAIR)) {
		/* broken and repair is not allowed -- pool is inconsistent */
		CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto error_cleanup;
	}
	return 0;
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_backup -- (internal) check BTT Info backup
 *
 * Reads the backup copy stored at the end of the arena; if valid and the
 * primary header is broken, asks whether to restore the header from it.
 */
static int
btt_info_backup(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	/* check BTT Info backup consistency */
	const size_t btt_info_size = sizeof(ppc->pool->bttc.btt_info);
	/* backup is the last btt_info_size bytes before the next arena */
	uint64_t btt_info_off = pool_next_arena_offset(ppc->pool, loc->offset) -
		btt_info_size;
	if (pool_read(ppc->pool, &ppc->pool->bttc.btt_info, btt_info_size,
			btt_info_off)) {
		CHECK_ERR(ppc, "arena %u: cannot read BTT Info backup",
			loc->arenap->id);
		goto error;
	}
	/* check whether this BTT Info backup is valid */
	if (pool_btt_info_valid(&ppc->pool->bttc.btt_info)) {
		loc->valid.btti_backup = 1;
		/* restore BTT Info from backup */
		if (!loc->valid.btti_header && CHECK_IS(ppc, REPAIR))
			CHECK_ASK(ppc, Q_RESTORE_FROM_BACKUP, "arena %u: BTT "
				"Info header checksum incorrect.|Restore BTT "
				"Info from backup?", loc->arenap->id);
	}
	/*
	 * if BTT Info backup require repairs it will be fixed in further steps
	 */
	return check_questions_sequence_validate(ppc);
error:
	ppc->result = CHECK_RESULT_ERROR;
	location_release(loc);
	return -1;
}
/*
 * btt_info_from_backup_fix -- (internal) fix BTT Info using its backup
 *
 * Answer handler for Q_RESTORE_FROM_BACKUP: copies the valid backup (read
 * earlier into ppc->pool->bttc.btt_info) over the broken header.
 */
static int
btt_info_from_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	if (question != Q_RESTORE_FROM_BACKUP) {
		ERR("not implemented question id: %u", question);
		return 0;
	}
	CHECK_INFO(ppc,
		"arena %u: restoring BTT Info header from backup",
		loc->arenap->id);
	memcpy(&loc->arenap->btt_info, &ppc->pool->bttc.btt_info,
		sizeof(loc->arenap->btt_info));
	loc->valid.btti_header = 1;
	return 0;
}
/*
 * btt_info_gen -- (internal) ask whether try to regenerate BTT Info
 *
 * Reached only in REPAIR mode when the header is still invalid; requires a
 * valid BTT Info somewhere in the pool to serve as a template.
 */
static int
btt_info_gen(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	if (loc->valid.btti_header)
		return 0;
	ASSERT(CHECK_IS(ppc, REPAIR));
	if (!loc->pool_valid.btti_offset) {
		/* no template anywhere -- regeneration is impossible */
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		return CHECK_ERR(ppc, "can not find any valid BTT Info");
	}
	CHECK_ASK(ppc, Q_REGENERATE,
		"arena %u: BTT Info header checksum incorrect.|Do you want to "
		"regenerate BTT Info?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
}
/*
 * btt_info_gen_fix -- (internal) fix by regenerating BTT Info
 *
 * Rebuilds the broken header from the first valid BTT Info found in the
 * pool (loc->pool_valid.btti): constant fields are copied verbatim and the
 * geometry-dependent fields are recomputed for this arena's size.
 */
static int
btt_info_gen_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	switch (question) {
	case Q_REGENERATE:
		CHECK_INFO(ppc, "arena %u: regenerating BTT Info header",
			loc->arenap->id);
		/*
		 * We do not have valid BTT Info backup so we get first valid
		 * BTT Info and try to calculate BTT Info for current arena
		 */
		uint64_t arena_size = ppc->pool->set_file->size - loc->offset;
		if (arena_size > BTT_MAX_ARENA)
			arena_size = BTT_MAX_ARENA;
		uint64_t space_left = ppc->pool->set_file->size - loc->offset -
			arena_size;
		struct btt_info *bttd = &loc->arenap->btt_info;
		struct btt_info *btts = &loc->pool_valid.btti;
		btt_info_convert2h(bttd);
		/*
		 * all valid BTT Info structures have the same signature, UUID,
		 * parent UUID, flags, major, minor, external LBA size, internal
		 * LBA size, nfree, info size and data offset
		 */
		memcpy(bttd->sig, btts->sig, BTTINFO_SIG_LEN);
		memcpy(bttd->uuid, btts->uuid, BTTINFO_UUID_LEN);
		memcpy(bttd->parent_uuid, btts->parent_uuid, BTTINFO_UUID_LEN);
		memset(bttd->unused, 0, BTTINFO_UNUSED_LEN);
		bttd->flags = btts->flags;
		bttd->major = btts->major;
		bttd->minor = btts->minor;
		/* other parameters can be calculated */
		if (btt_info_set(bttd, btts->external_lbasize, btts->nfree,
				arena_size, space_left)) {
			CHECK_ERR(ppc, "can not restore BTT Info");
			return -1;
		}
		/* recomputed geometry must agree with the template's */
		ASSERTeq(bttd->external_lbasize, btts->external_lbasize);
		ASSERTeq(bttd->internal_lbasize, btts->internal_lbasize);
		ASSERTeq(bttd->nfree, btts->nfree);
		ASSERTeq(bttd->infosize, btts->infosize);
		ASSERTeq(bttd->dataoff, btts->dataoff);
		return 0;
	default:
		ERR("not implemented question id: %u", question);
		return -1;
	}
}
/*
 * btt_info_checksum_retry -- (internal) check BTT Info checksum
 *
 * Re-validates the header after a possible regeneration; if still invalid,
 * recomputing the checksum is offered only in ADVANCED mode.
 */
static int
btt_info_checksum_retry(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	if (loc->valid.btti_header)
		return 0;
	/* regeneration worked on the host-endian copy; convert back */
	btt_info_convert2le(&loc->arenap->btt_info);
	/* check consistency of BTT Info */
	if (pool_btt_info_valid(&loc->arenap->btt_info)) {
		CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
			loc->arenap->id);
		loc->valid.btti_header = 1;
		return 0;
	}
	if (CHECK_IS_NOT(ppc, ADVANCED)) {
		ppc->result = CHECK_RESULT_CANNOT_REPAIR;
		CHECK_INFO(ppc, REQUIRE_ADVANCED);
		CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
			loc->arenap->id);
		check_end(ppc->data);
		goto error_cleanup;
	}
	CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM,
		"arena %u: BTT Info header checksum incorrect.|Do you want to "
		"regenerate BTT Info checksum?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_checksum_fix -- (internal) fix by regenerating BTT Info checksum
 *
 * Answer handler for Q_REGENERATE_CHECKSUM: recomputes and stores the
 * checksum of the current arena's header, then marks it valid.
 */
static int
btt_info_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	if (question != Q_REGENERATE_CHECKSUM) {
		ERR("not implemented question id: %u", question);
		return 0;
	}
	struct btt_info *info = &loc->arenap->btt_info;
	util_checksum(info, sizeof(struct btt_info), &info->checksum, 1, 0);
	loc->valid.btti_header = 1;
	return 0;
}
/*
 * btt_info_backup_checksum -- (internal) check BTT Info backup checksum
 *
 * Reached with a valid header; if the backup copy is invalid, offers to
 * restore it from the header (the actual write happens in a later step).
 */
static int
btt_info_backup_checksum(PMEMpoolcheck *ppc, location *loc)
{
	LOG(3, NULL);
	ASSERT(loc->valid.btti_header);
	if (loc->valid.btti_backup)
		return 0;
	/* BTT Info backup is not valid so it must be fixed */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		CHECK_ERR(ppc,
			"arena %u: BTT Info backup checksum incorrect",
			loc->arenap->id);
		ppc->result = CHECK_RESULT_NOT_CONSISTENT;
		check_end(ppc->data);
		goto error_cleanup;
	}
	CHECK_ASK(ppc, Q_RESTORE_FROM_HEADER,
		"arena %u: BTT Info backup checksum incorrect.|Do you want to "
		"restore it from BTT Info header?", loc->arenap->id);
	return check_questions_sequence_validate(ppc);
error_cleanup:
	location_release(loc);
	return -1;
}
/*
 * btt_info_backup_fix -- (internal) prepare restore BTT Info backup from
 * header
 *
 * Answer handler for Q_RESTORE_FROM_HEADER; only logs the decision -- the
 * backup itself is rewritten later, in the check_write step.
 */
static int
btt_info_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *ctx)
{
	LOG(3, NULL);
	ASSERTeq(ctx, NULL);
	ASSERTne(loc, NULL);
	if (question != Q_RESTORE_FROM_HEADER) {
		ERR("not implemented question id: %u", question);
		return 0;
	}
	CHECK_INFO(ppc,
		"arena %u: restoring BTT Info backup from header",
		loc->arenap->id);
	return 0;
}
/*
 * step -- (internal) single step of the BTT Info check; exactly one of the
 * callbacks is set: 'check' runs validation, 'fix' applies user answers
 */
struct step {
	int (*check)(PMEMpoolcheck *, location *);
	int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
/* ordered steps of the check; the {NULL, NULL} entry terminates the list */
static const struct step steps[] = {
	{
		.check = btt_info_checksum,
	},
	{
		.check = btt_info_backup,
	},
	{
		.fix = btt_info_from_backup_fix,
	},
	{
		.check = btt_info_gen,
	},
	{
		.fix = btt_info_gen_fix,
	},
	{
		.check = btt_info_checksum_retry,
	},
	{
		.fix = btt_info_checksum_fix,
	},
	{
		.check = btt_info_backup_checksum,
	},
	{
		.fix = btt_info_backup_fix,
	},
	{
		.check = NULL,
		.fix = NULL,
	},
};
/*
 * step_exe -- (internal) perform single step according to its parameters
 *
 * Advances loc->step and dispatches either the check or the fix callback;
 * per-arena state is released if a fix loop ended with an error.
 */
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
	ASSERT(loc->step < ARRAY_SIZE(steps));
	const struct step *step = &steps[loc->step++];
	if (!step->fix)
		return step->check(ppc, loc);
	if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
		return 0;
	if (check_has_error(ppc->data))
		location_release(loc);
	return -1;
}
/*
 * check_btt_info -- entry point for btt info check
 *
 * Walks all arenas in the pool, chained through btt_info.nextoff, running
 * the full step sequence on each; validated arenas are cached for the
 * subsequent map/flog check.
 */
void
check_btt_info(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);
	location *loc = check_get_step_data(ppc->data);
	uint64_t nextoff = 0;
	/* initialize check */
	if (!loc->offset) {
		CHECK_INFO(ppc, "checking BTT Info headers");
		loc->offset = sizeof(struct pool_hdr);
		/* BLK pools carry an aligned runtime header before the BTT */
		if (ppc->pool->params.type == POOL_TYPE_BLK)
			loc->offset += ALIGN_UP(sizeof(struct pmemblk) -
				sizeof(struct pool_hdr),
				BLK_FORMAT_DATA_ALIGN);
		loc->pool_valid.btti_offset = pool_get_first_valid_btt(
			ppc->pool, &loc->pool_valid.btti, loc->offset, NULL);
		/* Without valid BTT Info we can not proceed */
		if (!loc->pool_valid.btti_offset) {
			if (ppc->pool->params.type == POOL_TYPE_BTT) {
				CHECK_ERR(ppc,
					"can not find any valid BTT Info");
				ppc->result = CHECK_RESULT_NOT_CONSISTENT;
				check_end(ppc->data);
				return;
			}
		} else
			btt_info_convert2h(&loc->pool_valid.btti);
	}
	do {
		/* jump to next offset */
		if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
			loc->offset += nextoff;
			loc->step = 0;
			loc->valid.btti_header = 0;
			loc->valid.btti_backup = 0;
		}
		/* do all checks */
		while (CHECK_NOT_COMPLETE(loc, steps)) {
			if (step_exe(ppc, loc) || ppc->pool->blk_no_layout == 1)
				return;
		}
		/* save offset and insert BTT to cache for next steps */
		loc->arenap->offset = loc->offset;
		loc->arenap->valid = true;
		check_insert_arena(ppc, loc->arenap);
		nextoff = le64toh(loc->arenap->btt_info.nextoff);
	} while (nextoff > 0);
}
/* ===== src/libpmempool/check_util.c ===== */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_util.c -- check utility functions
*/
#include <stdio.h>
#include <stdint.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
/* step value marking a finished check sequence */
#define CHECK_END UINT_MAX
/* separate info part of message from question part of message */
#define MSG_SEPARATOR '|'
/* error part of message must have '.' at the end */
#define MSG_PLACE_OF_SEPARATION '.'
/* upper bound for a single formatted status message */
#define MAX_MSG_STR_SIZE 8192
/* literal answers accepted from the user */
#define CHECK_ANSWER_YES "yes"
#define CHECK_ANSWER_NO "no"
/* NOTE(review): the three constants below are not referenced in this chunk;
 * presumably used by helpers further down the file -- verify before removal */
#define STR_MAX 256
#define TIME_STR_FMT "%a %b %d %Y %H:%M:%S"
#define UUID_STR_MAX 37
/* possible user answers attached to a question status */
enum check_answer {
	PMEMPOOL_CHECK_ANSWER_EMPTY,	/* not answered yet */
	PMEMPOOL_CHECK_ANSWER_YES,
	PMEMPOOL_CHECK_ANSWER_NO,
	PMEMPOOL_CHECK_ANSWER_DEFAULT,
};
/* queue of check statuses */
struct check_status {
	PMDK_TAILQ_ENTRY(check_status) next;
	struct pmempool_check_status status;	/* public part shown to user */
	unsigned question;	/* question id this status belongs to */
	enum check_answer answer;
	char *msg;	/* owned message buffer; status.str.msg points here */
};
PMDK_TAILQ_HEAD(check_status_head, check_status);
/* check control context */
struct check_data {
	unsigned step;		/* current step; CHECK_END when finished */
	location step_data;	/* per-step scratch state */
	struct check_status *error;	/* at most one pending error status */
	struct check_status_head infos;
	struct check_status_head questions;
	struct check_status_head answers;
	struct check_status *check_status_cache; /* status handed to the user */
};
/*
 * check_data_alloc -- allocate and initialize check_data structure
 *
 * Returns a zeroed context with empty info/question/answer queues, or NULL
 * when out of memory.
 */
struct check_data *
check_data_alloc(void)
{
	LOG(3, NULL);
	struct check_data *data = calloc(1, sizeof(*data));
	if (!data) {
		ERR("!calloc");
		return NULL;
	}
	PMDK_TAILQ_INIT(&data->answers);
	PMDK_TAILQ_INIT(&data->questions);
	PMDK_TAILQ_INIT(&data->infos);
	return data;
}
/*
* check_data_free -- clean and deallocate check_data
*/
void
check_data_free(struct check_data *data)
{
LOG(3, NULL);
if (data->error != NULL) {
free(data->error);
data->error = NULL;
}
if (data->check_status_cache != NULL) {
free(data->check_status_cache);
data->check_status_cache = NULL;
}
while (!PMDK_TAILQ_EMPTY(&data->infos)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->infos);
PMDK_TAILQ_REMOVE(&data->infos, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->questions)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->questions);
PMDK_TAILQ_REMOVE(&data->questions, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->answers)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, statp, next);
free(statp);
}
free(data);
}
/*
 * check_step_get - return current check step number
 */
uint32_t
check_step_get(struct check_data *data)
{
	return data->step;
}
/*
 * check_step_inc -- move to next step number
 *
 * No-op once the check has ended; otherwise also resets the per-step
 * scratch state.
 */
void
check_step_inc(struct check_data *data)
{
	if (check_is_end_util(data))
		return;
	++data->step;
	memset(&data->step_data, 0, sizeof(location));
}
/*
 * check_get_step_data -- return pointer to check step data
 */
location *
check_get_step_data(struct check_data *data)
{
	return &data->step_data;
}
/*
 * check_end -- mark check as ended
 */
void
check_end(struct check_data *data)
{
	LOG(3, NULL);
	data->step = CHECK_END;
}
/*
 * check_is_end_util -- return if check has ended
 */
int
check_is_end_util(struct check_data *data)
{
	return data->step == CHECK_END;
}
/*
 * status_alloc -- (internal) allocate and initialize check_status
 *
 * Aborts via FATAL on allocation failure, so the returned pointer is never
 * NULL.  The embedded public status points at the owned msg buffer.
 */
static inline struct check_status *
status_alloc(void)
{
	struct check_status *status = malloc(sizeof(*status));
	if (!status)
		FATAL("!malloc");
	status->msg = malloc(sizeof(char) * MAX_MSG_STR_SIZE);
	if (!status->msg) {
		free(status);
		FATAL("!malloc");
	}
	status->status.str.msg = status->msg;
	status->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
	status->question = CHECK_INVALID_QUESTION;
	return status;
}
/*
 * status_release -- (internal) release check_status
 *
 * On Windows the public str.msg may be a UTF-8 duplicate created by
 * cache_to_utf8(); it is freed separately from the owned msg buffer.
 */
static void
status_release(struct check_status *status)
{
#ifdef _WIN32
	/* dealloc duplicate string after conversion */
	if (status->status.str.msg != status->msg)
		free((void *)status->status.str.msg);
#endif
	free(status->msg);
	free(status);
}
/*
 * status_msg_info_only -- (internal) separate info part of the message
 *
 * If message is in form of "info.|question" it modifies it as follows
 * "info\0|question".  Returns 0 when a separator was found, -1 otherwise.
 *
 * The buffer is modified in place, so the parameter is (correctly) not
 * const-qualified -- the previous prototype declared it const and relied on
 * strchr() laundering the qualifier away before writing through it.
 */
static inline int
status_msg_info_only(char *msg)
{
	char *sep = strchr(msg, MSG_SEPARATOR);
	if (sep == NULL)
		return -1;
	ASSERTne(sep, msg);
	--sep;
	/* the info part must end with a '.' right before the separator */
	ASSERTeq(*sep, MSG_PLACE_OF_SEPARATION);
	*sep = '\0';
	return 0;
}
/*
 * status_msg_info_and_question -- (internal) join info and question
 *
 * If message is in form "info.|question" it will replace MSG_SEPARATOR '|'
 * with space to get "info. question".  Returns 0 when a separator was
 * found, -1 otherwise.
 *
 * The buffer is modified in place, so the parameter is (correctly) not
 * const-qualified -- the previous prototype declared it const and relied on
 * strchr() laundering the qualifier away before writing through it.
 */
static inline int
status_msg_info_and_question(char *msg)
{
	char *sep = strchr(msg, MSG_SEPARATOR);
	if (sep == NULL)
		return -1;
	*sep = ' ';
	return 0;
}
/*
 * status_push -- (internal) push single status object
 *
 * Routes the status to the proper place: errors are stored as the single
 * pending error (returns -1), infos go to the info queue (only in VERBOSE
 * mode), and questions are either downgraded to errors (no REPAIR),
 * auto-answered "yes" (ALWAYS_YES) or queued for the user.
 */
static int
status_push(PMEMpoolcheck *ppc, struct check_status *st, uint32_t question)
{
	if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) {
		ASSERTeq(ppc->data->error, NULL);
		ppc->data->error = st;
		return -1;
	} else if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_INFO) {
		if (CHECK_IS(ppc, VERBOSE))
			PMDK_TAILQ_INSERT_TAIL(&ppc->data->infos, st, next);
		else
			check_status_release(ppc, st);
		return 0;
	}
	/* st->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION */
	if (CHECK_IS_NOT(ppc, REPAIR)) {
		/* error status */
		if (status_msg_info_only(st->msg)) {
			ERR("no error message for the user");
			st->msg[0] = '\0';
		}
		/* no repair allowed -- recurse as an error status */
		st->status.type = PMEMPOOL_CHECK_MSG_TYPE_ERROR;
		return status_push(ppc, st, question);
	}
	if (CHECK_IS(ppc, ALWAYS_YES)) {
		if (!status_msg_info_only(st->msg)) {
			/* information status */
			st->status.type = PMEMPOOL_CHECK_MSG_TYPE_INFO;
			status_push(ppc, st, question);
			st = status_alloc();
		}
		/* answer status */
		ppc->result = CHECK_RESULT_PROCESS_ANSWERS;
		st->question = question;
		st->answer = PMEMPOOL_CHECK_ANSWER_YES;
		st->status.type = PMEMPOOL_CHECK_MSG_TYPE_QUESTION;
		PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers, st, next);
	} else {
		/* question message */
		status_msg_info_and_question(st->msg);
		st->question = question;
		ppc->result = CHECK_RESULT_ASK_QUESTIONS;
		st->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
		PMDK_TAILQ_INSERT_TAIL(&ppc->data->questions, st, next);
	}
	return 0;
}
/*
 * check_status_create -- create single status, push it to proper queue
 *
 * MSG_SEPARATOR character in fmt is treated as message separator. If creating
 * question but check arguments do not allow to make any changes (asking any
 * question is pointless) it takes part of message before MSG_SEPARATOR
 * character and use it to create error message. Character just before separator
 * must be a MSG_PLACE_OF_SEPARATION character. Return non 0 value if error
 * status would be created.
 *
 * The arg is an additional argument for specified type of status: an errno
 * value appended as strerror text for info/error statuses, a question id
 * for question statuses.
 */
int
check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
	uint32_t arg, const char *fmt, ...)
{
	/* infos are dropped early unless the user asked for verbose output */
	if (CHECK_IS_NOT(ppc, VERBOSE) && type == PMEMPOOL_CHECK_MSG_TYPE_INFO)
		return 0;
	struct check_status *st = status_alloc();
	ASSERT(CHECK_IS(ppc, FORMAT_STR));
	va_list ap;
	va_start(ap, fmt);
	int p = vsnprintf(st->msg, MAX_MSG_STR_SIZE, fmt, ap);
	va_end(ap);
	/* append possible strerror at the end of the message */
	if (type != PMEMPOOL_CHECK_MSG_TYPE_QUESTION && arg && p > 0) {
		char buff[UTIL_MAX_ERR_MSG];
		util_strerror((int)arg, buff, UTIL_MAX_ERR_MSG);
		int ret = util_snprintf(st->msg + p,
			MAX_MSG_STR_SIZE - (size_t)p, ": %s", buff);
		if (ret < 0) {
			ERR("!snprintf");
			status_release(st);
			return -1;
		}
	}
	st->status.type = type;
	return status_push(ppc, st, arg);
}
/*
 * check_status_release -- release single status object
 */
void
check_status_release(PMEMpoolcheck *ppc, struct check_status *status)
{
	/* an error status is also referenced from ppc->data; clear the link */
	if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR)
		ppc->data->error = NULL;
	status_release(status);
}
/*
 * pop_status -- (internal) pop single message from check_status queue
 *
 * The popped status is parked in check_status_cache (which must be empty)
 * until the caller disposes of it via check_clear_status_cache().
 */
static struct check_status *
pop_status(struct check_data *data, struct check_status_head *queue)
{
	if (!PMDK_TAILQ_EMPTY(queue)) {
		ASSERTeq(data->check_status_cache, NULL);
		data->check_status_cache = PMDK_TAILQ_FIRST(queue);
		PMDK_TAILQ_REMOVE(queue, data->check_status_cache, next);
		return data->check_status_cache;
	}
	return NULL;
}
/*
 * check_pop_question -- pop single question from questions queue
 */
struct check_status *
check_pop_question(struct check_data *data)
{
	return pop_status(data, &data->questions);
}
/*
 * check_pop_info -- pop single info from information queue
 */
struct check_status *
check_pop_info(struct check_data *data)
{
	return pop_status(data, &data->infos);
}
/*
* check_pop_error -- pop error from state
*/
struct check_status *
check_pop_error(struct check_data *data)
{
if (data->error) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = data->error;
data->error = NULL;
return data->check_status_cache;
}
return NULL;
}
#ifdef _WIN32
/*
 * cache_to_utf8 -- (Windows only) convert the cached question status and the
 * user's answer from UTF-16 to UTF-8 in place; buf receives the converted
 * answer text
 */
void
cache_to_utf8(struct check_data *data, char *buf, size_t size)
{
	if (data->check_status_cache == NULL)
		return;
	struct check_status *status = data->check_status_cache;
	/* if it was a question, convert it and the answer to utf8 */
	if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION) {
		struct pmempool_check_statusW *wstatus =
			(struct pmempool_check_statusW *)&status->status;
		wchar_t *wstring = (wchar_t *)wstatus->str.msg;
		/* str.msg now owns a fresh UTF-8 duplicate (freed in
		 * status_release) */
		status->status.str.msg = util_toUTF8(wstring);
		if (status->status.str.msg == NULL)
			FATAL("!malloc");
		util_free_UTF16(wstring);
		if (util_toUTF8_buff(wstatus->str.answer, buf, size) != 0)
			FATAL("Invalid answer conversion %s",
				out_get_errormsg());
		status->status.str.answer = buf;
	}
}
#endif
/*
 * check_clear_status_cache -- release check_status from cache
 *
 * Infos and errors are disposable once shown; questions stay cached until
 * their answer has been processed.
 */
void
check_clear_status_cache(struct check_data *data)
{
	if (data->check_status_cache) {
		switch (data->check_status_cache->status.type) {
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			/*
			 * Info and error statuses are disposable. After showing
			 * them to the user we have to release them.
			 */
			status_release(data->check_status_cache);
			data->check_status_cache = NULL;
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			/*
			 * Question status after being showed to the user carry
			 * users answer. It must be kept till answer would be
			 * processed so it can not be released from cache. It
			 * has to be pushed to the answers queue, processed and
			 * released after that.
			 */
			break;
		default:
			ASSERT(0);
		}
	}
}
/*
 * status_answer_push -- (internal) push single answer to answers queue
 */
static void
status_answer_push(struct check_data *data, struct check_status *st)
{
	/* only question statuses carry answers */
	ASSERTeq(st->status.type, PMEMPOOL_CHECK_MSG_TYPE_QUESTION);
	PMDK_TAILQ_INSERT_TAIL(&data->answers, st, next);
}
/*
 * check_push_answer -- process answer and push it to answers queue
 *
 * Parses the textual answer attached to the cached question; an invalid
 * answer re-queues the question and returns -1 so it gets asked again.
 */
int
check_push_answer(PMEMpoolcheck *ppc)
{
	if (ppc->data->check_status_cache == NULL)
		return 0;
	/* check if answer is "yes" or "no" */
	struct check_status *status = ppc->data->check_status_cache;
	if (status->status.str.answer != NULL) {
		if (strcmp(status->status.str.answer, CHECK_ANSWER_YES) == 0)
			status->answer = PMEMPOOL_CHECK_ANSWER_YES;
		else if (strcmp(status->status.str.answer, CHECK_ANSWER_NO)
				== 0)
			status->answer = PMEMPOOL_CHECK_ANSWER_NO;
	}
	if (status->answer == PMEMPOOL_CHECK_ANSWER_EMPTY) {
		/* invalid answer provided */
		status_answer_push(ppc->data, ppc->data->check_status_cache);
		ppc->data->check_status_cache = NULL;
		CHECK_INFO(ppc, "Answer must be either %s or %s",
			CHECK_ANSWER_YES, CHECK_ANSWER_NO);
		return -1;
	}
	/* push answer */
	PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers,
		ppc->data->check_status_cache, next);
	ppc->data->check_status_cache = NULL;
	return 0;
}
/*
 * check_has_error - check if error exists
 */
bool
check_has_error(struct check_data *data)
{
	return data->error != NULL;
}
/*
 * check_has_answer - check if any answer exists
 */
bool
check_has_answer(struct check_data *data)
{
	return !PMDK_TAILQ_EMPTY(&data->answers);
}
/*
 * pop_answer -- (internal) detach and return the oldest queued answer, or
 * NULL when the answers queue is empty
 */
static struct check_status *
pop_answer(struct check_data *data)
{
	if (PMDK_TAILQ_EMPTY(&data->answers))
		return NULL;
	struct check_status *answer = PMDK_TAILQ_FIRST(&data->answers);
	PMDK_TAILQ_REMOVE(&data->answers, answer, next);
	return answer;
}
/*
 * check_status_get_util -- extract pmempool_check_status from check_status
 *
 * Returns a pointer into 'status' itself; no copy is made, so the result
 * is only valid while 'status' is alive.
 */
struct pmempool_check_status *
check_status_get_util(struct check_status *status)
{
	return &status->status;
}
/*
 * check_answer_loop -- loop through all available answers and process them
 *
 * For every queued answer:
 * - "yes": invoke 'callback' to perform the fix; on callback failure the
 *   result becomes CANNOT_REPAIR and the loop aborts;
 * - "no": treated as NOT_CONSISTENT when 'fail_on_no' is set, otherwise
 *   the issue is skipped but the pool is still marked REPAIRED;
 * - anything else: NOT_CONSISTENT, loop aborts.
 * Each processed answer is released; on error the current one is released
 * before returning -1.
 */
int
check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx, int fail_on_no,
	int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx))
{
	struct check_status *answer;

	while ((answer = pop_answer(ppc->data)) != NULL) {
		/* if answer is "no" we cannot fix an issue */
		if (answer->answer != PMEMPOOL_CHECK_ANSWER_YES) {
			if (fail_on_no ||
				answer->answer != PMEMPOOL_CHECK_ANSWER_NO) {
				CHECK_ERR(ppc,
					"cannot complete repair, reverting changes");
				ppc->result = CHECK_RESULT_NOT_CONSISTENT;
				goto error;
			}

			ppc->result = CHECK_RESULT_REPAIRED;
			check_status_release(ppc, answer);
			continue;
		}

		/* perform fix */
		if (callback(ppc, data, answer->question, ctx)) {
			ppc->result = CHECK_RESULT_CANNOT_REPAIR;
			goto error;
		}

		if (ppc->result == CHECK_RESULT_ERROR)
			goto error;

		/* fix succeeded */
		ppc->result = CHECK_RESULT_REPAIRED;
		check_status_release(ppc, answer);
	}

	return 0;

error:
	check_status_release(ppc, answer);
	return -1;
}
/*
 * check_questions_sequence_validate -- generate return value from result
 *
 * Sequence of questions can result in one of the following results:
 * CONSISTENT, REPAIRED, ASK_QUESTIONS or PROCESS_ANSWERS. If result ==
 * ASK_QUESTIONS it returns -1 to indicate existence of unanswered
 * questions (the questions queue must be non-empty in that case).
 */
int
check_questions_sequence_validate(PMEMpoolcheck *ppc)
{
	ASSERT(ppc->result == CHECK_RESULT_CONSISTENT ||
		ppc->result == CHECK_RESULT_ASK_QUESTIONS ||
		ppc->result == CHECK_RESULT_PROCESS_ANSWERS ||
		ppc->result == CHECK_RESULT_REPAIRED);
	if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) {
		ASSERT(!PMDK_TAILQ_EMPTY(&ppc->data->questions));
		return -1;
	}

	return 0;
}
/*
 * check_get_time_str -- returns time in human-readable format
 *
 * Returns a pointer to a static buffer (not thread-safe). Falls back to
 * the literal "unknown" when the time cannot be converted or formatted,
 * and to "" if even that formatting fails.
 */
const char *
check_get_time_str(time_t time)
{
	static char str_buff[STR_MAX] = {0, };
	struct tm *tm = util_localtime(&time);

	/*
	 * strftime() returns 0 on failure; previously a failed strftime
	 * silently returned stale buffer contents -- treat it like a
	 * NULL tm and fall back to "unknown".
	 */
	if (tm == NULL || strftime(str_buff, STR_MAX, TIME_STR_FMT, tm) == 0) {
		int ret = util_snprintf(str_buff, STR_MAX, "unknown");
		if (ret < 0) {
			ERR("!snprintf");
			return "";
		}
	}

	return str_buff;
}
/*
 * check_get_uuid_str -- returns uuid in human readable format
 *
 * Returns a pointer to a static buffer (not thread-safe), or an empty
 * string when the conversion fails.
 */
const char *
check_get_uuid_str(uuid_t uuid)
{
	static char uuid_str[UUID_STR_MAX] = {0, };

	int ret = util_uuid_to_string(uuid, uuid_str);
	if (ret != 0) {
		/* message previously misspelled "convert" as "covert" */
		ERR("failed to convert uuid to string");
		return "";
	}
	return uuid_str;
}
/*
 * check_insert_arena -- insert arena to list
 *
 * Appends 'arenap' to the pool's arena list and bumps the arena count.
 */
void
check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap)
{
	PMDK_TAILQ_INSERT_TAIL(&ppc->pool->arenas, arenap, next);
	ppc->pool->narenas++;
}
| 15,575 | 22.247761 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/pool.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* pool.h -- internal definitions for pool processing functions
*/
#ifndef POOL_H
#define POOL_H
#include <stdbool.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "queue.h"
#include "set.h"
#include "log.h"
#include "blk.h"
#include "btt_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
/* recognized pool kinds; bit flags so they can be combined into masks */
enum pool_type {
	POOL_TYPE_UNKNOWN = (1 << 0),
	POOL_TYPE_LOG = (1 << 1),
	POOL_TYPE_BLK = (1 << 2),
	POOL_TYPE_OBJ = (1 << 3),
	POOL_TYPE_BTT = (1 << 4),
	POOL_TYPE_ANY = POOL_TYPE_UNKNOWN | POOL_TYPE_LOG |
		POOL_TYPE_BLK | POOL_TYPE_OBJ | POOL_TYPE_BTT,
};
/* basic parameters parsed from a pool header / pool file */
struct pool_params {
	enum pool_type type;
	char signature[POOL_HDR_SIG_LEN];
	features_t features;
	size_t size;		/* size of the pool (or poolset) in bytes */
	mode_t mode;		/* file mode of the pool file */
	int is_poolset;		/* path was a poolset file */
	int is_part;		/* file is one part of a multi-part pool */
	int is_dev_dax;		/* pool resides on a device dax */
	int is_pmem;		/* pool resides on persistent memory */
	union {
		struct {
			uint64_t bsize;		/* pmemblk block size */
		} blk;
		struct {
			char layout[PMEMOBJ_MAX_LAYOUT];	/* obj layout name */
		} obj;
	};
};
/* state of an open pool (set) file */
struct pool_set_file {
	int fd;			/* fd used only in BTT file mode */
	char *fname;		/* strdup'ed path the file was opened with */
	void *addr;		/* mapped address (non-BTT mode) */
	size_t size;		/* total pool size in bytes */
	struct pool_set *poolset;	/* NULL in BTT file mode */
	time_t mtime;		/* modification time of the (first) part */
	mode_t mode;		/* file mode of the (first) part */
};
/* a single BTT arena together with its on-media metadata */
struct arena {
	PMDK_TAILQ_ENTRY(arena) next;
	struct btt_info btt_info;	/* BTT Info header (host endianness) */
	uint32_t id;		/* sequential arena number */
	bool valid;		/* BTT Info header checksum is valid */
	bool zeroed;		/* all checked BTT Info headers were zeroed */
	uint64_t offset;	/* offset of the valid BTT Info header */
	uint8_t *flog;		/* flog entries buffer */
	size_t flogsize;	/* size of the flog buffer */
	uint32_t *map;		/* map entries buffer */
	size_t mapsize;		/* size of the map buffer */
};
/* everything libpmempool knows about the pool under check */
struct pool_data {
	struct pool_params params;
	struct pool_set_file *set_file;
	int blk_no_layout;
	union {
		struct pool_hdr pool;
		struct pmemlog log;
		struct pmemblk blk;
	} hdr;
	enum {
		UUID_NOP = 0,		/* uuid comes from the pool header */
		UUID_FROM_BTT,		/* uuid was recovered from BTT Info */
		UUID_NOT_FROM_BTT,	/* uuid could not be taken from BTT */
	} uuid_op;
	struct arena bttc;		/* currently processed arena */
	PMDK_TAILQ_HEAD(arenashead, arena) arenas;
	uint32_t narenas;		/* number of arenas on the list */
};
struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc);
void pool_data_free(struct pool_data *pool);
void pool_params_from_header(struct pool_params *params,
const struct pool_hdr *hdr);
int pool_set_parse(struct pool_set **setp, const char *path);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
int pool_read(struct pool_data *pool, void *buff, size_t nbytes,
uint64_t off);
int pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off);
int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite);
int pool_set_part_copy(struct pool_set_part *dpart,
struct pool_set_part *spart, int overwrite);
int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count);
unsigned pool_set_files_count(struct pool_set_file *file);
int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv);
void pool_set_file_unmap_headers(struct pool_set_file *file);
void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp);
enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp);
enum pool_type pool_set_type(struct pool_set *set);
const char *pool_get_pool_type_str(enum pool_type type);
int pool_btt_info_valid(struct btt_info *infop);
int pool_blk_get_first_valid_arena(struct pool_data *pool,
struct arena *arenap);
int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize);
uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t header_offset);
uint64_t pool_get_first_valid_btt(struct pool_data *pool,
struct btt_info *infop, uint64_t offset, bool *zeroed);
size_t pool_get_min_size(enum pool_type);
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmempool_fault_injection_enabled(void);
#else
/* stub used when fault injection is compiled out -- must never be called */
static inline void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	abort();
}
/* fault injection is compiled out, so it is never enabled */
static inline int
pmempool_fault_injection_enabled(void)
{
	return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,712 | 21.640244 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmempool/pool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pool.c -- pool processing functions
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <endian.h>
#ifndef _WIN32
#include <sys/ioctl.h>
#ifdef __FreeBSD__
#include <sys/disk.h>
#define BLKGETSIZE64 DIOCGMEDIASIZE
#else
#include <linux/fs.h>
#endif
#endif
#include "libpmem.h"
#include "libpmemlog.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "out.h"
#include "pmempool.h"
#include "pool.h"
#include "lane.h"
#include "obj.h"
#include "btt.h"
#include "file.h"
#include "os.h"
#include "set.h"
#include "check_util.h"
#include "util_pmem.h"
#include "mmap.h"
/* arbitrary size of a maximum file part being read / write at once */
#define RW_BUFFERING_SIZE (128 * 1024 * 1024)
/*
* pool_btt_lseek -- (internal) perform lseek in BTT file mode
*/
/*
 * pool_btt_lseek -- (internal) perform lseek in BTT file mode
 *
 * Thin wrapper over os_lseek() that logs failures; returns the new file
 * offset or -1 on error.
 */
static inline os_off_t
pool_btt_lseek(struct pool_data *pool, os_off_t offset, int whence)
{
	os_off_t pos = os_lseek(pool->set_file->fd, offset, whence);
	if (pos == -1)
		ERR("!lseek");

	return pos;
}
/*
* pool_btt_read -- (internal) perform read in BTT file mode
*/
/*
 * pool_btt_read -- (internal) perform read in BTT file mode
 *
 * Reads up to 'count' bytes from the pool file descriptor into 'dst',
 * retrying on short reads. Returns the number of bytes read; returns -1
 * only when the very first read fails (after partial progress a short
 * count is returned instead).
 */
static inline ssize_t
pool_btt_read(struct pool_data *pool, void *dst, size_t count)
{
	size_t total = 0;
	ssize_t nread;

	while (count > total &&
		(nread = util_read(pool->set_file->fd, dst, count - total))) {
		if (nread == -1) {
			ERR("!read");
			return total ? (ssize_t)total : -1;
		}

		/* advance with char * arithmetic, not an integer cast */
		dst = (char *)dst + nread;
		total += (size_t)nread;
	}

	return (ssize_t)total;
}
/*
* pool_btt_write -- (internal) perform write in BTT file mode
*/
/*
 * pool_btt_write -- (internal) perform write in BTT file mode
 *
 * Writes up to 'count' bytes from 'src' to the pool file descriptor,
 * retrying on short writes. Returns the number of bytes written; returns
 * -1 only when the very first write fails (after partial progress a
 * short count is returned instead).
 */
static inline ssize_t
pool_btt_write(struct pool_data *pool, const void *src, size_t count)
{
	ssize_t nwrite = 0;
	size_t total = 0;

	while (count > total &&
		(nwrite = util_write(pool->set_file->fd, src,
				count - total))) {
		if (nwrite == -1) {
			ERR("!write");
			return total ? (ssize_t)total : -1;
		}

		/* advance via const char *; the old code also cast away const */
		src = (const char *)src + nwrite;
		total += (size_t)nwrite;
	}

	return (ssize_t)total;
}
/*
* pool_set_read_header -- (internal) read a header of a pool set
*/
/*
 * pool_set_read_header -- (internal) read a header of a pool set
 *
 * Parses the poolset file 'fname' and reads the raw pool_hdr from the
 * first part of the first replica into 'hdr' (no endianness conversion
 * is performed here). Returns 0 on success, -1 on error.
 */
static int
pool_set_read_header(const char *fname, struct pool_hdr *hdr)
{
	struct pool_set *set;
	int ret = 0;

	if (util_poolset_read(&set, fname)) {
		return -1;
	}
	/* open the first part set file to read the pool header values */
	const struct pool_set_part *part = PART(REP(set, 0), 0);
	int fdp = util_file_open(part->path, NULL, 0, O_RDONLY);
	if (fdp < 0) {
		ERR("cannot open poolset part file");
		ret = -1;
		goto err_pool_set;
	}

	/* read the pool header from first pool set file */
	if (pread(fdp, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
		ERR("cannot read pool header from poolset");
		ret = -1;
		goto err_close_part;
	}

err_close_part:
	os_close(fdp);

err_pool_set:
	util_poolset_free(set);
	return ret;
}
/*
* pool_set_map -- (internal) map poolset
*/
/*
 * pool_set_map -- (internal) map poolset
 *
 * Determines the pool type from the first part's header, then opens the
 * whole poolset with attributes taken from that header (SDS and bad
 * blocks checks are skipped). Returns 0 on success, -1 on error.
 */
static int
pool_set_map(const char *fname, struct pool_set **poolset, unsigned flags)
{
	ASSERTeq(util_is_poolset_file(fname), 1);

	struct pool_hdr hdr;
	if (pool_set_read_header(fname, &hdr))
		return -1;

	util_convert2h_hdr_nocheck(&hdr);

	/* parse pool type from first pool set file */
	enum pool_type type = pool_hdr_get_type(&hdr);
	if (type == POOL_TYPE_UNKNOWN) {
		ERR("cannot determine pool type from poolset");
		return -1;
	}

	/*
	 * Open the poolset, the values passed to util_pool_open are read
	 * from the first poolset file, these values are then compared with
	 * the values from all headers of poolset files.
	 */
	struct pool_attr attr;
	util_pool_hdr2attr(&attr, &hdr);
	if (util_pool_open(poolset, fname, 0 /* minpartsize */, &attr,
			NULL, NULL, flags | POOL_OPEN_IGNORE_SDS |
			POOL_OPEN_IGNORE_BAD_BLOCKS)) {
		ERR("opening poolset failed");
		return -1;
	}

	return 0;
}
/*
* pool_params_from_header -- parse pool params from pool header
*/
/*
 * pool_params_from_header -- parse pool params from pool header
 *
 * Fills signature, features, is_part and type fields of 'params' from a
 * (host-endian) pool header. 'params->is_poolset' must be set before
 * calling -- parts of a poolset are never flagged as is_part.
 */
void
pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr)
{
	memcpy(params->signature, hdr->signature, sizeof(params->signature));
	memcpy(&params->features, &hdr->features, sizeof(params->features));

	/*
	 * Check if file is a part of pool set by comparing the UUID with the
	 * next part UUID. If it is the same it means the pool consist of a
	 * single file.
	 */
	int uuid_eq_next = uuidcmp(hdr->uuid, hdr->next_part_uuid);
	int uuid_eq_prev = uuidcmp(hdr->uuid, hdr->prev_part_uuid);
	params->is_part = !params->is_poolset && (uuid_eq_next || uuid_eq_prev);

	params->type = pool_hdr_get_type(hdr);
}
/*
* pool_check_type_to_pool_type -- (internal) convert check pool type to
* internal pool type value
*/
/*
 * pool_check_type_to_pool_type -- (internal) convert check pool type to
 *	internal pool type value
 *
 * Returns POOL_TYPE_UNKNOWN (and logs an error) for any value without a
 * direct mapping.
 */
static enum pool_type
pool_check_type_to_pool_type(enum pmempool_pool_type check_pool_type)
{
	if (check_pool_type == PMEMPOOL_POOL_TYPE_LOG)
		return POOL_TYPE_LOG;
	if (check_pool_type == PMEMPOOL_POOL_TYPE_BLK)
		return POOL_TYPE_BLK;
	if (check_pool_type == PMEMPOOL_POOL_TYPE_OBJ)
		return POOL_TYPE_OBJ;

	ERR("can not convert pmempool_pool_type %u to pool_type",
		check_pool_type);
	return POOL_TYPE_UNKNOWN;
}
/*
* pool_parse_params -- parse pool type, file size and block size
*/
static int
pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params,
int check)
{
LOG(3, NULL);
int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT;
params->type = POOL_TYPE_UNKNOWN;
params->is_poolset = util_is_poolset_file(ppc->path) == 1;
int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY);
if (fd < 0)
return -1;
int ret = 0;
os_stat_t stat_buf;
ret = os_fstat(fd, &stat_buf);
if (ret)
goto out_close;
ASSERT(stat_buf.st_size >= 0);
params->mode = stat_buf.st_mode;
struct pool_set *set;
void *addr;
if (params->is_poolset) {
/*
* Need to close the poolset because it will be opened with
* flock in the following instructions.
*/
os_close(fd);
fd = -1;
if (check) {
if (pool_set_map(ppc->path, &set, 0))
return -1;
} else {
ret = util_poolset_create_set(&set, ppc->path,
0, 0, true);
if (ret < 0) {
LOG(2, "cannot open pool set -- '%s'",
ppc->path);
return -1;
}
if (set->remote) {
ERR("poolsets with remote replicas are not "
"supported");
return -1;
}
if (util_pool_open_nocheck(set,
POOL_OPEN_IGNORE_BAD_BLOCKS))
return -1;
}
params->size = set->poolsize;
addr = set->replica[0]->part[0].addr;
/*
* XXX mprotect for device dax with length not aligned to its
* page granularity causes SIGBUS on the next page fault.
* The length argument of this call should be changed to
* set->poolsize once the kernel issue is solved.
*/
if (mprotect(addr, set->replica[0]->repsize,
PROT_READ) < 0) {
ERR("!mprotect");
goto out_unmap;
}
params->is_dev_dax = set->replica[0]->part[0].is_dev_dax;
params->is_pmem = set->replica[0]->is_pmem;
} else if (is_btt) {
params->size = (size_t)stat_buf.st_size;
#ifndef _WIN32
if (params->mode & S_IFBLK)
if (ioctl(fd, BLKGETSIZE64, ¶ms->size)) {
ERR("!ioctl");
goto out_close;
}
#endif
addr = NULL;
} else {
enum file_type type = util_file_get_type(ppc->path);
if (type < 0) {
ret = -1;
goto out_close;
}
ssize_t s = util_file_get_size(ppc->path);
if (s < 0) {
ret = -1;
goto out_close;
}
params->size = (size_t)s;
int map_sync;
addr = util_map(fd, 0, params->size, MAP_SHARED, 1, 0,
&map_sync);
if (addr == NULL) {
ret = -1;
goto out_close;
}
params->is_dev_dax = type == TYPE_DEVDAX;
params->is_pmem = params->is_dev_dax || map_sync ||
pmem_is_pmem(addr, params->size);
}
/* stop processing for BTT device */
if (is_btt) {
params->type = POOL_TYPE_BTT;
params->is_part = false;
goto out_close;
}
struct pool_hdr hdr;
memcpy(&hdr, addr, sizeof(hdr));
util_convert2h_hdr_nocheck(&hdr);
pool_params_from_header(params, &hdr);
if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) {
enum pool_type declared_type =
pool_check_type_to_pool_type(ppc->args.pool_type);
if ((params->type & ~declared_type) != 0) {
ERR("declared pool type does not match");
errno = EINVAL;
ret = 1;
goto out_unmap;
}
}
if (params->type == POOL_TYPE_BLK) {
struct pmemblk pbp;
memcpy(&pbp, addr, sizeof(pbp));
params->blk.bsize = le32toh(pbp.bsize);
} else if (params->type == POOL_TYPE_OBJ) {
struct pmemobjpool *pop = addr;
memcpy(params->obj.layout, pop->layout,
PMEMOBJ_MAX_LAYOUT);
}
out_unmap:
if (params->is_poolset) {
ASSERTeq(fd, -1);
ASSERTne(addr, NULL);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
} else if (!is_btt) {
ASSERTne(fd, -1);
ASSERTne(addr, NULL);
munmap(addr, params->size);
}
out_close:
if (fd != -1)
os_close(fd);
return ret;
}
/*
* pool_set_file_open -- (internal) opens pool set file or regular file
*/
/*
 * pool_set_file_open -- (internal) opens pool set file or regular file
 *
 * For non-BTT pools the path is opened as a poolset with all parts
 * mapped; for BTT devices only a file descriptor is kept. Returns a
 * freshly allocated pool_set_file, or NULL on error.
 */
static struct pool_set_file *
pool_set_file_open(const char *fname, struct pool_params *params, int rdonly)
{
	LOG(3, NULL);

	struct pool_set_file *file = calloc(1, sizeof(*file));
	if (!file)
		return NULL;

	file->fname = strdup(fname);
	if (!file->fname)
		goto err;

	const char *path = file->fname;

	if (params->type != POOL_TYPE_BTT) {
		int ret = util_poolset_create_set(&file->poolset, path,
			0, 0, true);
		if (ret < 0) {
			LOG(2, "cannot open pool set -- '%s'", path);
			goto err_free_fname;
		}
		unsigned flags = (rdonly ? POOL_OPEN_COW : 0) |
			POOL_OPEN_IGNORE_BAD_BLOCKS;
		if (util_pool_open_nocheck(file->poolset, flags))
			goto err_free_fname;
		file->size = file->poolset->poolsize;

		/* get modification time from the first part of first replica */
		path = file->poolset->replica[0]->part[0].path;
		file->addr = file->poolset->replica[0]->part[0].addr;
	} else {
		int oflag = rdonly ? O_RDONLY : O_RDWR;
		file->fd = util_file_open(fname, NULL, 0, oflag);
		/*
		 * Previously a failed open was silently ignored here and a
		 * pool_set_file with fd == -1 could be returned as success.
		 */
		if (file->fd < 0) {
			LOG(2, "cannot open file -- '%s'", fname);
			goto err_free_fname;
		}
		file->size = params->size;
	}

	os_stat_t buf;
	if (os_stat(path, &buf)) {
		ERR("%s", path);
		goto err_close_poolset;
	}

	file->mtime = buf.st_mtime;
	file->mode = buf.st_mode;
	return file;

err_close_poolset:
	if (params->type != POOL_TYPE_BTT)
		util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
	else if (file->fd != -1)
		os_close(file->fd);

err_free_fname:
	free(file->fname);
err:
	free(file);
	return NULL;
}
/*
* pool_set_parse -- parse poolset file
*/
/*
 * pool_set_parse -- parse poolset file
 *
 * Opens 'path', hands the descriptor to util_poolset_parse() and closes
 * it again. Returns 0 on success, 1 on error.
 */
int
pool_set_parse(struct pool_set **setp, const char *path)
{
	LOG(3, "setp %p path %s", setp, path);

	int fd = os_open(path, O_RDONLY);
	if (fd < 0)
		return 1;

	int ret = util_poolset_parse(setp, path, fd) ? 1 : 0;

	os_close(fd);
	return ret;
}
/*
* pool_data_alloc -- allocate pool data and open set_file
*/
/*
 * pool_data_alloc -- allocate pool data and open set_file
 *
 * Parses pool parameters from ppc->path, opens the pool (read-only and
 * copy-on-write in dry-run mode) and maps part headers for non-BTT
 * pools. Returns NULL on error; partially constructed state is released
 * via pool_data_free().
 */
struct pool_data *
pool_data_alloc(PMEMpoolcheck *ppc)
{
	LOG(3, NULL);

	struct pool_data *pool = calloc(1, sizeof(*pool));
	if (!pool) {
		ERR("!calloc");
		return NULL;
	}

	PMDK_TAILQ_INIT(&pool->arenas);
	pool->uuid_op = UUID_NOP;

	if (pool_params_parse(ppc, &pool->params, 0))
		goto error;

	int rdonly = CHECK_IS_NOT(ppc, REPAIR);
	int prv = CHECK_IS(ppc, DRY_RUN);

	/* dry run maps the pool privately, which device dax does not support */
	if (prv && pool->params.is_dev_dax) {
		errno = ENOTSUP;
		ERR("!cannot perform a dry run on dax device");
		goto error;
	}

	pool->set_file = pool_set_file_open(ppc->path, &pool->params, prv);
	if (pool->set_file == NULL)
		goto error;

	/*
	 * XXX mprotect for device dax with length not aligned to its
	 * page granularity causes SIGBUS on the next page fault.
	 * The length argument of this call should be changed to
	 * pool->set_file->poolsize once the kernel issue is solved.
	 */
	if (rdonly && mprotect(pool->set_file->addr,
		pool->set_file->poolset->replica[0]->repsize,
		PROT_READ) < 0)
		goto error;

	if (pool->params.type != POOL_TYPE_BTT) {
		if (pool_set_file_map_headers(pool->set_file, rdonly, prv))
			goto error;
	}

	return pool;

error:
	pool_data_free(pool);
	return NULL;
}
/*
* pool_set_file_close -- (internal) closes pool set file or regular file
*/
/*
 * pool_set_file_close -- (internal) closes pool set file or regular file
 *
 * Releases whichever resources pool_set_file_open() acquired: either the
 * whole poolset, or the BTT-mode mapping/descriptor.
 */
static void
pool_set_file_close(struct pool_set_file *file)
{
	LOG(3, NULL);

	if (file->poolset)
		util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
	else if (file->addr) {
		munmap(file->addr, file->size);
		os_close(file->fd);
	} else if (file->fd)
		/*
		 * NOTE(review): truthy test skips fd == 0 but would pass
		 * fd == -1 (a failed open) to os_close -- confirm fd can
		 * never be -1 here.
		 */
		os_close(file->fd);

	free(file->fname);
	free(file);
}
/*
* pool_data_free -- close set_file and release pool data
*/
/*
 * pool_data_free -- close set_file and release pool data
 *
 * Closes the set file (unmapping part headers first for non-BTT pools)
 * and frees every arena on the list together with its buffers.
 */
void
pool_data_free(struct pool_data *pool)
{
	LOG(3, NULL);

	if (pool->set_file) {
		if (pool->params.type != POOL_TYPE_BTT)
			pool_set_file_unmap_headers(pool->set_file);
		pool_set_file_close(pool->set_file);
	}

	while (!PMDK_TAILQ_EMPTY(&pool->arenas)) {
		struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
		/* free(NULL) is a no-op, so no NULL guards are needed */
		free(arenap->map);
		free(arenap->flog);

		PMDK_TAILQ_REMOVE(&pool->arenas, arenap, next);
		free(arenap);
	}
	free(pool);
}
/*
* pool_set_file_map -- return mapped address at given offset
*/
/*
 * pool_set_file_map -- return mapped address at given offset
 *
 * Returns NULL when the file mapping failed.
 */
void *
pool_set_file_map(struct pool_set_file *file, uint64_t offset)
{
	return file->addr == MAP_FAILED ?
		NULL : (char *)file->addr + offset;
}
/*
* pool_read -- read from pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
/*
 * pool_read -- read from pool set file or regular file
 *
 * 'buff' has to be a buffer at least 'nbytes' long
 * 'off' is an offset from the beginning of the pool
 *
 * Returns 0 on success, -1 on out-of-range or I/O failure.
 */
int
pool_read(struct pool_data *pool, void *buff, size_t nbytes, uint64_t off)
{
	if (off + nbytes > pool->set_file->size)
		return -1;

	if (pool->params.type == POOL_TYPE_BTT) {
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;
		if ((size_t)pool_btt_read(pool, buff, nbytes) != nbytes)
			return -1;
		return 0;
	}

	/* mapped pool -- plain memory copy */
	memcpy(buff, (char *)pool->set_file->addr + off, nbytes);
	return 0;
}
/*
* pool_write -- write to pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
/*
 * pool_write -- write to pool set file or regular file
 *
 * 'buff' has to be a buffer at least 'nbytes' long
 * 'off' is an offset from the beginning of the pool
 *
 * For mapped pools the written range is persisted (flush or msync
 * depending on is_pmem). Returns 0 on success, -1 on failure.
 */
int
pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
	uint64_t off)
{
	if (off + nbytes > pool->set_file->size)
		return -1;

	if (pool->params.type != POOL_TYPE_BTT) {
		memcpy((char *)pool->set_file->addr + off, buff, nbytes);
		util_persist_auto(pool->params.is_pmem,
			(char *)pool->set_file->addr + off, nbytes);
	} else {
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;
		if ((size_t)pool_btt_write(pool, buff, nbytes) != nbytes)
			return -1;
	}

	return 0;
}
/*
* pool_copy -- make a copy of the pool
*/
/*
 * pool_copy -- make a copy of the pool
 *
 * Copies the whole pool to 'dst_path' (created if absent; overwritten
 * only when 'overwrite' is set, otherwise fails with EEXIST). Mapped
 * pools are copied with one memcpy; BTT-mode pools are streamed through
 * a bounded buffer. Returns 0 on success, -1 on failure.
 */
int
pool_copy(struct pool_data *pool, const char *dst_path, int overwrite)
{
	struct pool_set_file *file = pool->set_file;
	int dfd;
	int exists = util_file_exists(dst_path);
	if (exists < 0)
		return -1;

	if (exists) {
		if (!overwrite) {
			errno = EEXIST;
			return -1;
		}
		dfd = util_file_open(dst_path, NULL, 0, O_RDWR);
	} else {
		errno = 0;
		dfd = util_file_create(dst_path, file->size, 0);
	}
	if (dfd < 0)
		return -1;

	int result = 0;
	os_stat_t stat_buf;
	if (os_stat(file->fname, &stat_buf)) {
		result = -1;
		goto out_close;
	}

	/* propagate the source file mode to the copy */
	if (fchmod(dfd, stat_buf.st_mode)) {
		result = -1;
		goto out_close;
	}

	void *daddr = mmap(NULL, file->size, PROT_READ | PROT_WRITE,
		MAP_SHARED, dfd, 0);
	if (daddr == MAP_FAILED) {
		result = -1;
		goto out_close;
	}

	if (pool->params.type != POOL_TYPE_BTT) {
		void *saddr = pool_set_file_map(file, 0);
		memcpy(daddr, saddr, file->size);
		goto out_unmap;
	}

	void *buf = malloc(RW_BUFFERING_SIZE);
	if (buf == NULL) {
		ERR("!malloc");
		result = -1;
		goto out_unmap;
	}

	if (pool_btt_lseek(pool, 0, SEEK_SET) == -1) {
		result = -1;
		goto out_free;
	}
	ssize_t buf_read = 0;
	void *dst = daddr;
	/* stream the BTT file through the buffer until EOF or error */
	while ((buf_read = pool_btt_read(pool, buf, RW_BUFFERING_SIZE))) {
		if (buf_read == -1)
			break;

		memcpy(dst, buf, (size_t)buf_read);
		dst = (void *)((ssize_t)dst + buf_read);
	}

out_free:
	free(buf);
out_unmap:
	munmap(daddr, file->size);
out_close:
	(void) os_close(dfd);
	return result;
}
/*
* pool_set_part_copy -- make a copy of the poolset part
*/
/*
 * pool_set_part_copy -- make a copy of the poolset part
 *
 * Maps the source part read-only, maps (or creates) the destination
 * part, copies the mapped contents and persists them (pmem flush or
 * msync as appropriate). An existing destination is only overwritten
 * when 'overwrite' is set (EEXIST otherwise). Returns 0 on success,
 * -1 on failure.
 */
int
pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart,
	int overwrite)
{
	LOG(3, "dpart %p spart %p", dpart, spart);

	int result = 0;
	os_stat_t stat_buf;
	if (os_fstat(spart->fd, &stat_buf)) {
		ERR("!util_stat");
		return -1;
	}

	size_t smapped = 0;
	void *saddr = pmem_map_file(spart->path, 0, 0, S_IREAD, &smapped, NULL);
	if (!saddr)
		return -1;

	size_t dmapped = 0;
	int is_pmem;
	void *daddr;

	int exists = util_file_exists(dpart->path);
	if (exists < 0) {
		result = -1;
		goto out_sunmap;
	}

	if (exists) {
		if (!overwrite) {
			errno = EEXIST;
			result = -1;
			goto out_sunmap;
		}

		daddr = pmem_map_file(dpart->path, 0, 0, S_IWRITE, &dmapped,
			&is_pmem);
	} else {
		errno = 0;
		daddr = pmem_map_file(dpart->path, dpart->filesize,
			PMEM_FILE_CREATE | PMEM_FILE_EXCL,
			stat_buf.st_mode, &dmapped, &is_pmem);
	}
	if (!daddr) {
		result = -1;
		goto out_sunmap;
	}

#ifdef DEBUG
	/* provide extra logging in case of wrong dmapped/smapped value */
	if (dmapped < smapped) {
		LOG(1, "dmapped < smapped: dmapped = %lu, smapped = %lu",
			dmapped, smapped);
		ASSERT(0);
	}
#endif

	if (is_pmem) {
		pmem_memcpy_persist(daddr, saddr, smapped);
	} else {
		memcpy(daddr, saddr, smapped);
		pmem_msync(daddr, smapped);
	}

	pmem_unmap(daddr, dmapped);

out_sunmap:
	pmem_unmap(saddr, smapped);
	return result;
}
/*
* pool_memset -- memset pool part described by off and count
*/
/*
 * pool_memset -- memset pool part described by off and count
 *
 * Fills 'count' bytes with 'c'. For BTT-mode pools the fill is streamed
 * through a bounded buffer to the file descriptor. Returns 0 on success,
 * -1 on failure.
 */
int
pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count)
{
	int result = 0;

	if (pool->params.type != POOL_TYPE_BTT)
		/*
		 * NOTE(review): 'off' is cast directly to a pointer here,
		 * not added to the mapping base -- callers apparently pass
		 * an absolute address; confirm against call sites.
		 *
		 * Use the requested fill byte 'c' -- the previous code
		 * always wrote 0 here, silently ignoring the parameter.
		 */
		memset((char *)off, c, count);
	else {
		if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
			return -1;

		/* fill in bounded chunks to limit the temporary buffer */
		size_t chunk_size = min(count, RW_BUFFERING_SIZE);
		void *buf = malloc(chunk_size);
		if (!buf) {
			ERR("!malloc");
			return -1;
		}
		memset(buf, c, chunk_size);

		ssize_t nwrite = 0;
		do {
			chunk_size = min(chunk_size, count);
			nwrite = pool_btt_write(pool, buf, chunk_size);
			if (nwrite < 0) {
				result = -1;
				break;
			}
			count -= (size_t)nwrite;
		} while (count > 0);
		free(buf);
	}

	return result;
}
/*
* pool_set_files_count -- get total number of parts of all replicas
*/
/*
 * pool_set_files_count -- get total number of parts of all replicas
 */
unsigned
pool_set_files_count(struct pool_set_file *file)
{
	unsigned total = 0;

	for (unsigned r = 0; r < file->poolset->nreplicas; r++)
		total += file->poolset->replica[r]->nparts;

	return total;
}
/*
* pool_set_file_map_headers -- map headers of each pool set part file
*/
/*
 * pool_set_file_map_headers -- map headers of each pool set part file
 *
 * With 'prv' set the headers are mapped MAP_PRIVATE (dry-run mode),
 * otherwise MAP_SHARED. On any failure all already-mapped headers are
 * unmapped again. Returns 0 on success, -1 on failure.
 */
int
pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv)
{
	if (!file->poolset)
		return -1;

	for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
		struct pool_replica *rep = file->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			struct pool_set_part *part = &rep->part[p];
			if (util_map_hdr(part,
				prv ? MAP_PRIVATE : MAP_SHARED, rdonly)) {
				part->hdr = NULL;
				goto err;
			}
		}
	}

	return 0;
err:
	pool_set_file_unmap_headers(file);
	return -1;
}
/*
* pool_set_file_unmap_headers -- unmap headers of each pool set part file
*/
/*
 * pool_set_file_unmap_headers -- unmap headers of each pool set part file
 *
 * Safe to call on a file without a poolset (no-op) and on parts whose
 * headers were never mapped.
 */
void
pool_set_file_unmap_headers(struct pool_set_file *file)
{
	if (file->poolset == NULL)
		return;

	for (unsigned rep = 0; rep < file->poolset->nreplicas; rep++) {
		struct pool_replica *replica = file->poolset->replica[rep];
		for (unsigned part = 0; part < replica->nparts; part++)
			util_unmap_hdr(&replica->part[part]);
	}
}
/*
* pool_get_signature -- (internal) return signature of specified pool type
*/
/*
 * pool_get_signature -- (internal) return signature of specified pool type
 *
 * Returns NULL for types without an on-media signature (BTT, UNKNOWN).
 */
static const char *
pool_get_signature(enum pool_type type)
{
	const char *sig = NULL;

	if (type == POOL_TYPE_LOG)
		sig = LOG_HDR_SIG;
	else if (type == POOL_TYPE_BLK)
		sig = BLK_HDR_SIG;
	else if (type == POOL_TYPE_OBJ)
		sig = OBJ_HDR_SIG;

	return sig;
}
/*
* pool_hdr_default -- return default pool header values
*/
/*
 * pool_hdr_default -- return default pool header values
 *
 * Zero-fills the header, then sets the signature, major version and
 * default feature flags for the given pool type. 'type' must be one of
 * LOG/BLK/OBJ (types without a signature trip the assert).
 */
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
	memset(hdrp, 0, sizeof(*hdrp));
	const char *sig = pool_get_signature(type);
	ASSERTne(sig, NULL);

	memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);

	switch (type) {
	case POOL_TYPE_LOG:
		hdrp->major = LOG_FORMAT_MAJOR;
		hdrp->features = log_format_feat_default;
		break;
	case POOL_TYPE_BLK:
		hdrp->major = BLK_FORMAT_MAJOR;
		hdrp->features = blk_format_feat_default;
		break;
	case POOL_TYPE_OBJ:
		hdrp->major = OBJ_FORMAT_MAJOR;
		hdrp->features = obj_format_feat_default;
		break;
	default:
		break;
	}
}
/*
* pool_hdr_get_type -- return pool type based on pool header data
*/
enum pool_type
pool_hdr_get_type(const struct pool_hdr *hdrp)
{
if (memcmp(hdrp->signature, LOG_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_LOG;
else if (memcmp(hdrp->signature, BLK_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_BLK;
else if (memcmp(hdrp->signature, OBJ_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_OBJ;
else
return POOL_TYPE_UNKNOWN;
}
/*
* pool_get_pool_type_str -- return human-readable pool type string
*/
/*
 * pool_get_pool_type_str -- return human-readable pool type string
 */
const char *
pool_get_pool_type_str(enum pool_type type)
{
	const char *str;

	switch (type) {
	case POOL_TYPE_BTT:
		str = "btt";
		break;
	case POOL_TYPE_LOG:
		str = "pmemlog";
		break;
	case POOL_TYPE_BLK:
		str = "pmemblk";
		break;
	case POOL_TYPE_OBJ:
		str = "pmemobj";
		break;
	default:
		str = "unknown";
		break;
	}

	return str;
}
/*
* pool_set_type -- get pool type of a poolset
*/
/*
 * pool_set_type -- get pool type of a poolset
 *
 * Reads the raw header of the first part of the first replica and
 * classifies it by signature. Returns POOL_TYPE_UNKNOWN when the header
 * cannot be read.
 */
enum pool_type
pool_set_type(struct pool_set *set)
{
	struct pool_hdr hdr;

	/* open the first part file to read the pool header values */
	const struct pool_set_part *part = PART(REP(set, 0), 0);

	if (util_file_pread(part->path, &hdr, sizeof(hdr), 0) !=
			sizeof(hdr)) {
		ERR("cannot read pool header from poolset");
		return POOL_TYPE_UNKNOWN;
	}

	util_convert2h_hdr_nocheck(&hdr);
	enum pool_type type = pool_hdr_get_type(&hdr);
	return type;
}
/*
* pool_btt_info_valid -- check consistency of BTT Info header
*/
/*
 * pool_btt_info_valid -- check consistency of BTT Info header
 *
 * Returns non-zero when the signature matches and the checksum verifies.
 */
int
pool_btt_info_valid(struct btt_info *infop)
{
	if (memcmp(infop->sig, BTTINFO_SIG, BTTINFO_SIG_LEN) != 0)
		return 0;

	return util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0);
}
/*
* pool_blk_get_first_valid_arena -- get first valid BTT Info in arena
*/
/*
 * pool_blk_get_first_valid_arena -- get first valid BTT Info in arena
 *
 * Searches for the first valid BTT Info starting past the pool header
 * area (2 * BTT_ALIGNMENT). On success fills offset/valid in 'arenap'
 * and returns 1; returns 0 when none is found. 'arenap->zeroed' records
 * whether every candidate BTT Info inspected was all-zero.
 */
int
pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap)
{
	arenap->zeroed = true;
	uint64_t offset = pool_get_first_valid_btt(pool, &arenap->btt_info,
		2 * BTT_ALIGNMENT, &arenap->zeroed);

	if (offset != 0) {
		arenap->offset = offset;
		arenap->valid = true;
		return 1;
	}

	return 0;
}
/*
* pool_next_arena_offset -- get offset of next arena
*
* Calculated offset is theoretical. Function does not check if such arena can
* exist.
*/
/*
 * pool_next_arena_offset -- get offset of next arena
 *
 * Calculated offset is theoretical. Function does not check if such
 * arena can exist.
 */
uint64_t
pool_next_arena_offset(struct pool_data *pool, uint64_t offset)
{
	/* offset of the last BTT_ALIGNMENT-aligned byte of the pool */
	uint64_t lastoff = pool->set_file->size & ~(BTT_ALIGNMENT - 1);
	uint64_t candidate = offset + BTT_MAX_ARENA;

	return candidate < lastoff ? candidate : lastoff;
}
/*
* pool_get_first_valid_btt -- return offset to first valid BTT Info
*
* - Return offset to valid BTT Info header in pool file.
* - Start looking from given offset.
* - Convert BTT Info header to host endianness.
* - Return the BTT Info header by pointer.
* - If zeroed pointer provided would check if all checked BTT Info are zeroed
* which is useful for BLK pools
*/
/*
 * pool_get_first_valid_btt -- return offset to first valid BTT Info
 *
 * - Return offset to valid BTT Info header in pool file.
 * - Start looking from given offset.
 * - Convert BTT Info header to host endianness.
 * - Return the BTT Info header by pointer.
 * - If zeroed pointer provided would check if all checked BTT Info are
 *   zeroed which is useful for BLK pools
 *
 * Returns 0 when no valid BTT Info is found.
 */
uint64_t
pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop,
	uint64_t offset, bool *zeroed)
{
	/* if we have valid arena get BTT Info header from it */
	if (pool->narenas != 0) {
		struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
		memcpy(infop, &arenap->btt_info, sizeof(*infop));
		return arenap->offset;
	}

	const size_t info_size = sizeof(*infop);

	/* theoretical offsets to BTT Info header and backup */
	uint64_t offsets[2] = {offset, 0};

	while (offsets[0] < pool->set_file->size) {
		/* calculate backup offset */
		offsets[1] = pool_next_arena_offset(pool, offsets[0]) -
			info_size;

		/* check both offsets: header and backup */
		for (int i = 0; i < 2; ++i) {
			if (pool_read(pool, infop, info_size, offsets[i]))
				continue;

			/* check if all possible BTT Info are zeroed */
			if (zeroed)
				*zeroed &= util_is_zeroed((const void *)infop,
					info_size);

			/* check if read BTT Info is valid */
			if (pool_btt_info_valid(infop)) {
				btt_info_convert2h(infop);
				return offsets[i];
			}
		}

		/* jump to next arena */
		offsets[0] += BTT_MAX_ARENA;
	}

	return 0;
}
/*
* pool_get_min_size -- return the minimum pool size of a pool of a given type
*/
/*
 * pool_get_min_size -- return the minimum pool size of a pool of a given type
 *
 * Returns SIZE_MAX (and logs an error) for unrecognized types.
 */
size_t
pool_get_min_size(enum pool_type type)
{
	if (type == POOL_TYPE_LOG)
		return PMEMLOG_MIN_POOL;
	if (type == POOL_TYPE_BLK)
		return PMEMBLK_MIN_POOL;
	if (type == POOL_TYPE_OBJ)
		return PMEMOBJ_MIN_POOL;

	ERR("unknown type of a pool");
	return SIZE_MAX;
}
#if FAULT_INJECTION
/* forward fault-injection requests to the core implementation */
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	core_inject_fault_at(type, nth, at);
}
/* report whether fault injection was enabled at runtime */
int
pmempool_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif
| 24,738 | 21.009786 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem/pmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem.c -- pmem entry points for libpmem
*
*
* PERSISTENT MEMORY INSTRUCTIONS ON X86
*
* The primary feature of this library is to provide a way to flush
* changes to persistent memory as outlined below (note that many
* of the decisions below are made at initialization time, and not
* repeated every time a flush is requested).
*
* To flush a range to pmem when CLWB is available:
*
* CLWB for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when CLFLUSHOPT is available and CLWB is not
* (same as above but issue CLFLUSHOPT instead of CLWB):
*
* CLFLUSHOPT for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when neither CLFLUSHOPT or CLWB are available
* (same as above but fences surrounding CLFLUSH are not required):
*
* CLFLUSH for each cache line in the given range.
*
* To memcpy a range of memory to pmem when MOVNT is available:
*
* Copy any non-64-byte portion of the destination using MOV.
*
* Use the flush flow above without the fence for the copied portion.
*
* Copy using MOVNTDQ, up to any non-64-byte aligned end portion.
* (The MOVNT instructions bypass the cache, so no flush is required.)
*
* Copy any unaligned end portion using MOV.
*
* Use the flush flow above for the copied portion (including fence).
*
* To memcpy a range of memory to pmem when MOVNT is not available:
*
* Just pass the call to the normal memcpy() followed by pmem_persist().
*
* To memset a non-trivial sized range of memory to pmem:
*
* Same as the memcpy cases above but store the given value instead
* of reading values from the source.
*
* These features are supported for ARM AARCH64 using equivalent ARM
* assembly instruction. Please refer to (arm_cacheops.h) for more details.
*
* INTERFACES FOR FLUSHING TO PERSISTENT MEMORY
*
* Given the flows above, three interfaces are provided for flushing a range
* so that the caller has the ability to separate the steps when necessary,
* but otherwise leaves the detection of available instructions to the libpmem:
*
* pmem_persist(addr, len)
*
* This is the common case, which just calls the two other functions:
*
* pmem_flush(addr, len);
* pmem_drain();
*
* pmem_flush(addr, len)
*
* CLWB or CLFLUSHOPT or CLFLUSH for each cache line
*
* pmem_drain()
*
* SFENCE unless using CLFLUSH
*
*
* INTERFACES FOR COPYING/SETTING RANGES OF MEMORY
*
* Given the flows above, the following interfaces are provided for the
* memmove/memcpy/memset operations to persistent memory:
*
* pmem_memmove_nodrain()
*
* Checks for overlapped ranges to determine whether to copy from
* the beginning of the range or from the end. If MOVNT instructions
* are available, uses the memory copy flow described above, otherwise
* calls the libc memmove() followed by pmem_flush(). Since no conditional
* compilation and/or architecture specific CFLAGS are in use at the
* moment, SSE2 ( thus movnt ) is just assumed to be available.
*
* pmem_memcpy_nodrain()
*
* Just calls pmem_memmove_nodrain().
*
* pmem_memset_nodrain()
*
* If MOVNT instructions are available, uses the memset flow described
* above, otherwise calls the libc memset() followed by pmem_flush().
*
* pmem_memmove_persist()
* pmem_memcpy_persist()
* pmem_memset_persist()
*
* Calls the appropriate _nodrain() function followed by pmem_drain().
*
*
* DECISIONS MADE AT INITIALIZATION TIME
*
* As much as possible, all decisions described above are made at library
* initialization time. This is achieved using function pointers that are
* setup by pmem_init() when the library loads.
*
* Func_fence is used by pmem_drain() to call one of:
* fence_empty()
* memory_barrier()
*
* Func_flush is used by pmem_flush() to call one of:
* flush_dcache()
* flush_dcache_invalidate_opt()
* flush_dcache_invalidate()
*
* Func_memmove_nodrain is used by memmove_nodrain() to call one of:
* memmove_nodrain_libc()
* memmove_nodrain_movnt()
*
* Func_memset_nodrain is used by memset_nodrain() to call one of:
* memset_nodrain_libc()
* memset_nodrain_movnt()
*
* DEBUG LOGGING
*
* Many of the functions here get called hundreds of times from loops
* iterating over ranges, making the usual LOG() calls at level 3
* impractical. The call tracing log for those functions is set at 15.
*/
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmem2_arch.h"
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "valgrind_internal.h"
#include "os_deep.h"
#include "auto_flush.h"
/*
 * Implementations of the flush/fence/mem* primitives, selected once at
 * load time by pmem_init() based on platform capabilities and environment
 * variables.
 */
struct pmem_funcs {
	memmove_nodrain_func memmove_nodrain;
	memset_nodrain_func memset_nodrain;
	flush_func deep_flush;	/* always set to the real arch flush */
	flush_func flush;	/* may be flush_empty when eADR is in use */
	fence_func fence;
};
static struct pmem_funcs Funcs;
/* routine backing pmem_is_pmem(); chosen lazily in pmem_is_pmem_init() */
static is_pmem_func Is_pmem = NULL;
/*
 * pmem_has_hw_drain -- return whether or not HW drain was found
 *
 * Always false for x86: HW drain is done by HW with no SW involvement.
 */
int
pmem_has_hw_drain(void)
{
	LOG(3, NULL);
	return 0;
}
/*
 * pmem_drain -- wait for any PM stores to drain from HW buffers
 *
 * Issues the fence selected at load time (may be a no-op when the flush
 * instruction has a built-in fence -- see pmem_init()).
 */
void
pmem_drain(void)
{
	LOG(15, NULL);
	Funcs.fence();
}
/*
 * pmem_has_auto_flush -- check if platform supports eADR
 */
int
pmem_has_auto_flush()
{
	LOG(3, NULL);
	return pmem2_auto_flush();
}
/*
 * pmem_deep_flush -- flush processor cache for the given range
 * regardless of eADR support on platform
 */
void
pmem_deep_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	Funcs.deep_flush(addr, len);
}
/*
 * pmem_flush -- flush processor cache for the given range
 *
 * May be a no-op when eADR was detected at init time (see pmem_init()).
 */
void
pmem_flush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	Funcs.flush(addr, len);
}
/*
 * pmem_persist -- make any cached changes to a range of pmem persistent
 *
 * Equivalent to pmem_flush() followed by pmem_drain().
 */
void
pmem_persist(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	pmem_flush(addr, len);
	pmem_drain();
}
/*
 * pmem_msync -- flush to persistence via msync
 *
 * Using msync() means this routine is less optimal for pmem (but it
 * still works) but it also works for any memory mapped file, unlike
 * pmem_persist() which is only safe where pmem_is_pmem() returns true.
 *
 * Returns msync()'s result: 0 on success, negative on error (errno set).
 */
int
pmem_msync(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
	/*
	 * msync requires addr to be a multiple of pagesize but there are no
	 * requirements for len. Align addr down and change len so that
	 * [addr, addr + len) still contains initial range.
	 */
	/* increase len by the amount we gain when we round addr down */
	len += (uintptr_t)addr & (Pagesize - 1);
	/* round addr down to page boundary */
	uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1);
	/*
	 * msync accepts addresses aligned to page boundary, so we may sync
	 * more and part of it may have been marked as undefined/inaccessible.
	 * Msyncing such memory is not a bug, so as a workaround temporarily
	 * disable error reporting.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;
	int ret;
	if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0)
		ERR("!msync");
	VALGRIND_DO_ENABLE_ERROR_REPORTING;
	/* full flush */
	VALGRIND_DO_PERSIST(uptr, len);
	return ret;
}
/*
 * is_pmem_always -- (internal) always true (for meaningful parameters)
 * version of pmem_is_pmem()
 *
 * A zero-length range is never reported as pmem.
 */
static int
is_pmem_always(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	return len != 0;
}
/*
 * is_pmem_never -- (internal) never true version of pmem_is_pmem()
 */
static int
is_pmem_never(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	return 0;
}
/*
 * pmem_is_pmem_init -- (internal) initialize Func_is_pmem pointer
 *
 * This should be done only once - on the first call to pmem_is_pmem().
 * If PMEM_IS_PMEM_FORCE is set, it would override the default behavior
 * of pmem_is_pmem().
 *
 * 'init' is a tiny state machine protected by CAS: 0 = not started,
 * 1 = in progress (one thread won the CAS), 2 = done. Losing threads
 * spin until the winner publishes state 2.
 */
static void
pmem_is_pmem_init(void)
{
	LOG(3, NULL);
	static volatile unsigned init;
	while (init != 2) {
		if (!util_bool_compare_and_swap32(&init, 0, 1))
			continue;
		/*
		 * For debugging/testing, allow pmem_is_pmem() to be forced
		 * to always true or never true using environment variable
		 * PMEM_IS_PMEM_FORCE values of zero or one.
		 *
		 * This isn't #ifdef DEBUG because it has a trivial performance
		 * impact and it may turn out to be useful as a "chicken bit"
		 * for systems where pmem_is_pmem() isn't correctly detecting
		 * true persistent memory.
		 */
		char *ptr = os_getenv("PMEM_IS_PMEM_FORCE");
		if (ptr) {
			int val = atoi(ptr);
			if (val == 0)
				Is_pmem = is_pmem_never;
			else if (val == 1)
				Is_pmem = is_pmem_always;
			VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Is_pmem);
			LOG(4, "PMEM_IS_PMEM_FORCE=%d", val);
		}
		/* without a deep-flush routine nothing can be real pmem */
		if (Funcs.deep_flush == NULL)
			Is_pmem = is_pmem_never;
		if (!util_bool_compare_and_swap32(&init, 1, 2))
			FATAL("util_bool_compare_and_swap32");
	}
}
/*
 * pmem_is_pmem -- return true if entire range is persistent memory
 */
int
pmem_is_pmem(const void *addr, size_t len)
{
	LOG(10, "addr %p len %zu", addr, len);
	static int once;
	/* This is not thread-safe, but pmem_is_pmem_init() is. */
	if (once == 0) {
		pmem_is_pmem_init();
		util_fetch_and_add32(&once, 1);
	}
	VALGRIND_ANNOTATE_HAPPENS_AFTER(&Is_pmem);
	return Is_pmem(addr, len);
}
/* all flags accepted by pmem_map_fileU() */
#define PMEM_FILE_ALL_FLAGS\
	(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
/* subset of the flags that are meaningful for Device DAX */
#define PMEM_DAX_VALID_FLAGS\
	(PMEM_FILE_CREATE|PMEM_FILE_SPARSE)
/*
 * pmem_map_fileU -- create or open the file and map it to memory
 *
 * 'flags' is a combination of PMEM_FILE_* flags; 'len' and 'mode' apply
 * only with PMEM_FILE_CREATE. On success returns the mapped address and
 * optionally stores the mapped length and an is-pmem hint through
 * 'mapped_lenp' / 'is_pmemp'; returns NULL with errno set on failure.
 */
#ifndef _WIN32
static inline
#endif
void *
pmem_map_fileU(const char *path, size_t len, int flags,
	mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
	LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p "
		"is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp);
	int oerrno;
	int fd;
	int open_flags = O_RDWR; /* mapping requires read/write access */
	int delete_on_err = 0;
	int file_type = util_file_get_type(path);
#ifdef _WIN32
	open_flags |= O_BINARY;
#endif
	if (file_type == OTHER_ERROR)
		return NULL;
	/* reject unknown flags */
	if (flags & ~(PMEM_FILE_ALL_FLAGS)) {
		ERR("invalid flag specified %x", flags);
		errno = EINVAL;
		return NULL;
	}
	if (file_type == TYPE_DEVDAX) {
		/* Device DAX has a fixed size; most flags do not apply */
		if (flags & ~(PMEM_DAX_VALID_FLAGS)) {
			ERR("flag unsupported for Device DAX %x", flags);
			errno = EINVAL;
			return NULL;
		} else {
			/* we are ignoring all of the flags */
			flags = 0;
			ssize_t actual_len = util_file_get_size(path);
			if (actual_len < 0) {
				ERR("unable to read Device DAX size");
				errno = EINVAL;
				return NULL;
			}
			if (len != 0 && len != (size_t)actual_len) {
				ERR("Device DAX length must be either 0 or "
					"the exact size of the device: %zu",
					actual_len);
				errno = EINVAL;
				return NULL;
			}
			len = 0;
		}
	}
	if (flags & PMEM_FILE_CREATE) {
		if ((os_off_t)len < 0) {
			ERR("invalid file length %zu", len);
			errno = EINVAL;
			return NULL;
		}
		open_flags |= O_CREAT;
	}
	if (flags & PMEM_FILE_EXCL)
		open_flags |= O_EXCL;
	/* 'len' and PMEM_FILE_CREATE must be used together or not at all */
	if ((len != 0) && !(flags & PMEM_FILE_CREATE)) {
		ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if ((len == 0) && (flags & PMEM_FILE_CREATE)) {
		ERR("zero 'len' not allowed with PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) {
		ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE");
		errno = EINVAL;
		return NULL;
	}
	if (flags & PMEM_FILE_TMPFILE) {
		if ((fd = util_tmpfile(path,
					OS_DIR_SEP_STR"pmem.XXXXXX",
					open_flags & O_EXCL)) < 0) {
			LOG(2, "failed to create temporary file at \"%s\"",
				path);
			return NULL;
		}
	} else {
		if ((fd = os_open(path, open_flags, mode)) < 0) {
			ERR("!open %s", path);
			return NULL;
		}
		/* only unlink on error a file this call created exclusively */
		if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL))
			delete_on_err = 1;
	}
	if (flags & PMEM_FILE_CREATE) {
		/*
		 * Always set length of file to 'len'.
		 * (May either extend or truncate existing file.)
		 */
		if (os_ftruncate(fd, (os_off_t)len) != 0) {
			ERR("!ftruncate");
			goto err;
		}
		if ((flags & PMEM_FILE_SPARSE) == 0) {
			if ((errno = os_posix_fallocate(fd, 0,
					(os_off_t)len)) != 0) {
				ERR("!posix_fallocate");
				goto err;
			}
		}
	} else {
		/* existing file (or Device DAX): map its current size */
		ssize_t actual_size = util_fd_get_size(fd);
		if (actual_size < 0) {
			ERR("stat %s: negative size", path);
			errno = EINVAL;
			goto err;
		}
		len = (size_t)actual_size;
	}
	void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX);
	if (addr == NULL)
		goto err;
	if (mapped_lenp != NULL)
		*mapped_lenp = len;
	if (is_pmemp != NULL)
		*is_pmemp = pmem_is_pmem(addr, len);
	LOG(3, "returning %p", addr);
	VALGRIND_REGISTER_PMEM_MAPPING(addr, len);
	VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0);
	(void) os_close(fd);
	return addr;
err:
	/* common cleanup: close fd, optionally unlink, preserve errno */
	oerrno = errno;
	(void) os_close(fd);
	if (delete_on_err)
		(void) os_unlink(path);
	errno = oerrno;
	return NULL;
}
#ifndef _WIN32
/*
 * pmem_map_file -- create or open the file and map it to memory
 */
void *
pmem_map_file(const char *path, size_t len, int flags,
	mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
	return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp);
}
#else
/*
 * pmem_map_fileW -- create or open the file and map it to memory
 * (wide-character path variant: converts the path to UTF-8 and delegates
 * to pmem_map_fileU())
 */
void *
pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
	size_t *mapped_lenp, int *is_pmemp) {
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;
	void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp,
		is_pmemp);
	util_free_UTF8(upath);
	return ret;
}
#endif
/*
 * pmem_unmap -- unmap the specified region
 *
 * Propagates util_unmap()'s return value.
 */
int
pmem_unmap(void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
#ifndef _WIN32
	util_range_unregister(addr, len);
#endif
	VALGRIND_REMOVE_PMEM_MAPPING(addr, len);
	return util_unmap(addr, len);
}
/*
 * pmem_memmove -- memmove to pmem
 *
 * Unless PMEM_F_MEM_NODRAIN or PMEM_F_MEM_NOFLUSH is set in 'flags',
 * the copy is followed by a drain.
 */
void *
pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
			pmemdest, src, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy -- memcpy to pmem
 *
 * Delegates to the same (overlap-safe) memmove implementation.
 */
void *
pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
			pmemdest, src, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset -- memset to pmem
 *
 * Unless PMEM_F_MEM_NODRAIN or PMEM_F_MEM_NOFLUSH is set in 'flags',
 * the set is followed by a drain.
 */
void *
pmem_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x",
			pmemdest, c, len, flags);
#ifdef DEBUG
	if (flags & ~PMEM_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	PMEM_API_START();
	Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN,
			Funcs.flush);
	if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
		pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memmove_nodrain -- memmove to pmem without hw drain
 *
 * Flushes but does not fence; caller must eventually call pmem_drain().
 */
void *
pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy_nodrain -- memcpy to pmem without hw drain
 *
 * Same implementation as pmem_memmove_nodrain() (overlap-safe).
 */
void *
pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memmove_persist -- memmove to pmem
 *
 * Copy + flush + drain: the destination range is persistent on return.
 */
void *
pmem_memmove_persist(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memcpy_persist -- memcpy to pmem
 *
 * Copy + flush + drain: the destination range is persistent on return.
 */
void *
pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
{
	LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
	PMEM_API_START();
	Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset_nodrain -- memset to pmem without hw drain
 *
 * Flushes but does not fence; caller must eventually call pmem_drain().
 */
void *
pmem_memset_nodrain(void *pmemdest, int c, size_t len)
{
	LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
	PMEM_API_START();
	Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
	PMEM_API_END();
	return pmemdest;
}
/*
 * pmem_memset_persist -- memset to pmem
 *
 * Set + flush + drain: the destination range is persistent on return.
 */
void *
pmem_memset_persist(void *pmemdest, int c, size_t len)
{
	LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
	PMEM_API_START();
	Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
	pmem_drain();
	PMEM_API_END();
	return pmemdest;
}
/*
 * memmove_nodrain_libc -- (internal) memmove to pmem using libc
 */
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
	unsigned flags, flush_func flush)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
		flags);
	void *dest = memmove(pmemdest, src, len);
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		flush(dest, len);
	return dest;
}
/*
 * memset_nodrain_libc -- (internal) memset to pmem using libc
 */
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
	flush_func flush)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
		flags);
	void *dest = memset(pmemdest, c, len);
	if ((flags & PMEM_F_MEM_NOFLUSH) == 0)
		flush(dest, len);
	return dest;
}
/*
 * flush_empty -- (internal) do not flush the CPU cache
 *
 * Installed as Funcs.flush when eADR makes explicit flushing unnecessary;
 * delegates to flush_empty_nolog().
 */
static void
flush_empty(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);
	flush_empty_nolog(addr, len);
}
/*
 * fence_empty -- (internal) issue the fence instruction
 *
 * NOTE(review): despite its name this emits no fence -- it is used when
 * the flush instruction has a built-in fence (see pmem_init()).
 */
static void
fence_empty(void)
{
	LOG(15, NULL);
	VALGRIND_DO_FENCE;
}
/*
 * pmem_init -- load-time initialization for pmem.c
 *
 * Queries the arch layer for flush/fence/mem* routines, then selects the
 * Funcs implementations based on eADR detection and the PMEM_NO_FLUSH /
 * PMEM_NO_GENERIC_MEMCPY environment variables.
 */
void
pmem_init(void)
{
	LOG(3, NULL);
	struct pmem2_arch_info info;
	info.memmove_nodrain = NULL;
	info.memset_nodrain = NULL;
	info.flush = NULL;
	info.fence = NULL;
	info.flush_has_builtin_fence = 0;
	pmem2_arch_init(&info);
	/*
	 * Decide whether CPU cache flushing is required:
	 * PMEM_NO_FLUSH=1 forces it off, PMEM_NO_FLUSH=0 forces it on,
	 * otherwise eADR detection decides.
	 */
	int flush;
	char *e = os_getenv("PMEM_NO_FLUSH");
	if (e && (strcmp(e, "1") == 0)) {
		flush = 0;
		LOG(3, "Forced not flushing CPU_cache");
	} else if (e && (strcmp(e, "0") == 0)) {
		flush = 1;
		LOG(3, "Forced flushing CPU_cache");
	} else if (pmem2_auto_flush() == 1) {
		flush = 0;
		LOG(3, "Not flushing CPU_cache, eADR detected");
	} else {
		flush = 1;
		LOG(3, "Flushing CPU cache");
	}
	/* deep flush always uses the real arch flush routine */
	Funcs.deep_flush = info.flush;
	if (flush) {
		Funcs.flush = info.flush;
		Funcs.memmove_nodrain = info.memmove_nodrain;
		Funcs.memset_nodrain = info.memset_nodrain;
		if (info.flush_has_builtin_fence)
			Funcs.fence = fence_empty;
		else
			Funcs.fence = info.fence;
	} else {
		Funcs.memmove_nodrain = info.memmove_nodrain_eadr;
		Funcs.memset_nodrain = info.memset_nodrain_eadr;
		Funcs.flush = flush_empty;
		Funcs.fence = info.fence;
	}
	char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
	long long no_generic = 0;
	if (ptr)
		no_generic = atoll(ptr);
	/*
	 * NOTE(review): when the arch provides memmove_nodrain, the 'else'
	 * branches below overwrite the eADR variants selected above --
	 * verify this deviation from upstream PMDK is intended.
	 */
	if (info.memmove_nodrain == NULL) {
		if (no_generic) {
			Funcs.memmove_nodrain = memmove_nodrain_libc;
			LOG(3, "using libc memmove");
		} else {
			Funcs.memmove_nodrain = memmove_nodrain_generic;
			LOG(3, "using generic memmove");
		}
	} else {
		Funcs.memmove_nodrain = info.memmove_nodrain;
	}
	if (info.memset_nodrain == NULL) {
		if (no_generic) {
			Funcs.memset_nodrain = memset_nodrain_libc;
			LOG(3, "using libc memset");
		} else {
			Funcs.memset_nodrain = memset_nodrain_generic;
			LOG(3, "using generic memset");
		}
	} else {
		Funcs.memset_nodrain = info.memset_nodrain;
	}
	if (Funcs.flush == flush_empty)
		LOG(3, "not flushing CPU cache");
	else if (Funcs.flush != Funcs.deep_flush)
		FATAL("invalid flush function address");
	pmem_os_init(&Is_pmem);
}
/*
 * pmem_deep_persist -- perform deep persist on a memory range
 *
 * It merely acts as wrapper around an msync call in most cases, the only
 * exception is the case of an mmap'ed DAX device on Linux.
 *
 * Returns pmem_deep_drain()'s result (0 on success).
 */
int
pmem_deep_persist(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	pmem_deep_flush(addr, len);
	return pmem_deep_drain(addr, len);
}
/*
 * pmem_deep_drain -- perform deep drain on a memory range
 *
 * Delegates to the OS-specific deep-drain implementation.
 */
int
pmem_deep_drain(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	return os_range_deep_common((uintptr_t)addr, len);
}
#if VG_PMEMCHECK_ENABLED
/*
 * pmem_emit_log -- logs library and function names to pmemcheck store log
 */
void
pmem_emit_log(const char *func, int order)
{
	util_emit_log("libpmem", func, order);
}
#endif
#if FAULT_INJECTION
/*
 * pmem_inject_fault_at -- arrange for the nth allocation of the given type
 * at location 'at' to fail (testing hook; delegates to core)
 */
void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
	const char *at)
{
	core_inject_fault_at(type, nth, at);
}
/*
 * pmem_fault_injection_enabled -- report whether fault injection is enabled
 */
int
pmem_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif
| 21,858 | 21.817328 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem/pmem_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_windows.c -- pmem utilities with OS-specific implementation
*/
#include <memoryapi.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
#include "win_mmap.h"
#include "sys/mman.h"
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
/* signature of QueryVirtualMemoryInformation (available since Win10 RS1) */
typedef BOOL (WINAPI *PQVM)(
		HANDLE, const void *,
		enum WIN32_MEMORY_INFORMATION_CLASS, PVOID,
		SIZE_T, PSIZE_T);
/* resolved dynamically in pmem_os_init(); NULL when unavailable */
static PQVM Func_qvmi = NULL;
#endif
/*
 * is_direct_mapped -- (internal) for each page in the given region
 * checks with MM, if it's direct mapped.
 *
 * Conservative: any lookup failure is treated as "not direct mapped".
 */
static int
is_direct_mapped(const void *begin, const void *end)
{
	LOG(3, "begin %p end %p", begin, end);
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
	int retval = 1;
	WIN32_MEMORY_REGION_INFORMATION region_info;
	SIZE_T bytes_returned;
	if (Func_qvmi == NULL) {
		LOG(4, "QueryVirtualMemoryInformation not supported, "
			"assuming non-DAX.");
		return 0;
	}
	/* widen to full pages before querying per-page */
	const void *begin_aligned = (const void *)rounddown((intptr_t)begin,
		Pagesize);
	const void *end_aligned = (const void *)roundup((intptr_t)end,
		Pagesize);
	for (const void *page = begin_aligned;
			page < end_aligned;
			page = (const void *)((char *)page + Pagesize)) {
		if (Func_qvmi(GetCurrentProcess(), page,
				MemoryRegionInfo, &region_info,
				sizeof(region_info), &bytes_returned)) {
			retval = region_info.DirectMapped;
		} else {
			LOG(4, "QueryVirtualMemoryInformation failed, assuming "
				"non-DAX. Last error: %08x", GetLastError());
			retval = 0;
		}
		if (retval == 0) {
			LOG(4, "page %p is not direct mapped", page);
			break;
		}
	}
	return retval;
#else
	/* if the MM API is not available the safest answer is NO */
	return 0;
#endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */
}
/*
 * is_pmem_detect -- implement pmem_is_pmem()
 *
 * This function returns true only if the entire range can be confirmed
 * as being direct access persistent memory. Finding any part of the
 * range is not direct access, or failing to look up the information
 * because it is unmapped or because any sort of error happens, just
 * results in returning false.
 */
int
is_pmem_detect(const void *addr, size_t len)
{
	LOG(3, "addr %p len %zu", addr, len);
	if (len == 0)
		return 0;
	/* clamp so begin + len cannot wrap around the address space */
	if (len > UINTPTR_MAX - (uintptr_t)addr) {
		len = UINTPTR_MAX - (uintptr_t)addr;
		LOG(4, "limit len to %zu to not get beyond address space", len);
	}
	int retval = 1;
	const void *begin = addr;
	const void *end = (const void *)((char *)addr + len);
	LOG(4, "begin %p end %p", begin, end);
	/* walk the tracked file mappings that overlap [begin, end) */
	AcquireSRWLockShared(&FileMappingQLock);
	PFILE_MAPPING_TRACKER mt;
	PMDK_SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) {
		if (mt->BaseAddress >= end) {
			LOG(4, "ignoring all mapped ranges beyond given range");
			break;
		}
		if (mt->EndAddress <= begin) {
			LOG(4, "skipping all mapped ranges before given range");
			continue;
		}
		if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) {
			LOG(4, "tracked range [%p, %p) is not direct mapped",
				mt->BaseAddress, mt->EndAddress);
			retval = 0;
			break;
		}
		/*
		 * If there is a gap between the given region that we process
		 * currently and the mapped region in our tracking list, we
		 * need to process the gap by taking the long route of asking
		 * MM for each page in that range.
		 */
		if (begin < mt->BaseAddress &&
			!is_direct_mapped(begin, mt->BaseAddress)) {
			LOG(4, "untracked range [%p, %p) is not direct mapped",
				begin, mt->BaseAddress);
			retval = 0;
			break;
		}
		/* push our begin to reflect what we have already processed */
		begin = mt->EndAddress;
	}
	/*
	 * If we still have a range to verify, check with MM if the entire
	 * region is direct mapped.
	 */
	if (begin < end && !is_direct_mapped(begin, end)) {
		LOG(4, "untracked end range [%p, %p) is not direct mapped",
			begin, end);
		retval = 0;
	}
	ReleaseSRWLockShared(&FileMappingQLock);
	LOG(4, "returning %d", retval);
	return retval;
}
/*
 * pmem_map_register -- memory map file and register mapping
 */
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
	/* there is no device dax on windows */
	ASSERTeq(is_dev_dax, 0);
	return util_map(fd, 0, len, MAP_SHARED, 0, 0, NULL);
}
/*
 * pmem_os_init -- os-dependent part of pmem initialization
 *
 * Installs is_pmem_detect() and resolves QueryVirtualMemoryInformation
 * at runtime (not available before Windows 10 RS1).
 */
void
pmem_os_init(is_pmem_func *func)
{
	LOG(3, NULL);
	*func = is_pmem_detect;
#if NTDDI_VERSION >= NTDDI_WIN10_RS1
	Func_qvmi = (PQVM)GetProcAddress(
			GetModuleHandle(TEXT("KernelBase.dll")),
			"QueryVirtualMemoryInformation");
#endif
}
| 6,186 | 27.643519 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/auto_flush_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
#ifndef PMEM2_AUTO_FLUSH_WINDOWS_H
#define PMEM2_AUTO_FLUSH_WINDOWS_H 1
#define ACPI_SIGNATURE 0x41435049 /* hex value of ACPI signature */
#define NFIT_REV_SIGNATURE 0x5449464e /* hex value of htonl(NFIT) signature */
#define NFIT_STR_SIGNATURE "NFIT"
#define NFIT_SIGNATURE_LEN 4
#define NFIT_OEM_ID_LEN 6
#define NFIT_OEM_TABLE_ID_LEN 8
#define NFIT_MAX_STRUCTURES 8
#define PCS_RESERVED 3
#define PCS_RESERVED_2 4
#define PCS_TYPE_NUMBER 7
/* check if bit on 'bit' position in number 'num' is set */
#define CHECK_BIT(num, bit) (((num) >> (bit)) & 1)
/*
 * sets alignment of members of structure
 */
#pragma pack(1)
/* platform capabilities structure (field layout per the ACPI NFIT spec) */
struct platform_capabilities
{
	uint16_t type;
	uint16_t length;
	uint8_t highest_valid;
	uint8_t reserved[PCS_RESERVED];
	uint32_t capabilities; /* capability bit mask (see CHECK_BIT) */
	uint8_t reserved2[PCS_RESERVED_2];
};
/* NFIT table header (field layout per the ACPI NFIT spec) */
struct nfit_header
{
	uint8_t signature[NFIT_SIGNATURE_LEN]; /* expected to be "NFIT" */
	uint32_t length;
	uint8_t revision;
	uint8_t checksum;
	uint8_t oem_id[NFIT_OEM_ID_LEN];
	uint8_t oem_table_id[NFIT_OEM_TABLE_ID_LEN];
	uint32_t oem_revision;
	uint8_t creator_id[4];
	uint32_t creator_revision;
	uint32_t reserved;
};
#pragma pack()
#endif
| 1,215 | 22.843137 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/deep_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush_linux.c -- deep_flush functionality
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "map.h"
#include "os.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
/*
 * pmem2_deep_flush_write -- perform write to deep_flush file
 * on given region_id
 *
 * Best-effort: inability to open/read/write the sysfs file is logged and
 * reported as success (0); only path-formatting failure returns an error.
 */
int
pmem2_deep_flush_write(unsigned region_id)
{
	LOG(3, "region_id %d", region_id);
	char deep_flush_path[PATH_MAX];
	int deep_flush_fd;
	char rbuf[2];
	if (util_snprintf(deep_flush_path, PATH_MAX,
		"/sys/bus/nd/devices/region%u/deep_flush", region_id) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}
	if ((deep_flush_fd = os_open(deep_flush_path, O_RDONLY)) < 0) {
		LOG(1, "!os_open(\"%s\", O_RDONLY)", deep_flush_path);
		return 0;
	}
	if (read(deep_flush_fd, rbuf, sizeof(rbuf)) != 2) {
		LOG(1, "!read(%d)", deep_flush_fd);
		goto end;
	}
	/* the kernel reports "0\n" when no deep flush is required */
	if (rbuf[0] == '0' && rbuf[1] == '\n') {
		LOG(3, "Deep flushing not needed");
		goto end;
	}
	os_close(deep_flush_fd);
	/* reopen for writing and trigger the flush by writing "1" */
	if ((deep_flush_fd = os_open(deep_flush_path, O_WRONLY)) < 0) {
		LOG(1, "Cannot open deep_flush file %s to write",
			deep_flush_path);
		return 0;
	}
	if (write(deep_flush_fd, "1", 1) != 1) {
		LOG(1, "Cannot write to deep_flush file %d", deep_flush_fd);
		goto end;
	}
end:
	os_close(deep_flush_fd);
	return 0;
}
/*
 * pmem2_deep_flush_dax -- dispatch a deep flush based on the mapped file
 * type: flush file buffers for a regular file, or trigger the region's
 * sysfs deep_flush for Device DAX.
 *
 * Returns 0 on success or a negative error code from the helper that
 * failed. Fix: the inner 'int ret' declaration shadowed the outer one
 * (-Wshadow); the outer variable is now reused throughout.
 */
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
	int ret;
	enum pmem2_file_type type = map->source.value.ftype;
	if (type == PMEM2_FTYPE_REG) {
		/* regular file: flushing file buffers is sufficient */
		ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
		if (ret) {
			LOG(1, "cannot flush buffers addr %p len %zu",
					ptr, size);
			return ret;
		}
	} else if (type == PMEM2_FTYPE_DEVDAX) {
		/* device DAX: write to the region's sysfs deep_flush file */
		unsigned region_id;
		ret = pmem2_get_region_id(&map->source, &region_id);
		if (ret < 0) {
			LOG(1, "cannot find region id for dev %lu",
				map->source.value.st_rdev);
			return ret;
		}
		ret = pmem2_deep_flush_write(region_id);
		if (ret) {
			LOG(1, "cannot write to deep_flush file for region %d",
				region_id);
			return ret;
		}
	} else {
		/* callers must not pass any other file type here */
		ASSERT(0);
	}
	return 0;
}
| 2,395 | 20.392857 | 67 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.h -- internal definitions for pmem2_config
*/
#ifndef PMEM2_CONFIG_H
#define PMEM2_CONFIG_H
#include "libpmem2.h"
/* marker for an unset/invalid granularity */
#define PMEM2_GRANULARITY_INVALID ((enum pmem2_granularity) (-1))
#define PMEM2_ADDRESS_ANY 0 /* default value of the address request type */
struct pmem2_config {
	/* offset from the beginning of the file */
	size_t offset;
	size_t length; /* length of the mapping */
	void *addr; /* address of the mapping */
	int addr_request; /* address request type (default PMEM2_ADDRESS_ANY) */
	/* persistence granularity requested by user */
	enum pmem2_granularity requested_max_granularity;
	enum pmem2_sharing_type sharing; /* the way the file will be mapped */
	unsigned protection_flag;
};
/* set all fields of *cfg to their defaults */
void pmem2_config_init(struct pmem2_config *cfg);
/* validate the configured length against file size and alignment */
int pmem2_config_validate_length(const struct pmem2_config *cfg,
	size_t file_len, size_t alignment);
/* validate the configured address against the source's alignment */
int pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
	const struct pmem2_source *src);
#endif /* PMEM2_CONFIG_H */
| 1,070 | 28.75 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map.h -- internal definitions for libpmem2
*/
#ifndef PMEM2_MAP_H
#define PMEM2_MAP_H
#include <stddef.h>
#include <stdbool.h>
#include "libpmem2.h"
#include "os.h"
#include "source.h"
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*pmem2_deep_flush_fn)(struct pmem2_map *map,
void *ptr, size_t size);
/*
 * pmem2_map -- an established mapping together with the
 * granularity-specific callbacks used to operate on it
 */
struct pmem2_map {
	void *addr; /* base address */
	size_t reserved_length; /* length of the mapping reservation */
	size_t content_length; /* length of the mapped content */
	/* effective persistence granularity */
	enum pmem2_granularity effective_granularity;

	/* selected in pmem2_set_flush_fns() based on the granularity */
	pmem2_persist_fn persist_fn;
	pmem2_flush_fn flush_fn;
	pmem2_drain_fn drain_fn;
	pmem2_deep_flush_fn deep_flush_fn;

	/* selected in pmem2_set_mem_fns() based on the granularity */
	pmem2_memmove_fn memmove_fn;
	pmem2_memcpy_fn memcpy_fn;
	pmem2_memset_fn memset_fn;

	struct pmem2_source source; /* source this mapping was created from */
};
enum pmem2_granularity get_min_granularity(bool eADR, bool is_pmem,
enum pmem2_sharing_type sharing);
struct pmem2_map *pmem2_map_find(const void *addr, size_t len);
int pmem2_register_mapping(struct pmem2_map *map);
int pmem2_unregister_mapping(struct pmem2_map *map);
void pmem2_map_init(void);
void pmem2_map_fini(void);
int pmem2_validate_offset(const struct pmem2_config *cfg,
size_t *offset, size_t alignment);
#ifdef __cplusplus
}
#endif
#endif /* map.h */
| 1,426 | 22.016129 | 67 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/deep_flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.h -- functions for deep flush functionality
*/
#ifndef PMEM2_DEEP_FLUSH_H
#define PMEM2_DEEP_FLUSH_H 1
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
int pmem2_deep_flush_write(unsigned region_id);
int pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size);
#ifdef __cplusplus
}
#endif
#endif
| 644 | 22.035714 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/persist.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist.c -- pmem2_get_[persist|flush|drain]_fn
*/
#include <errno.h>
#include <stdlib.h>
#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "os.h"
#include "persist.h"
#include "deep_flush.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
static struct pmem2_arch_info Info;
/*
 * memmove_nodrain_libc -- (internal) memmove to pmem using libc
 *
 * Copies the range with plain libc memmove, then flushes the destination
 * with the supplied flush callback unless PMEM2_F_MEM_NOFLUSH was
 * requested.  No drain/fence is issued here; that is the caller's job.
 */
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
		unsigned flags, flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
			flags);

	void *ret = memmove(pmemdest, src, len);

	int wants_flush = (flags & PMEM2_F_MEM_NOFLUSH) == 0;
	if (wants_flush)
		flush(pmemdest, len);

	return ret;
}
/*
 * memset_nodrain_libc -- (internal) memset to pmem using libc
 *
 * Fills the range with plain libc memset, then flushes it with the
 * supplied flush callback unless PMEM2_F_MEM_NOFLUSH was requested.
 * Draining is left to the caller.
 */
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
		flush_func flush)
{
#ifdef DEBUG
	if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
		ERR("invalid flags 0x%x", flags);
#endif
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
			flags);

	void *ret = memset(pmemdest, c, len);

	int wants_flush = (flags & PMEM2_F_MEM_NOFLUSH) == 0;
	if (wants_flush)
		flush(pmemdest, len);

	return ret;
}
/*
 * pmem2_persist_init -- initialize persist module
 *
 * Lets the architecture-specific code (pmem2_arch_init) install its
 * flush/fence and mem* primitives, then fills in fallbacks for any
 * memmove/memset routine that was left unset.  Setting the
 * PMEM_NO_GENERIC_MEMCPY environment variable to a non-zero value selects
 * the plain libc fallbacks instead of the generic ones (which guarantee
 * at least 8-byte stores, see memops_generic.c).
 */
void
pmem2_persist_init(void)
{
	/* start from a clean slate; pmem2_arch_init() fills what it can */
	Info.memmove_nodrain = NULL;
	Info.memset_nodrain = NULL;
	Info.memmove_nodrain_eadr = NULL;
	Info.memset_nodrain_eadr = NULL;
	Info.flush = NULL;
	Info.fence = NULL;
	Info.flush_has_builtin_fence = 0;

	pmem2_arch_init(&Info);

	char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
	long long no_generic = 0;
	if (ptr)
		no_generic = atoll(ptr);

	/* memmove fallback, only if the arch code did not provide one */
	if (Info.memmove_nodrain == NULL) {
		if (no_generic) {
			Info.memmove_nodrain = memmove_nodrain_libc;
			Info.memmove_nodrain_eadr = memmove_nodrain_libc;
			LOG(3, "using libc memmove");
		} else {
			Info.memmove_nodrain = memmove_nodrain_generic;
			Info.memmove_nodrain_eadr = memmove_nodrain_generic;
			LOG(3, "using generic memmove");
		}
	}

	/* same fallback policy for memset */
	if (Info.memset_nodrain == NULL) {
		if (no_generic) {
			Info.memset_nodrain = memset_nodrain_libc;
			Info.memset_nodrain_eadr = memset_nodrain_libc;
			LOG(3, "using libc memset");
		} else {
			Info.memset_nodrain = memset_nodrain_generic;
			Info.memset_nodrain_eadr = memset_nodrain_generic;
			LOG(3, "using generic memset");
		}
	}
}
/*
* pmem2_drain -- wait for any PM stores to drain from HW buffers
*/
static void
pmem2_drain(void)
{
LOG(15, NULL);
Info.fence();
}
/*
* pmem2_log_flush -- log the flush attempt for the given range
*/
static inline void
pmem2_log_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
}
/*
* pmem2_flush_nop -- NOP version of the flush routine, used in cases where
* memory behind the mapping is already in persistence domain
*/
static void
pmem2_flush_nop(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
/* nothing more to do, other than telling pmemcheck about it */
VALGRIND_DO_FLUSH(addr, len);
}
/*
* pmem2_flush_cpu_cache -- flush processor cache for the given range
*/
static void
pmem2_flush_cpu_cache(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
Info.flush(addr, len);
}
/*
* pmem2_persist_noflush -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_noflush(const void *addr, size_t len)
{
pmem2_flush_nop(addr, len);
pmem2_drain();
}
/*
* pmem2_persist_cpu_cache -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_cpu_cache(const void *addr, size_t len)
{
pmem2_flush_cpu_cache(addr, len);
pmem2_drain();
}
/*
 * pmem2_flush_file_buffers -- flush CPU and OS caches for the given range
 *
 * The range may span several registered mappings (with possible holes
 * between them); each overlapping mapping is flushed separately via
 * pmem2_flush_file_buffers_os().  Returns 0 on success; when more than
 * one per-mapping flush fails, the error code of the last failure is
 * returned.  errno is preserved across the call.
 */
static int
pmem2_flush_file_buffers(const void *addr, size_t len, int autorestart)
{
	int olderrno = errno;

	pmem2_log_flush(addr, len);

	/*
	 * Flushing using OS-provided mechanisms requires that the address
	 * be a multiple of the page size.
	 * Align address down and change len so that [addr, addr + len) still
	 * contains the initial range.
	 */

	/* round address down to page boundary */
	uintptr_t new_addr = ALIGN_DOWN((uintptr_t)addr, Pagesize);

	/* increase len by the amount we gain when we round addr down */
	len += (uintptr_t)addr - new_addr;

	addr = (const void *)new_addr;

	int ret = 0;

	/*
	 * Find all the mappings overlapping with the [addr, addr + len) range
	 * and flush them, one by one.
	 */
	do {
		struct pmem2_map *map = pmem2_map_find(addr, len);
		if (!map)
			break;

		size_t flush;
		size_t remaining = map->reserved_length;
		if (map->addr < addr) {
			/*
			 * Addr is inside of the mapping, so we have to decrease
			 * the remaining length by an offset from the start
			 * of our mapping.
			 */
			remaining -= (uintptr_t)addr - (uintptr_t)map->addr;
		} else if (map->addr == addr) {
			/* perfect match, there's nothing to do in this case */
		} else {
			/*
			 * map->addr > addr, so we have to skip the hole
			 * between addr and map->addr.
			 */
			len -= (uintptr_t)map->addr - (uintptr_t)addr;
			addr = map->addr;
		}

		/* flush only as much of the range as this mapping covers */
		if (len > remaining)
			flush = remaining;
		else
			flush = len;

		int ret1 = pmem2_flush_file_buffers_os(map, addr, flush,
				autorestart);
		if (ret1 != 0)
			ret = ret1; /* remember the failure but keep going */

		addr = ((const char *)addr) + flush;
		len -= flush;
	} while (len > 0);

	errno = olderrno;

	return ret;
}
/*
 * pmem2_persist_pages -- persist a page-granularity range by flushing the
 * file buffers; aborts the process if the flush fails, because there is
 * no way to report the error to the caller
 */
static void
pmem2_persist_pages(const void *addr, size_t len)
{
	/*
	 * Restarting on EINTR in general is a bad idea, but we don't have
	 * any way to communicate the failure outside.
	 */
	if (pmem2_flush_file_buffers(addr, len, 1 /* autorestart */)) {
		/*
		 * 1) There's no way to propagate this error. Silently ignoring
		 * it would lead to data corruption.
		 * 2) non-pmem code path shouldn't be used in production.
		 *
		 * The only sane thing to do is to crash the application. Sorry.
		 */
		abort();
	}
}
/*
* pmem2_drain_nop -- variant of pmem2_drain for page granularity;
* it is a NOP because the flush part has built-in drain
*/
static void
pmem2_drain_nop(void)
{
LOG(15, NULL);
}
/*
* pmem2_deep_flush_page -- do nothing - pmem2_persist_fn already did msync
*/
int
pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
return 0;
}
/*
* pmem2_deep_flush_cache -- flush buffers for fsdax or write
* to deep_flush for DevDax
*/
int
pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
enum pmem2_file_type type = map->source.value.ftype;
/*
* XXX: this should be moved to pmem2_deep_flush_dax
* while refactoring abstraction
*/
if (type == PMEM2_FTYPE_DEVDAX)
pmem2_persist_cpu_cache(ptr, size);
int ret = pmem2_deep_flush_dax(map, ptr, size);
if (ret < 0) {
LOG(1, "cannot perform deep flush cache for map %p", map);
return ret;
}
return 0;
}
/*
 * pmem2_deep_flush_byte -- flush cpu cache and perform deep flush for dax
 *
 * Used for byte-granularity (eADR) mappings.  Anonymous sources are
 * rejected -- there is no backing device to deep-flush.  For Device DAX
 * the CPU cache is additionally flushed and drained before the deep
 * flush is requested.
 */
int
pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	if (map->source.type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support deep flush");
		return PMEM2_E_NOSUPP;
	}

	ASSERT(map->source.type == PMEM2_SOURCE_FD ||
		map->source.type == PMEM2_SOURCE_HANDLE);

	enum pmem2_file_type type = map->source.value.ftype;

	/*
	 * XXX: this should be moved to pmem2_deep_flush_dax
	 * while refactoring abstraction
	 */
	if (type == PMEM2_FTYPE_DEVDAX)
		pmem2_persist_cpu_cache(ptr, size);

	int ret = pmem2_deep_flush_dax(map, ptr, size);
	if (ret < 0) {
		LOG(1, "cannot perform deep flush byte for map %p", map);
		return ret;
	}

	return 0;
}
/*
* pmem2_set_flush_fns -- set function pointers related to flushing
*/
void
pmem2_set_flush_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->persist_fn = pmem2_persist_pages;
map->flush_fn = pmem2_persist_pages;
map->drain_fn = pmem2_drain_nop;
map->deep_flush_fn = pmem2_deep_flush_page;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->persist_fn = pmem2_persist_cpu_cache;
map->flush_fn = pmem2_flush_cpu_cache;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_cache;
break;
case PMEM2_GRANULARITY_BYTE:
map->persist_fn = pmem2_persist_noflush;
map->flush_fn = pmem2_flush_nop;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_byte;
break;
default:
abort();
}
}
/*
* pmem2_get_persist_fn - return a pointer to a function responsible for
* persisting data in range owned by pmem2_map
*/
pmem2_persist_fn
pmem2_get_persist_fn(struct pmem2_map *map)
{
return map->persist_fn;
}
/*
* pmem2_get_flush_fn - return a pointer to a function responsible for
* flushing data in range owned by pmem2_map
*/
pmem2_flush_fn
pmem2_get_flush_fn(struct pmem2_map *map)
{
return map->flush_fn;
}
/*
* pmem2_get_drain_fn - return a pointer to a function responsible for
* draining flushes in range owned by pmem2_map
*/
pmem2_drain_fn
pmem2_get_drain_fn(struct pmem2_map *map)
{
return map->drain_fn;
}
/*
* pmem2_memmove_nonpmem -- mem[move|cpy] followed by an msync
*/
static void *
pmem2_memmove_nonpmem(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_nonpmem -- memset followed by an msync
*/
static void *
pmem2_memset_nonpmem(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove -- mem[move|cpy] to pmem
*/
static void *
pmem2_memmove(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset -- memset to pmem
*/
static void *
pmem2_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove_eadr -- mem[move|cpy] to pmem, platform supports eADR
*/
static void *
pmem2_memmove_eadr(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain_eadr(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_eadr -- memset to pmem, platform supports eADR
*/
static void *
pmem2_memset_eadr(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain_eadr(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_set_mem_fns -- set function pointers related to mem[move|cpy|set]
*/
void
pmem2_set_mem_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->memmove_fn = pmem2_memmove_nonpmem;
map->memcpy_fn = pmem2_memmove_nonpmem;
map->memset_fn = pmem2_memset_nonpmem;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->memmove_fn = pmem2_memmove;
map->memcpy_fn = pmem2_memmove;
map->memset_fn = pmem2_memset;
break;
case PMEM2_GRANULARITY_BYTE:
map->memmove_fn = pmem2_memmove_eadr;
map->memcpy_fn = pmem2_memmove_eadr;
map->memset_fn = pmem2_memset_eadr;
break;
default:
abort();
}
}
/*
* pmem2_get_memmove_fn - return a pointer to a function
*/
pmem2_memmove_fn
pmem2_get_memmove_fn(struct pmem2_map *map)
{
return map->memmove_fn;
}
/*
* pmem2_get_memcpy_fn - return a pointer to a function
*/
pmem2_memcpy_fn
pmem2_get_memcpy_fn(struct pmem2_map *map)
{
return map->memcpy_fn;
}
/*
* pmem2_get_memset_fn - return a pointer to a function
*/
pmem2_memset_fn
pmem2_get_memset_fn(struct pmem2_map *map)
{
return map->memset_fn;
}
#if VG_PMEMCHECK_ENABLED
/*
* pmem2_emit_log -- logs library and function names to pmemcheck store log
*/
void
pmem2_emit_log(const char *func, int order)
{
util_emit_log("libpmem2", func, order);
}
#endif
| 13,665 | 21.58843 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/persist_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist_posix.c -- POSIX-specific part of persist implementation
*/
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
/*
 * pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
 * range
 *
 * addr is expected to be page-aligned (the caller in persist.c rounds it
 * down before calling).  When autorestart is non-zero the msync is retried
 * on EINTR.  Returns 0 on success or PMEM2_E_ERRNO on failure.  The map
 * argument is unused in this POSIX implementation.
 */
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
	int autorestart)
{
	/*
	 * msync accepts addresses aligned to the page boundary, so we may sync
	 * more and part of it may have been marked as undefined/inaccessible.
	 * Msyncing such memory is not a bug, so as a workaround temporarily
	 * disable error reporting.
	 */
	VALGRIND_DO_DISABLE_ERROR_REPORTING;

	int ret;
	do {
		ret = msync((void *)addr, len, MS_SYNC);

		if (ret < 0) {
			ERR("!msync");
		} else {
			/* full flush */
			VALGRIND_DO_PERSIST((uintptr_t)addr, len);
		}
	} while (autorestart && ret < 0 && errno == EINTR);

	VALGRIND_DO_ENABLE_ERROR_REPORTING;

	if (ret)
		return PMEM2_E_ERRNO;

	return 0;
}
| 1,126 | 21.098039 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/pmem2_utils_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "source.h"
/*
 * pmem2_get_type_from_stat -- determine type of file based on output of stat
 * syscall
 *
 * Regular files and directories are classified directly from st_mode.
 * A character device is recognized as Device DAX by resolving its
 * /sys/dev/char/<major>:<minor>/subsystem symlink and checking that it
 * points into the "dax" subsystem; any other file type is rejected with
 * PMEM2_E_INVALID_FILE_TYPE.
 */
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
	if (S_ISREG(st->st_mode)) {
		*type = PMEM2_FTYPE_REG;
		return 0;
	}

	if (S_ISDIR(st->st_mode)) {
		*type = PMEM2_FTYPE_DIR;
		return 0;
	}

	if (!S_ISCHR(st->st_mode)) {
		ERR("file type 0%o not supported", st->st_mode & S_IFMT);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	/* build the sysfs path describing the char device's subsystem */
	char spath[PATH_MAX];
	int ret = util_snprintf(spath, PATH_MAX,
		"/sys/dev/char/%u:%u/subsystem",
		os_major(st->st_rdev), os_minor(st->st_rdev));
	if (ret < 0) {
		/* impossible */
		ERR("!snprintf");
		ASSERTinfo(0, "snprintf failed");
		return PMEM2_E_ERRNO;
	}

	LOG(4, "device subsystem path \"%s\"", spath);

	/* resolve the symlink to learn the actual subsystem name */
	char npath[PATH_MAX];
	char *rpath = realpath(spath, npath);
	if (rpath == NULL) {
		ERR("!realpath \"%s\"", spath);
		return PMEM2_E_ERRNO;
	}

	/* the last path component must be exactly "dax" */
	char *basename = strrchr(rpath, '/');
	if (!basename || strcmp("dax", basename + 1) != 0) {
		LOG(3, "%s path does not match device dax prefix path", rpath);
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	*type = PMEM2_FTYPE_DEVDAX;

	return 0;
}
| 1,507 | 20.239437 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/source_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* source_windows.c -- windows specific pmem2_source implementation
*/
#include <Windows.h>
#include "config.h"
#include "libpmem2.h"
#include "config.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
*src = NULL;
if (fd < 0)
return PMEM2_E_INVALID_FILE_HANDLE;
HANDLE handle = (HANDLE)_get_osfhandle(fd);
if (handle == INVALID_HANDLE_VALUE) {
/*
* _get_osfhandle aborts in an error case, so technically
* this is dead code. But according to MSDN it is
* setting an errno on failure, so we can return it in case of
* "windows magic" happen and this function "accidentally"
* will not abort.
*/
ERR("!_get_osfhandle");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
return pmem2_source_from_handle(src, handle);
}
/*
* pmem2_win_stat -- retrieve information about handle
*/
static int
pmem2_win_stat(HANDLE handle, BY_HANDLE_FILE_INFORMATION *info)
{
if (!GetFileInformationByHandle(handle, info)) {
ERR("!!GetFileInformationByHandle");
if (GetLastError() == ERROR_INVALID_HANDLE)
return PMEM2_E_INVALID_FILE_HANDLE;
else
return pmem2_lasterror_to_err();
}
if (info->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
ERR(
"using directory doesn't make any sense in context of pmem2");
return PMEM2_E_INVALID_FILE_TYPE;
}
return 0;
}
/*
 * pmem2_source_from_handle -- create a new data source instance from an
 * open Windows file HANDLE
 */
int
pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle)
{
	*src = NULL;
	int ret;

	if (handle == INVALID_HANDLE_VALUE)
		return PMEM2_E_INVALID_FILE_HANDLE;

	/* pmem2_win_stat also rejects directories */
	BY_HANDLE_FILE_INFORMATION file_info;
	ret = pmem2_win_stat(handle, &file_info);
	if (ret)
		return ret;

	/* XXX: winapi doesn't provide option to get open flags from HANDLE */

	struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
	if (ret)
		return ret;

	ASSERTne(srcp, NULL);

	srcp->type = PMEM2_SOURCE_HANDLE;
	srcp->value.handle = handle;
	*src = srcp;

	return 0;
}
/*
* pmem2_source_size -- get a size of the file handle stored in the provided
* source
*/
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
LOG(3, "type %d", src->type);
int ret;
if (src->type == PMEM2_SOURCE_ANON) {
*size = src->value.size;
return 0;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
BY_HANDLE_FILE_INFORMATION info;
ret = pmem2_win_stat(src->value.handle, &info);
if (ret)
return ret;
*size = ((size_t)info.nFileSizeHigh << 32) | info.nFileSizeLow;
LOG(4, "file length %zu", *size);
return 0;
}
/*
* pmem2_source_alignment -- get alignment from the system info
*/
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
LOG(3, "type %d", src->type);
SYSTEM_INFO info;
GetSystemInfo(&info);
*alignment = (size_t)info.dwAllocationGranularity;
if (!util_is_pow2(*alignment)) {
ERR("alignment (%zu) has to be a power of two", *alignment);
return PMEM2_E_INVALID_ALIGNMENT_VALUE;
}
LOG(4, "alignment %zu", *alignment);
return 0;
}
| 3,248 | 20.235294 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/pmem2_utils_none.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include <errno.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
ERR("Cannot read Device Dax alignment - ndctl is not available");
return PMEM2_E_NOSUPP;
}
/*
* pmem2_device_dax_size -- checks the size of a given dax device from
* given source
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
ERR("Cannot read Device Dax size - ndctl is not available");
return PMEM2_E_NOSUPP;
}
| 727 | 20.411765 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/auto_flush_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* auto_flush_linux.c -- Linux auto flush detection
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "os.h"
#include "fs.h"
#include "auto_flush.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
#define PERSISTENCE_DOMAIN "persistence_domain"
#define DOMAIN_VALUE_LEN 32
/*
 * check_cpu_cache -- (internal) check if file contains "cpu_cache" entry
 *
 * Reads the given persistence_domain sysfs file and returns:
 *   1 -- the file contains exactly "cpu_cache" (newline-terminated)
 *   0 -- the file holds a different domain, or could not be opened
 *  -1 -- the file was opened but its content could not be read or parsed
 */
static int
check_cpu_cache(const char *domain_path)
{
	LOG(3, "domain_path: %s", domain_path);

	char domain_value[DOMAIN_VALUE_LEN];
	int domain_fd;
	int cpu_cache = 0;

	if ((domain_fd = os_open(domain_path, O_RDONLY)) < 0) {
		LOG(1, "!open(\"%s\", O_RDONLY)", domain_path);
		goto end;
	}

	ssize_t len = read(domain_fd, domain_value,
			DOMAIN_VALUE_LEN);

	if (len < 0) {
		ERR("!read(%d, %p, %d)", domain_fd,
			domain_value, DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (len == 0) {
		errno = EIO;
		ERR("read(%d, %p, %d) empty string",
			domain_fd, domain_value,
			DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	} else if (domain_value[len - 1] != '\n') {
		/* a sysfs attribute is expected to end with a newline */
		ERR("!read(%d, %p, %d) invalid format",
			domain_fd, domain_value,
			DOMAIN_VALUE_LEN);
		cpu_cache = -1;
		goto end;
	}

	/* strip the trailing newline to get a NUL-terminated string */
	domain_value[len - 1] = '\0';
	LOG(15, "detected persistent_domain: %s", domain_value);
	if (strcmp(domain_value, "cpu_cache") == 0) {
		LOG(15, "cpu_cache in persistent_domain: %s", domain_path);
		cpu_cache = 1;
	} else {
		LOG(15, "cpu_cache not in persistent_domain: %s", domain_path);
		cpu_cache = 0;
	}

end:
	if (domain_fd >= 0)
		os_close(domain_fd);
	return cpu_cache;
}
/*
 * check_domain_in_region -- (internal) check if region
 * contains persistence_domain file
 *
 * Scans the region directory for a first-level "persistence_domain" file
 * and returns check_cpu_cache()'s verdict for it; 0 if no such file is
 * found, -1 on traversal errors.
 */
static int
check_domain_in_region(const char *region_path)
{
	LOG(3, "region_path: %s", region_path);

	struct fs *reg = NULL;
	struct fs_entry *reg_entry;
	char domain_path[PATH_MAX];
	int cpu_cache = 0;

	reg = fs_new(region_path);
	if (reg == NULL) {
		ERR("!fs_new: \"%s\"", region_path);
		cpu_cache = -1;
		goto end;
	}

	while ((reg_entry = fs_read(reg)) != NULL) {
		/*
		 * persistence_domain has to be a file type entry
		 * and it has to be first level child for region;
		 * there is no need to run into deeper levels
		 */
		if (reg_entry->type != FS_ENTRY_FILE ||
				strcmp(reg_entry->name,
					PERSISTENCE_DOMAIN) != 0 ||
				reg_entry->level != 1)
			continue;

		int ret = util_snprintf(domain_path, PATH_MAX,
			"%s/"PERSISTENCE_DOMAIN, region_path);
		if (ret < 0) {
			ERR("!snprintf");
			cpu_cache = -1;
			goto end;
		}
		cpu_cache = check_cpu_cache(domain_path);
	}

end:
	if (reg)
		fs_delete(reg);
	return cpu_cache;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush for all regions
*
* Traverse "/sys/bus/nd/devices" path to find all the nvdimm regions,
* then for each region checks if "persistence_domain" file exists and
* contains "cpu_cache" string.
* If for any region "persistence_domain" entry does not exists, or its
* context is not as expected, assume eADR is not available on this platform.
*/
int
pmem2_auto_flush(void)
{
	LOG(15, NULL);

	char *device_path;
	int cpu_cache = 0;

	device_path = BUS_DEVICE_PATH;

	/* no nvdimm bus directory at all -> report no eADR support */
	os_stat_t sdev;
	if (os_stat(device_path, &sdev) != 0 ||
		S_ISDIR(sdev.st_mode) == 0) {
		LOG(3, "eADR not supported");
		return cpu_cache;
	}

	struct fs *dev = fs_new(device_path);
	if (dev == NULL) {
		ERR("!fs_new: \"%s\"", device_path);
		return -1;
	}

	struct fs_entry *dev_entry;

	while ((dev_entry = fs_read(dev)) != NULL) {
		/*
		 * Skip if not a symlink, because we expect that
		 * region on sysfs path is a symlink.
		 * Skip if depth is different than 1, because region
		 * we are interested in should be the first level
		 * child for device.
		 */
		if ((dev_entry->type != FS_ENTRY_SYMLINK) ||
				!strstr(dev_entry->name, "region") ||
				dev_entry->level != 1)
			continue;

		LOG(15, "Start traversing region: %s", dev_entry->path);
		cpu_cache = check_domain_in_region(dev_entry->path);
		/* every region must report cpu_cache, otherwise give up */
		if (cpu_cache != 1)
			goto end;
	}

end:
	fs_delete(dev);
	return cpu_cache;
}
| 4,214 | 21.783784 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.c -- pmem2_config implementation
*/
#include <unistd.h>
#include "alloc.h"
#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
/*
* pmem2_config_init -- initialize cfg structure.
*/
void
pmem2_config_init(struct pmem2_config *cfg)
{
cfg->offset = 0;
cfg->length = 0;
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
cfg->requested_max_granularity = PMEM2_GRANULARITY_INVALID;
cfg->sharing = PMEM2_SHARED;
cfg->protection_flag = PMEM2_PROT_READ | PMEM2_PROT_WRITE;
}
/*
 * pmem2_config_new -- allocates and initializes cfg structure.
 *
 * On success *cfg points to a default-initialized config and 0 is
 * returned; on allocation failure the error code from pmem2_malloc is
 * returned.
 */
int
pmem2_config_new(struct pmem2_config **cfg)
{
	int ret;
	*cfg = pmem2_malloc(sizeof(**cfg), &ret);

	if (ret)
		return ret;

	/*
	 * Assert on the freshly allocated object, not on the out-parameter:
	 * cfg was already dereferenced above, so checking it here would be
	 * meaningless.
	 */
	ASSERTne(*cfg, NULL);

	pmem2_config_init(*cfg);

	return 0;
}
/*
* pmem2_config_delete -- deallocate cfg structure.
*/
int
pmem2_config_delete(struct pmem2_config **cfg)
{
Free(*cfg);
*cfg = NULL;
return 0;
}
/*
* pmem2_config_set_required_store_granularity -- set granularity
* requested by user in the pmem2_config structure
*/
int
pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
enum pmem2_granularity g)
{
switch (g) {
case PMEM2_GRANULARITY_BYTE:
case PMEM2_GRANULARITY_CACHE_LINE:
case PMEM2_GRANULARITY_PAGE:
break;
default:
ERR("unknown granularity value %d", g);
return PMEM2_E_GRANULARITY_NOT_SUPPORTED;
}
cfg->requested_max_granularity = g;
return 0;
}
/*
 * pmem2_config_set_offset -- set offset in the pmem2_config structure
 *
 * The offset is rejected when it cannot be represented as a non-negative
 * off_t, which is the type mmap ultimately receives.
 */
int
pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset)
{
	/* mmap func takes offset as a type of off_t */
	const size_t max_off = (size_t)INT64_MAX;

	if (offset > max_off) {
		ERR("offset is greater than INT64_MAX");
		return PMEM2_E_OFFSET_OUT_OF_RANGE;
	}

	cfg->offset = offset;
	return 0;
}
/*
* pmem2_config_set_length -- set length in the pmem2_config structure
*/
int
pmem2_config_set_length(struct pmem2_config *cfg, size_t length)
{
cfg->length = length;
return 0;
}
/*
 * pmem2_config_validate_length -- check that the (offset, length) pair in
 * the config describes a properly aligned range that fits within the
 * (alignment-rounded) file size
 */
int
pmem2_config_validate_length(const struct pmem2_config *cfg,
		size_t file_len, size_t alignment)
{
	ASSERTne(alignment, 0);

	if (file_len == 0) {
		ERR("file length is equal 0");
		return PMEM2_E_SOURCE_EMPTY;
	}

	if (cfg->length % alignment != 0) {
		ERR("length is not a multiple of %lu", alignment);
		return PMEM2_E_LENGTH_UNALIGNED;
	}

	size_t end = cfg->offset + cfg->length;
	if (end < cfg->offset) {
		/* offset + length wrapped around */
		ERR("overflow of offset and length");
		return PMEM2_E_MAP_RANGE;
	}

	/* round the file size up to the required alignment */
	size_t aligned_file_len = (file_len % alignment)
			? ALIGN_UP(file_len, alignment) : file_len;

	/* validate mapping fit into the file */
	if (end > aligned_file_len) {
		ERR("mapping larger than file size");
		return PMEM2_E_MAP_RANGE;
	}

	return 0;
}
/*
* pmem2_config_set_sharing -- set the way pmem2_map will map the file
*/
int
pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type)
{
switch (type) {
case PMEM2_SHARED:
case PMEM2_PRIVATE:
cfg->sharing = type;
break;
default:
ERR("unknown sharing value %d", type);
return PMEM2_E_INVALID_SHARING_VALUE;
}
return 0;
}
/*
* pmem2_config_validate_addr_alignment -- validate that addr in the
* pmem2_config structure is a multiple of the alignment required for
* specific cfg
*/
int
pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
const struct pmem2_source *src)
{
/* cannot NULL % alignment, NULL is valid */
if (!cfg->addr)
return 0;
size_t alignment;
int ret = pmem2_source_alignment(src, &alignment);
if (ret)
return ret;
ASSERTne(alignment, 0);
if ((size_t)cfg->addr % alignment) {
ERR("address %p is not a multiple of %lu", cfg->addr,
alignment);
return PMEM2_E_ADDRESS_UNALIGNED;
}
return 0;
}
/*
 * pmem2_config_set_address -- set addr and addr_request in the config
 * struct
 *
 * The only supported request type is PMEM2_ADDRESS_FIXED_NOREPLACE, and it
 * requires a non-NULL address.
 */
int
pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
		enum pmem2_address_request_type request_type)
{
	if (request_type != PMEM2_ADDRESS_FIXED_NOREPLACE) {
		ERR("invalid address request_type 0x%x", request_type);
		return PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE;
	}

	/*
	 * The guard above guarantees request_type is FIXED_NOREPLACE here,
	 * so only the address needs checking (the original code redundantly
	 * re-tested the request type).
	 */
	if (!addr) {
		ERR(
			"cannot use address request type PMEM2_ADDRESS_FIXED_NOREPLACE with addr being NULL");
		return PMEM2_E_ADDRESS_NULL;
	}

	cfg->addr = addr;
	cfg->addr_request = (int)request_type;

	return 0;
}
/*
* pmem2_config_set_vm_reservation -- set vm_reservation in the
* pmem2_config structure
*/
int
pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
struct pmem2_vm_reservation *rsv, size_t offset)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_config_clear_address -- reset addr and addr_request in the config
* to the default values
*/
void
pmem2_config_clear_address(struct pmem2_config *cfg)
{
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
}
/*
 * pmem2_config_set_protection -- set protection flags
 * in the config struct
 */
int
pmem2_config_set_protection(struct pmem2_config *cfg,
		unsigned prot)
{
	/* the full set of flags libpmem2 understands */
	const unsigned known = PMEM2_PROT_READ | PMEM2_PROT_WRITE |
			PMEM2_PROT_EXEC | PMEM2_PROT_NONE;

	if (prot & ~known) {
		ERR("invalid flag %u", prot);
		return PMEM2_E_INVALID_PROT_FLAG;
	}

	cfg->protection_flag = prot;
	return 0;
}
| 5,603 | 20.227273 | 89 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/ravl_interval.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.h -- internal definitions for ravl_interval
*/
#ifndef RAVL_INTERVAL_H
#define RAVL_INTERVAL_H

#include "libpmem2.h"
#include "os_thread.h"
#include "ravl.h"

/* opaque interval-tree and node handles */
struct ravl_interval;
struct ravl_interval_node;

/* callbacks returning the lower/upper bound of the interval stored at addr */
typedef size_t ravl_interval_min(void *addr);
typedef size_t ravl_interval_max(void *addr);

/*
 * Creates a new interval tree; min/max extract the interval bounds.
 * The second parameter now uses the ravl_interval_max typedef (it was
 * declared as ravl_interval_min); both typedefs share the same signature,
 * so existing callers are unaffected.
 */
struct ravl_interval *ravl_interval_new(ravl_interval_min *min,
		ravl_interval_max *max);
void ravl_interval_delete(struct ravl_interval *ri);
int ravl_interval_insert(struct ravl_interval *ri, void *addr);
int ravl_interval_remove(struct ravl_interval *ri,
		struct ravl_interval_node *rin);
/* lookup: exact-interval match vs. any intersecting interval */
struct ravl_interval_node *ravl_interval_find_equal(struct ravl_interval *ri,
		void *addr);
struct ravl_interval_node *ravl_interval_find(struct ravl_interval *ri,
		void *addr);
/* returns the user pointer stored in the node */
void *ravl_interval_data(struct ravl_interval_node *rin);
#endif
| 947 | 27.727273 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/memops_generic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* memops_generic.c -- architecture-independent memmove & memset fallback
*
* This fallback is needed to fulfill guarantee that pmem_mem[cpy|set|move]
* will use at least 8-byte stores (for 8-byte aligned buffers and sizes),
* even when accelerated implementation is missing or disabled.
* This guarantee is needed to maintain correctness eg in pmemobj.
* Libc may do the same, but this behavior is not documented, so we can't rely
* on that.
*/
#include <stddef.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
 * pmem2_flush_flags -- internal wrapper around pmem_flush
 *
 * Invokes the flush callback unless the caller opted out with
 * PMEM2_F_MEM_NOFLUSH.
 */
static inline void
pmem2_flush_flags(const void *addr, size_t len, unsigned flags,
	flush_func flush)
{
	int skip_flush = (flags & PMEM2_F_MEM_NOFLUSH) != 0;

	if (!skip_flush)
		flush(addr, len);
}
/*
 * cpy128 -- (internal) copy 128 bytes from src to dst
 *
 * All 16 quadwords are loaded into a temporary buffer before any store
 * is issued, so the copy stays correct even when the source and
 * destination chunks overlap.  The relaxed atomics only keep the
 * compiler from splitting the 8-byte accesses; ordering among the
 * stores does not matter.
 */
static force_inline void
cpy128(uint64_t *dst, const uint64_t *src)
{
	uint64_t tmp[16];

	for (int i = 0; i < 16; ++i)
		util_atomic_load_explicit64(&src[i], &tmp[i],
				memory_order_relaxed);

	for (int i = 0; i < 16; ++i)
		util_atomic_store_explicit64(&dst[i], tmp[i],
				memory_order_relaxed);
}
/*
 * cpy64 -- (internal) copy 64 bytes from src to dst
 *
 * Mirrors cpy128: load all 8 quadwords first, then store them, so an
 * overlapping chunk cannot be clobbered mid-copy.  Relaxed atomics are
 * used only so the compiler cannot split the 8-byte accesses; their
 * ordering is irrelevant.
 */
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
	uint64_t tmp[8];

	for (int i = 0; i < 8; ++i)
		util_atomic_load_explicit64(&src[i], &tmp[i],
				memory_order_relaxed);

	for (int i = 0; i < 8; ++i)
		util_atomic_store_explicit64(&dst[i], tmp[i],
				memory_order_relaxed);
}
/*
 * cpy8 -- (internal) copy 8 bytes from src to dst
 */
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
	uint64_t tmp;
	/* relaxed atomics guarantee a single, unsplit 8-byte load/store */
	util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
	util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}
/*
 * store8 -- (internal) store 8 bytes
 */
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
	/* atomic store keeps the compiler from splitting the 8-byte write */
	util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}
/*
 * memmove_nodrain_generic -- generic memmove to pmem without hw drain
 *
 * Copies len bytes from src to dst using at least 8-byte stores for the
 * aligned middle of the range, flushing each written chunk via
 * pmem2_flush_flags (a no-op when PMEM2_F_MEM_NOFLUSH is set).  The copy
 * direction is chosen so overlapping buffers behave like memmove(3).
 */
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
		unsigned flags, flush_func flush)
{
	LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
			flags);

	char *cdst = dst;
	const char *csrc = src;
	size_t remaining;
	(void) flags;

	/*
	 * Unsigned-wraparound trick: true when dst < src or when the two
	 * ranges do not overlap at all, i.e. whenever a forward
	 * (low-to-high) copy is safe.
	 */
	if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
		/* head: byte copies until dst is 8-byte aligned */
		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			cnt = 8 - cnt;

			if (cnt > len)
				cnt = len;

			for (size_t i = 0; i < cnt; ++i)
				cdst[i] = csrc[i];

			pmem2_flush_flags(cdst, cnt, flags, flush);

			cdst += cnt;
			csrc += cnt;
			len -= cnt;
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		/* bulk: one (128B) cache line at a time, flushed per line */
		while (len >= 128 && CACHELINE_SIZE == 128) {
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
			dst8 += 16;
			src8 += 16;
		}

		while (len >= 64) {
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
			dst8 += 8;
			src8 += 8;
		}

		/* tail (<64B): flushed in a single call at the end */
		remaining = len;
		while (len >= 8) {
			cpy8(dst8, src8);
			len -= 8;
			dst8++;
			src8++;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = 0; i < len; ++i)
			*cdst++ = *csrc++;

		if (remaining)
			pmem2_flush_flags(cdst - remaining, remaining, flags,
					flush);
	} else {
		/* overlapping with dst > src: copy backwards (high-to-low) */
		cdst += len;
		csrc += len;

		/* head: byte copies until the end address is 8-byte aligned */
		size_t cnt = (uint64_t)cdst & 7;
		if (cnt > 0) {
			if (cnt > len)
				cnt = len;

			cdst -= cnt;
			csrc -= cnt;
			len -= cnt;

			for (size_t i = cnt; i > 0; --i)
				cdst[i - 1] = csrc[i - 1];

			pmem2_flush_flags(cdst, cnt, flags, flush);
		}

		uint64_t *dst8 = (uint64_t *)cdst;
		const uint64_t *src8 = (const uint64_t *)csrc;

		while (len >= 128 && CACHELINE_SIZE == 128) {
			dst8 -= 16;
			src8 -= 16;
			cpy128(dst8, src8);
			pmem2_flush_flags(dst8, 128, flags, flush);
			len -= 128;
		}

		while (len >= 64) {
			dst8 -= 8;
			src8 -= 8;
			cpy64(dst8, src8);
			pmem2_flush_flags(dst8, 64, flags, flush);
			len -= 64;
		}

		/* tail (<64B): flushed in a single call at the end */
		remaining = len;
		while (len >= 8) {
			--dst8;
			--src8;
			cpy8(dst8, src8);
			len -= 8;
		}

		cdst = (char *)dst8;
		csrc = (const char *)src8;

		for (size_t i = len; i > 0; --i)
			*--cdst = *--csrc;

		if (remaining)
			pmem2_flush_flags(cdst, remaining, flags, flush);
	}

	return dst;
}
/*
 * memset_nodrain_generic -- generic memset to pmem without hw drain
 *
 * Fills len bytes at dst with byte c, using 8-byte stores for the
 * aligned part of the range and flushing every written chunk via
 * pmem2_flush_flags (a no-op when PMEM2_F_MEM_NOFLUSH was requested).
 */
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags,
		flush_func flush)
{
	LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
			flags);
	(void) flags;

	char *out = dst;

	/* head: byte stores until the destination is 8-byte aligned */
	size_t head = (uint64_t)out & 7;
	if (head > 0) {
		head = 8 - head;
		if (head > len)
			head = len;
		for (size_t i = 0; i < head; ++i)
			out[i] = (char)c;
		pmem2_flush_flags(out, head, flags, flush);
		out += head;
		len -= head;
	}

	uint64_t *out8 = (uint64_t *)out;

	/* replicate the fill byte across all 8 bytes of the pattern word */
	uint64_t b = (unsigned char)c;
	uint64_t pattern = (b << 56) | (b << 48) | (b << 40) | (b << 32) |
			(b << 24) | (b << 16) | (b << 8) | b;

	/* bulk: whole 128-byte cache lines (when that is the line size) */
	while (len >= 128 && CACHELINE_SIZE == 128) {
		for (int i = 0; i < 16; ++i)
			store8(&out8[i], pattern);
		pmem2_flush_flags(out8, 128, flags, flush);
		len -= 128;
		out8 += 16;
	}

	/* bulk: 64-byte chunks */
	while (len >= 64) {
		for (int i = 0; i < 8; ++i)
			store8(&out8[i], pattern);
		pmem2_flush_flags(out8, 64, flags, flush);
		len -= 64;
		out8 += 8;
	}

	/* tail (<64B): flushed with a single call at the end */
	size_t tail = len;
	while (len >= 8) {
		store8(out8, pattern);
		len -= 8;
		out8++;
	}

	out = (char *)out8;
	for (size_t i = 0; i < len; ++i)
		*out++ = (char)c;

	if (tail)
		pmem2_flush_flags(out - tail, tail, flags, flush);

	return dst;
}
| 9,345 | 26.488235 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/pmem2_arch.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
 * pmem2_arch.h -- core-arch interface
 *
 * Declares the dispatch table (struct pmem2_arch_info) through which
 * generic libpmem2 code calls the architecture-specific memory
 * operations, plus the generic fallback implementations.
 */
#ifndef PMEM2_ARCH_H
#define PMEM2_ARCH_H

#include <stddef.h>
#include "libpmem2.h"
#include "util.h"
#include "valgrind_internal.h"

#ifdef __cplusplus
extern "C" {
#endif

struct pmem2_arch_info;

/* store-fence primitive */
typedef void (*fence_func)(void);
/* flushes a range of addresses from the CPU cache */
typedef void (*flush_func)(const void *, size_t);
/* memmove/memset variants that flush but do not drain */
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
		size_t len, unsigned flags, flush_func flush);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
		unsigned flags, flush_func flush);

/* per-architecture implementations, filled in by pmem2_arch_init() */
struct pmem2_arch_info {
	memmove_nodrain_func memmove_nodrain;
	memmove_nodrain_func memmove_nodrain_eadr;
	memset_nodrain_func memset_nodrain;
	memset_nodrain_func memset_nodrain_eadr;
	flush_func flush;
	fence_func fence;
	/* nonzero when flush() already includes the fence semantics */
	int flush_has_builtin_fence;
};

void pmem2_arch_init(struct pmem2_arch_info *info);

/*
 * flush_empty_nolog -- (internal) do not flush the CPU cache
 */
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
	/* NOP, but tell pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, len);
}

/* generic fallbacks implemented in memops_generic.c */
void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
		unsigned flags, flush_func flush);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags,
		flush_func flush);

#ifdef __cplusplus
}
#endif

#endif
| 1,427 | 22.8 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/region_namespace_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* region_namespace_ndctl.c -- common ndctl functions
*/
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "region_namespace_ndctl.h"
#include "region_namespace.h"
#include "out.h"
/*
 * ndctl_match_devdax -- (internal) check whether /dev/<devname> is the
 * same character device as the one identified by st_rdev.
 *
 * Returns 0 on a match, 1 on a mismatch and a negative error code when
 * the device node cannot be inspected.
 */
static int
ndctl_match_devdax(dev_t st_rdev, const char *devname)
{
	LOG(3, "st_rdev %lu devname %s", st_rdev, devname);

	/* an empty device name never matches anything */
	if (*devname == '\0')
		return 1;

	char dev_path[PATH_MAX];
	os_stat_t st;

	if (util_snprintf(dev_path, PATH_MAX, "/dev/%s", devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (os_stat(dev_path, &st)) {
		ERR("!stat %s", dev_path);
		return PMEM2_E_ERRNO;
	}

	if (st.st_rdev != st_rdev) {
		LOG(10, "skipping not matching device: %s", dev_path);
		return 1;
	}

	LOG(4, "found matching device: %s", dev_path);
	return 0;
}
#define BUFF_LENGTH 64
/*
 * ndctl_match_fsdax -- (internal) returns 0 if the device matches
 *                      with the given file, 1 if it doesn't match,
 *                      and a negative value in case of an error.
 *
 * The comparison reads /sys/block/<devname>/dev (expected to contain
 * "major:minor\n") and matches it against the major:minor numbers of
 * the st_dev the file resides on.
 */
static int
ndctl_match_fsdax(dev_t st_dev, const char *devname)
{
	LOG(3, "st_dev %lu devname %s", st_dev, devname);

	/* an empty device name never matches anything */
	if (*devname == '\0')
		return 1;

	char path[PATH_MAX];
	char dev_id[BUFF_LENGTH];

	if (util_snprintf(path, PATH_MAX, "/sys/block/%s/dev", devname) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	if (util_snprintf(dev_id, BUFF_LENGTH, "%d:%d",
			major(st_dev), minor(st_dev)) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}

	int fd = os_open(path, O_RDONLY);
	if (fd < 0) {
		ERR("!open \"%s\"", path);
		return PMEM2_E_ERRNO;
	}

	char buff[BUFF_LENGTH];
	ssize_t nread = read(fd, buff, BUFF_LENGTH);
	if (nread < 0) {
		ERR("!read");
		int oerrno = errno; /* save the errno, os_close may clobber it */
		os_close(fd);
		errno = oerrno;
		return PMEM2_E_ERRNO;
	}

	os_close(fd);

	/* validate the "<major:minor>\n" format of the sysfs attribute */
	if (nread == 0) {
		ERR("%s is empty", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	if (buff[nread - 1] != '\n') {
		ERR("%s doesn't end with new line", path);
		return PMEM2_E_INVALID_DEV_FORMAT;
	}

	buff[nread - 1] = '\0';

	if (strcmp(buff, dev_id) != 0) {
		LOG(10, "skipping not matching device: %s", path);
		return 1;
	}

	LOG(4, "found matching device: %s", path);
	return 0;
}
/*
 * pmem2_region_namespace -- returns the region
 *                           (and optionally the namespace)
 *                           where the given file is located
 *
 * Walks every bus/region/namespace known to libndctl and matches the
 * source either as a device-DAX node (by st_rdev) or as a fsdax block
 * device (by st_dev).  pregion/pndns may be NULL when the caller is not
 * interested in that output; both are reset to NULL up front.
 */
int
pmem2_region_namespace(struct ndctl_ctx *ctx,
	const struct pmem2_source *src,
	struct ndctl_region **pregion,
	struct ndctl_namespace **pndns)
{
	LOG(3, "ctx %p src %p pregion %p pnamespace %p",
		ctx, src, pregion, pndns);

	struct ndctl_bus *bus;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;

	if (pregion)
		*pregion = NULL;

	if (pndns)
		*pndns = NULL;

	if (src->value.ftype == PMEM2_FTYPE_DIR) {
		ERR("cannot check region or namespace of a directory");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
		struct ndctl_btt *btt;
		struct ndctl_dax *dax = NULL;
		struct ndctl_pfn *pfn;
		const char *devname;

		if ((dax = ndctl_namespace_get_dax(ndns))) {
			/* device-DAX namespace: compare character devices */
			if (src->value.ftype == PMEM2_FTYPE_REG)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_DEVDAX);

			struct daxctl_region *dax_region;
			dax_region = ndctl_dax_get_daxctl_region(dax);
			if (!dax_region) {
				ERR("!cannot find dax region");
				return PMEM2_E_DAX_REGION_NOT_FOUND;
			}
			struct daxctl_dev *dev;
			daxctl_dev_foreach(dax_region, dev) {
				devname = daxctl_dev_get_devname(dev);
				int ret = ndctl_match_devdax(src->value.st_rdev,
					devname);
				if (ret < 0)
					return ret;

				if (ret == 0) {
					if (pregion)
						*pregion = region;
					if (pndns)
						*pndns = ndns;

					return 0;
				}
			}
		} else {
			/* fsdax namespace: compare the backing block device */
			if (src->value.ftype == PMEM2_FTYPE_DEVDAX)
				continue;
			ASSERTeq(src->value.ftype, PMEM2_FTYPE_REG);

			/* the block device name depends on the namespace mode */
			if ((btt = ndctl_namespace_get_btt(ndns))) {
				devname = ndctl_btt_get_block_device(btt);
			} else if ((pfn = ndctl_namespace_get_pfn(ndns))) {
				devname = ndctl_pfn_get_block_device(pfn);
			} else {
				devname =
					ndctl_namespace_get_block_device(ndns);
			}

			int ret = ndctl_match_fsdax(src->value.st_dev, devname);
			if (ret < 0)
				return ret;

			if (ret == 0) {
				if (pregion)
					*pregion = region;
				if (pndns)
					*pndns = ndns;

				return 0;
			}
		}
	}

	LOG(10, "did not found any matching device");

	return 0;
}
/*
* pmem2_region_get_id -- returns the region id
*/
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
LOG(3, "src %p region_id %p", src, region_id);
struct ndctl_region *region;
struct ndctl_namespace *ndns;
struct ndctl_ctx *ctx;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
int rv = pmem2_region_namespace(ctx, src, ®ion, &ndns);
if (rv) {
LOG(1, "getting region and namespace failed");
goto end;
}
if (!region) {
ERR("unknown region");
rv = PMEM2_E_DAX_REGION_NOT_FOUND;
goto end;
}
*region_id = ndctl_region_get_id(region);
end:
ndctl_unref(ctx);
return rv;
}
| 5,467 | 20.111969 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/pmem2_utils_other.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <sys/stat.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#ifdef _WIN32
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
/*
 * pmem2_get_type_from_stat -- deduce the pmem2 file type from stat data
 *
 * Only regular files and directories are recognized here; anything else
 * is reported as an unsupported file type.
 */
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
	if (S_ISDIR(st->st_mode)) {
		*type = PMEM2_FTYPE_DIR;
		return 0;
	}

	if (S_ISREG(st->st_mode)) {
		*type = PMEM2_FTYPE_REG;
		return 0;
	}

	ERR("file type 0%o not supported", st->st_mode & S_IFMT);
	return PMEM2_E_INVALID_FILE_TYPE;
}
/*
 * pmem2_device_dax_size -- checks the size of a given
 * dax device from given source structure
 *
 * Device DAX does not exist on this OS; reaching this function indicates
 * a logic error elsewhere in the library.
 */
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
	const char *err =
		"BUG: pmem2_device_dax_size should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);
	/* defensive return for builds where the assertion does not abort */
	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_device_dax_alignment -- checks the alignment of a given
 * dax device from given source
 *
 * Device DAX does not exist on this OS; reaching this function indicates
 * a logic error elsewhere in the library.
 */
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
	const char *err =
		"BUG: pmem2_device_dax_alignment should never be called on this OS";
	ERR("%s", err);
	ASSERTinfo(0, err);
	/* defensive return for builds where the assertion does not abort */
	return PMEM2_E_NOSUPP;
}
| 1,301 | 20.7 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/deep_flush.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.c -- pmem2_deep_flush implementation
*/
#include <stdlib.h>
#include "libpmem2.h"
#include "deep_flush.h"
#include "out.h"
/*
 * pmem2_deep_flush -- performs deep flush operation
 *
 * Validates that [ptr, ptr + size) lies entirely within the mapping and
 * then delegates to the mapping's deep-flush callback.
 *
 * Returns 0 on success, PMEM2_E_DEEP_FLUSH_RANGE when the requested
 * range falls outside of the map, or the callback's error code.
 */
int
pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
	LOG(3, "map %p ptr %p size %zu", map, ptr, size);

	uintptr_t map_addr = (uintptr_t)map->addr;
	uintptr_t map_end = map_addr + map->content_length;
	uintptr_t flush_addr = (uintptr_t)ptr;
	uintptr_t flush_end = flush_addr + size;

	if (flush_addr < map_addr || flush_end > map_end) {
		/* fixed message: "rage" -> "range", added missing space */
		ERR("requested deep flush range ptr %p size %zu "
			"exceeds map range %p", ptr, size, map);
		return PMEM2_E_DEEP_FLUSH_RANGE;
	}

	int ret = map->deep_flush_fn(map, ptr, size);
	if (ret) {
		LOG(1, "cannot perform deep flush operation for map %p", map);
		return ret;
	}

	return 0;
}
| 929 | 21.682927 | 64 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/map_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_posix.c -- pmem2_map (POSIX)
*/
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "file.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "valgrind_internal.h"
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
#ifdef __linux__
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because fd doesn't point to DAX-enabled file " \
"or kernel doesn't support MAP_SYNC flag (Linux >= 4.15)"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#else
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"the operating system doesn't provide a method of detecting granularity"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG \
"the operating system doesn't provide a method of detecting whether the platform supports eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#endif
/*
 * get_map_alignment -- (internal) choose the desired mapping alignment
 *
 * An explicitly requested alignment always wins.  Otherwise the default
 * is 2 MiB -- the smallest supported alignment, dictated by the object
 * alignment requirements (going down to 4 KiB would be a layout change)
 * -- bumped up to 1 GiB for mappings of at least 2 GiB.
 */
static inline size_t
get_map_alignment(size_t len, size_t req_align)
{
	if (req_align)
		return req_align;

	if (len >= 2 * GIGABYTE)
		return GIGABYTE;

	return 2 * MEGABYTE;
}
/*
 * map_reserve -- (internal) reserve an address for mmap()
 *
 * ASLR in 64-bit Linux kernel uses 28-bit of randomness for mmap
 * (bit positions 12-39), which means the base mapping address is randomized
 * within [0..1024GB] range, with 4KB granularity. Assuming additional
 * 1GB alignment, it results in 1024 possible locations.
 *
 * On success *reserv holds the aligned start and *reslen the page-rounded
 * length of a region that is guaranteed to be free (any placeholder
 * padding around it has already been unmapped).
 */
static int
map_reserve(size_t len, size_t alignment, void **reserv, size_t *reslen,
	const struct pmem2_config *cfg)
{
	ASSERTne(reserv, NULL);

	/* let's get addr from the cfg */
	void *mmap_addr = cfg->addr;
	int mmap_addr_flag = 0;
	size_t dlength; /* dummy length */

	/* if addr is initialized, dlength == len */
	if (mmap_addr)
		dlength = len;
	else
		dlength = len + alignment; /* dummy length */

	/* "translate" pmem2 addr request type into linux flag */
	if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/*
		 * glibc started exposing this flag in version 4.17 but we can still
		 * imitate it even if it is not supported by libc or kernel
		 */
#ifdef MAP_FIXED_NOREPLACE
		mmap_addr_flag = MAP_FIXED_NOREPLACE;
#else
		mmap_addr_flag = 0;
#endif
	}

	/*
	 * Create dummy mapping to find an unused region of given size.
	 * Request for increased size for later address alignment.
	 * Use MAP_PRIVATE with read-only access to simulate
	 * zero cost for overcommit accounting. Note: MAP_NORESERVE
	 * flag is ignored if overcommit is disabled (mode 2).
	 */
	char *daddr = mmap(mmap_addr, dlength, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | mmap_addr_flag, -1, 0);
	if (daddr == MAP_FAILED) {
		if (errno == EEXIST) {
			ERR("!mmap MAP_FIXED_NOREPLACE");
			return PMEM2_E_MAPPING_EXISTS;
		}
		ERR("!mmap MAP_ANONYMOUS");
		return PMEM2_E_ERRNO;
	}

	/*
	 * When kernel does not support MAP_FIXED_NOREPLACE flag we imitate it.
	 * If kernel does not support flag and given addr is occupied, kernel
	 * chooses new addr randomly and returns it. We do not want that
	 * behavior, so we validate it and fail when addresses do not match.
	 */
	if (mmap_addr && cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
		/* mapping passed and gave different addr, while it shouldn't */
		if (daddr != mmap_addr) {
			munmap(daddr, dlength);
			ERR("mapping exists in the given address");
			return PMEM2_E_MAPPING_EXISTS;
		}
	}

	LOG(4, "system choice %p", daddr);
	/* align the start of the actual reservation inside the placeholder */
	*reserv = (void *)roundup((uintptr_t)daddr, alignment);

	/*
	 * since the last part of the reservation from (reserv + reslen == end)
	 * will be unmapped, the 'end' address has to be page-aligned.
	 * 'reserv' is already page-aligned (or even aligned to multiple of page
	 * size) so it is enough to page-align the 'reslen' value.
	 */
	*reslen = roundup(len, Pagesize);
	LOG(4, "hint %p", *reserv);

	/*
	 * The placeholder mapping is divided into few parts:
	 *
	 * 1      2         3   4                 5
	 * |......|uuuuuuuuu|rrr|.................|
	 *
	 * Addresses:
	 * 1 == daddr
	 * 2 == reserv
	 * 3 == reserv + len
	 * 4 == reserv + reslen == end (has to be page-aligned)
	 * 5 == daddr + dlength
	 *
	 * Key:
	 * - '.' is an unused part of the placeholder
	 * - 'u' is where the actual mapping lies
	 * - 'r' is what reserved as padding
	 */

	/* unmap the placeholder before the actual mapping */
	const size_t before = (uintptr_t)(*reserv) - (uintptr_t)daddr;
	if (before) {
		if (munmap(daddr, before)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}
	}

	/* unmap the placeholder after the actual mapping */
	const size_t after = dlength - *reslen - before;
	void *end = (void *)((uintptr_t)(*reserv) + (uintptr_t)*reslen);
	if (after)
		if (munmap(end, after)) {
			ERR("!munmap");
			return PMEM2_E_ERRNO;
		}

	return 0;
}
/*
 * file_map -- (internal) memory map given file into memory
 * If (flags & MAP_PRIVATE) it uses just mmap. Otherwise, it tries to mmap with
 * (flags | MAP_SHARED_VALIDATE | MAP_SYNC) which allows flushing from the
 * user-space. If MAP_SYNC fails and the user did not specify it by himself it
 * falls back to the mmap with user-provided flags.
 *
 * On success *base holds the mapped address and *map_sync records whether
 * the MAP_SYNC variant was used.
 */
static int
file_map(void *reserv, size_t len, int proto, int flags,
	int fd, off_t offset, bool *map_sync, void **base)
{
	LOG(15, "reserve %p len %zu proto %x flags %x fd %d offset %ld "
		"map_sync %p", reserv, len, proto, flags, fd, offset,
		map_sync);

	ASSERTne(map_sync, NULL);
	ASSERTne(base, NULL);

	/*
	 * MAP_PRIVATE and MAP_SHARED are mutually exclusive, therefore mmap
	 * with MAP_PRIVATE is executed separately.
	 */
	if (flags & MAP_PRIVATE) {
		*base = mmap(reserv, len, proto, flags, fd, offset);
		if (*base == MAP_FAILED) {
			ERR("!mmap");
			return PMEM2_E_ERRNO;
		}
		LOG(4, "mmap with MAP_PRIVATE succeeded");
		*map_sync = false;
		return 0;
	}

	/* try to mmap with MAP_SYNC flag */
	const int sync_flags = MAP_SHARED_VALIDATE | MAP_SYNC;
	*base = mmap(reserv, len, proto, flags | sync_flags, fd, offset);
	if (*base != MAP_FAILED) {
		LOG(4, "mmap with MAP_SYNC succeeded");
		*map_sync = true;
		return 0;
	}

	/* try to mmap with MAP_SHARED flag (without MAP_SYNC) */
	if (errno == EINVAL || errno == ENOTSUP) {
		LOG(4, "mmap with MAP_SYNC not supported");
		*base = mmap(reserv, len, proto, flags | MAP_SHARED, fd,
			offset);
		if (*base != MAP_FAILED) {
			*map_sync = false;
			return 0;
		}
	}

	/* either the fallback failed or errno ruled the fallback out */
	ERR("!mmap");
	return PMEM2_E_ERRNO;
}
/*
 * unmap -- (internal) unmap a memory range
 */
static int
unmap(void *addr, size_t len)
{
	if (munmap(addr, len) < 0) {
		ERR("!munmap");
		return PMEM2_E_ERRNO;
	}

	return 0;
}
/*
 * pmem2_map -- map memory according to provided config
 *
 * Pipeline: validate granularity/offset/length, reserve an aligned
 * address range, mmap the source into it (MAP_SYNC when possible),
 * verify the achieved granularity against the requested maximum, and
 * finally allocate + register the pmem2_map object returned via map_ptr.
 */
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
		struct pmem2_map **map_ptr)
{
	LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
	int ret = 0;
	struct pmem2_map *map;
	size_t file_len;
	*map_ptr = NULL;

	if (cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
		ERR(
			"please define the max granularity requested for the mapping");
		return PMEM2_E_GRANULARITY_NOT_SET;
	}

	size_t src_alignment;
	ret = pmem2_source_alignment(src, &src_alignment);
	if (ret)
		return ret;

	/* get file size */
	ret = pmem2_source_size(src, &file_len);
	if (ret)
		return ret;

	/* get offset */
	size_t effective_offset;
	ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
	if (ret)
		return ret;
	ASSERTeq(effective_offset, cfg->offset);

	/* an anonymous mapping has no backing file to offset into */
	if (src->type == PMEM2_SOURCE_ANON)
		effective_offset = 0;

	os_off_t off = (os_off_t)effective_offset;

	/* map input and output variables */
	bool map_sync = false;
	/*
	 * MAP_SHARED - is required to mmap directly the underlying hardware
	 * MAP_FIXED - is required to mmap at exact address pointed by hint
	 */
	int flags = MAP_FIXED;
	void *addr;

	/* "translate" pmem2 protection flags into linux flags */
	int proto = 0;
	if (cfg->protection_flag == PMEM2_PROT_NONE)
		proto = PROT_NONE;
	if (cfg->protection_flag & PMEM2_PROT_EXEC)
		proto |= PROT_EXEC;
	if (cfg->protection_flag & PMEM2_PROT_READ)
		proto |= PROT_READ;
	if (cfg->protection_flag & PMEM2_PROT_WRITE)
		proto |= PROT_WRITE;

	if (src->type == PMEM2_SOURCE_FD) {
		if (src->value.ftype == PMEM2_FTYPE_DIR) {
			ERR("the directory is not a supported file type");
			return PMEM2_E_INVALID_FILE_TYPE;
		}
		ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
			src->value.ftype == PMEM2_FTYPE_DEVDAX);

		if (cfg->sharing == PMEM2_PRIVATE &&
			src->value.ftype == PMEM2_FTYPE_DEVDAX) {
			ERR(
				"device DAX does not support mapping with MAP_PRIVATE");
			return PMEM2_E_SRC_DEVDAX_PRIVATE;
		}
	}

	size_t content_length, reserved_length = 0;
	ret = pmem2_config_validate_length(cfg, file_len, src_alignment);
	if (ret)
		return ret;

	/* without user-provided length, map to the end of the file */
	if (cfg->length)
		content_length = cfg->length;
	else
		content_length = file_len - effective_offset;

	size_t alignment = get_map_alignment(content_length,
		src_alignment);

	ret = pmem2_config_validate_addr_alignment(cfg, src);
	if (ret)
		return ret;

	/* find a hint for the mapping */
	void *reserv = NULL;
	ret = map_reserve(content_length, alignment, &reserv, &reserved_length,
		cfg);
	if (ret != 0) {
		if (ret == PMEM2_E_MAPPING_EXISTS)
			LOG(1, "given mapping region is already occupied");
		else
			LOG(1, "cannot find a contiguous region of given size");
		return ret;
	}
	ASSERTne(reserv, NULL);

	if (cfg->sharing == PMEM2_PRIVATE) {
		flags |= MAP_PRIVATE;
	}

	int map_fd = INVALID_FD;
	if (src->type == PMEM2_SOURCE_FD) {
		map_fd = src->value.fd;
	} else if (src->type == PMEM2_SOURCE_ANON) {
		flags |= MAP_ANONYMOUS;
	} else {
		ASSERT(0);
	}

	ret = file_map(reserv, content_length, proto, flags, map_fd, off,
		&map_sync, &addr);
	if (ret) {
		/* unmap the reservation mapping */
		munmap(reserv, reserved_length);
		if (ret == -EACCES)
			return PMEM2_E_NO_ACCESS;
		else if (ret == -ENOTSUP)
			return PMEM2_E_NOSUPP;
		else
			return ret;
	}

	LOG(3, "mapped at %p", addr);

	/* determine the finest granularity the platform actually delivers */
	bool eADR = (pmem2_auto_flush() == 1);
	enum pmem2_granularity available_min_granularity =
		src->type == PMEM2_SOURCE_ANON ? PMEM2_GRANULARITY_BYTE :
		get_min_granularity(eADR, map_sync, cfg->sharing);

	if (available_min_granularity > cfg->requested_max_granularity) {
		const char *err = granularity_err_msg
			[cfg->requested_max_granularity]
			[available_min_granularity];
		if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
			FATAL(
				"unhandled granularity error: available_min_granularity: %d" \
				"requested_max_granularity: %d",
				available_min_granularity,
				cfg->requested_max_granularity);
		ERR("%s", err);
		ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
		goto err;
	}

	/* prepare pmem2_map structure */
	map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
	if (!map)
		goto err;

	map->addr = addr;
	map->reserved_length = reserved_length;
	map->content_length = content_length;
	map->effective_granularity = available_min_granularity;
	pmem2_set_flush_fns(map);
	pmem2_set_mem_fns(map);
	map->source = *src;
	map->source.value.fd = INVALID_FD; /* fd should not be used after map */

	ret = pmem2_register_mapping(map);
	if (ret)
		goto err_register;

	*map_ptr = map;

	if (src->type == PMEM2_SOURCE_FD) {
		VALGRIND_REGISTER_PMEM_MAPPING(map->addr, map->content_length);
		VALGRIND_REGISTER_PMEM_FILE(src->value.fd,
			map->addr, map->content_length, 0);
	}

	return 0;

err_register:
	free(map);
err:
	unmap(addr, reserved_length);
	return ret;
}
/*
 * pmem2_unmap -- unmap the specified mapping
 *
 * On success frees the pmem2_map object and NULLs out the caller's
 * pointer; on failure the mapping is left untouched.
 */
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "map_ptr %p", map_ptr);

	struct pmem2_map *map = *map_ptr;

	int ret = pmem2_unregister_mapping(map);
	if (ret != 0)
		return ret;

	ret = unmap(map->addr, map->reserved_length);
	if (ret != 0)
		return ret;

	VALGRIND_REMOVE_PMEM_MAPPING(map->addr, map->content_length);

	Free(map);
	*map_ptr = NULL;

	return 0;
}
| 13,869 | 25.879845 | 96 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/auto_flush_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* auto_flush_windows.c -- Windows auto flush detection
*/
#include <windows.h>
#include <inttypes.h>
#include "alloc.h"
#include "out.h"
#include "os.h"
#include "endian.h"
#include "auto_flush_windows.h"
/*
 * is_nfit_available -- (internal) check if platform supports NFIT table.
 *
 * Returns 1 when the ACPI firmware-table list contains the "NFIT"
 * signature, 0 when it does not and -1 on error.
 */
static int
is_nfit_available()
{
	LOG(3, "is_nfit_available()");
	DWORD signatures_size;
	char *signatures = NULL;
	int is_nfit = 0;
	DWORD offset = 0;
	/* first call with a NULL buffer yields the required size in bytes */
	signatures_size = EnumSystemFirmwareTables(ACPI_SIGNATURE, NULL, 0);
	if (signatures_size == 0) {
		ERR("!EnumSystemFirmwareTables");
		return -1;
	}
	/* +1 for the NUL terminator appended below */
	signatures = (char *)Malloc(signatures_size + 1);
	if (signatures == NULL) {
		ERR("!malloc");
		return -1;
	}
	/* NOTE(review): 'ret' is int vs DWORD size -- signed/unsigned compare */
	int ret = EnumSystemFirmwareTables(ACPI_SIGNATURE,
					signatures, signatures_size);
	signatures[signatures_size] = '\0';
	if (ret != signatures_size) {
		ERR("!EnumSystemFirmwareTables");
		goto err;
	}
	/*
	 * The buffer is a packed list of 4-byte table signatures; the
	 * trailing NUL guarantees strncmp() stops at the buffer end.
	 */
	while (offset <= signatures_size) {
		int nfit_sig = strncmp(signatures + offset,
				NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
		if (nfit_sig == 0) {
			is_nfit = 1;
			break;
		}
		offset += NFIT_SIGNATURE_LEN;
	}
	Free(signatures);
	return is_nfit;
err:
	Free(signatures);
	return -1;
}
/*
* is_auto_flush_cap_set -- (internal) check if specific
* capabilities bits are set.
*
* ACPI 6.2A Specification:
* Bit[0] - CPU Cache Flush to NVDIMM Durability on
* Power Loss Capable. If set to 1, indicates that platform
* ensures the entire CPU store data path is flushed to
* persistent memory on system power loss.
* Bit[1] - Memory Controller Flush to NVDIMM Durability on Power Loss Capable.
* If set to 1, indicates that platform provides mechanisms to automatically
* flush outstanding write data from the memory controller to persistent memory
* in the event of platform power loss. Note: If bit 0 is set to 1 then this bit
* shall be set to 1 as well.
*/
static int
is_auto_flush_cap_set(uint32_t capabilities)
{
	LOG(3, "is_auto_flush_cap_set capabilities 0x%" PRIx32, capabilities);

	/* bit 0 -- CPU cache flush, bit 1 -- memory controller flush */
	int cpu_cache_flush = CHECK_BIT(capabilities, 0);
	int mem_ctrl_flush = CHECK_BIT(capabilities, 1);
	LOG(15, "CPU_cache_flush %d, memory_controller_flush %d",
		cpu_cache_flush, mem_ctrl_flush);

	/* eADR requires both flush capabilities to be present */
	return (cpu_cache_flush == 1 && mem_ctrl_flush == 1) ? 1 : 0;
}
/*
 * parse_nfit_buffer -- (internal) parse nfit buffer
 * if platform_capabilities struct is available return pcs structure.
 *
 * Walks the NFIT sub-tables looking for the Platform Capabilities
 * Structure; returns a zeroed structure when none is found.
 */
static struct platform_capabilities
parse_nfit_buffer(const unsigned char *nfit_buffer, unsigned long buffer_size)
{
	LOG(3, "parse_nfit_buffer nfit_buffer %s, buffer_size %lu",
		nfit_buffer, buffer_size);
	uint16_t type;
	uint16_t length;
	size_t offset = sizeof(struct nfit_header);
	struct platform_capabilities pcs = {0};
	/*
	 * Every NFIT sub-table starts with a 16-bit type followed by a
	 * 16-bit length (ACPI 6.2A).  Read both fields whole -- reading
	 * only their low bytes (as the code previously did) desyncs the
	 * walk for any entry longer than 255 bytes.
	 */
	while (offset + 2 * sizeof(uint16_t) <= buffer_size) {
		memcpy(&type, nfit_buffer + offset, sizeof(type));
		memcpy(&length, nfit_buffer + offset + 2, sizeof(length));
		/* a zero-length entry would loop forever -- treat as malformed */
		if (length == 0)
			break;
		if (type == PCS_TYPE_NUMBER) {
			if (length == sizeof(struct platform_capabilities)) {
				memmove(&pcs, nfit_buffer + offset, length);
				return pcs;
			}
		}
		offset += length;
	}
	return pcs;
}
/*
 * pmem2_auto_flush -- check if platform supports auto flush.
 *
 * Returns 1 when the ACPI NFIT platform capabilities indicate eADR
 * (CPU cache and memory controller flush on power loss), 0 when auto
 * flush is not supported, and -1 on error.
 */
int
pmem2_auto_flush(void)
{
	LOG(3, NULL);
	DWORD nfit_buffer_size = 0;
	DWORD nfit_written = 0;
	PVOID nfit_buffer = NULL;
	struct nfit_header *nfit_data;
	struct platform_capabilities *pc = NULL;
	int eADR = 0;
	int is_nfit = is_nfit_available();
	if (is_nfit == 0) {
		LOG(15, "ACPI NFIT table not available");
		return 0;
	}
	/* at this point anything other than 1 is an error */
	if (is_nfit < 0 || is_nfit != 1) {
		LOG(1, "!is_nfit_available");
		return -1;
	}
	/* get the entire nfit size */
	nfit_buffer_size = GetSystemFirmwareTable(
		(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, NULL, 0);
	if (nfit_buffer_size == 0) {
		ERR("!GetSystemFirmwareTable");
		return -1;
	}
	/* reserve buffer */
	nfit_buffer = (unsigned char *)Malloc(nfit_buffer_size);
	if (nfit_buffer == NULL) {
		ERR("!malloc");
		goto err;
	}
	/* write actual nfit to buffer */
	nfit_written = GetSystemFirmwareTable(
		(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE,
		nfit_buffer, nfit_buffer_size);
	if (nfit_written == 0) {
		ERR("!GetSystemFirmwareTable");
		goto err;
	}
	/* a short read means the table changed between the two calls */
	if (nfit_buffer_size != nfit_written) {
		errno = ERROR_INVALID_DATA;
		ERR("!GetSystemFirmwareTable invalid data");
		goto err;
	}
	nfit_data = (struct nfit_header *)nfit_buffer;
	/* sanity-check the table signature before parsing */
	int nfit_sig = strncmp(nfit_data->signature,
				NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
	if (nfit_sig != 0) {
		ERR("!NFIT buffer has invalid data");
		goto err;
	}
	struct platform_capabilities pcs = parse_nfit_buffer(
		nfit_buffer, nfit_buffer_size);
	eADR = is_auto_flush_cap_set(pcs.capabilities);
	Free(nfit_buffer);
	return eADR;
err:
	Free(nfit_buffer);
	return -1;
}
| 4,857 | 23.535354 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/badblocks_ndctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"
#include "file.h"
#include "out.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "extent.h"
typedef int pmem2_badblock_next_type(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
typedef void *pmem2_badblock_get_next_type(
struct pmem2_badblock_context *bbctx);
/* state of one bad-block iteration over a single data source */
struct pmem2_badblock_context {
	/* file descriptor */
	int fd;
	/* pmem2 file type */
	enum pmem2_file_type file_type;
	/* ndctl context */
	struct ndctl_ctx *ctx;
	/*
	 * Function pointer to:
	 * - pmem2_badblock_next_namespace() or
	 * - pmem2_badblock_next_region()
	 */
	pmem2_badblock_next_type *pmem2_badblock_next_func;
	/*
	 * Function pointer to:
	 * - pmem2_namespace_get_first_badblock() or
	 * - pmem2_namespace_get_next_badblock() or
	 * - pmem2_region_get_first_badblock() or
	 * - pmem2_region_get_next_badblock()
	 */
	pmem2_badblock_get_next_type *pmem2_badblock_get_next_func;
	/* needed only by the ndctl namespace badblock iterator */
	struct ndctl_namespace *ndns;
	/* needed only by the ndctl region badblock iterator */
	struct {
		struct ndctl_bus *bus;
		struct ndctl_region *region;
		unsigned long long ns_res; /* address of the namespace */
		unsigned long long ns_beg; /* the beginning of the namespace */
		unsigned long long ns_end; /* the end of the namespace */
	} rgn;
	/* file's extents */
	struct extents *exts;
	/* iteration state for intersecting bad blocks with extents */
	unsigned first_extent;
	struct pmem2_badblock last_bb;
};
/* forward declarations */
static int pmem2_badblock_next_namespace(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static int pmem2_badblock_next_region(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static void *pmem2_namespace_get_first_badblock(
struct pmem2_badblock_context *bbctx);
static void *pmem2_region_get_first_badblock(
struct pmem2_badblock_context *bbctx);
/*
 * badblocks_get_namespace_bounds -- (internal) returns the bounds
 *                                   (offset and size) of the given namespace
 *                                   relative to the beginning of its region
 *
 * Returns 0 on success, PMEM2_E_CANNOT_READ_BOUNDS when libndctl cannot
 * report the namespace resource/size, or PMEM2_E_ERRNO for region errors.
 */
static int
badblocks_get_namespace_bounds(struct ndctl_region *region,
				struct ndctl_namespace *ndns,
				unsigned long long *ns_offset,
				unsigned long long *ns_size)
{
	LOG(3, "region %p namespace %p ns_offset %p ns_size %p",
		region, ndns, ns_offset, ns_size);
	/* the namespace mode decides which API exposes its resource/size */
	struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns);
	struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);
	ASSERTne(ns_offset, NULL);
	ASSERTne(ns_size, NULL);
	if (pfn) {
		*ns_offset = ndctl_pfn_get_resource(pfn);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(pfn) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		*ns_size = ndctl_pfn_get_size(pfn);
		if (*ns_size == ULLONG_MAX) {
			ERR("(pfn) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	} else if (dax) {
		*ns_offset = ndctl_dax_get_resource(dax);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(dax) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		*ns_size = ndctl_dax_get_size(dax);
		if (*ns_size == ULLONG_MAX) {
			ERR("(dax) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		LOG(10, "(dax) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	} else { /* raw or btt */
		*ns_offset = ndctl_namespace_get_resource(ndns);
		if (*ns_offset == ULLONG_MAX) {
			ERR("(raw/btt) cannot read offset of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		*ns_size = ndctl_namespace_get_size(ndns);
		if (*ns_size == ULLONG_MAX) {
			ERR("(raw/btt) cannot read size of the namespace");
			return PMEM2_E_CANNOT_READ_BOUNDS;
		}
		LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu",
			*ns_offset, *ns_size);
	}
	/* both resources are absolute -- rebase onto the region start */
	unsigned long long region_offset = ndctl_region_get_resource(region);
	if (region_offset == ULLONG_MAX) {
		ERR("!cannot read offset of the region");
		return PMEM2_E_ERRNO;
	}
	LOG(10, "region_offset 0x%llx", region_offset);
	*ns_offset -= region_offset;
	return 0;
}
/*
 * badblocks_devdax_clear_one_badblock -- (internal) clear one bad block
 *                                        in the dax device
 *
 * Issues an Address Range Scrub capability query for the given range and
 * then a Clear Error command through the bus.  Returns 0 on success or
 * a negative pmem2/errno-style error code.
 */
static int
badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus,
				unsigned long long address,
				unsigned long long length)
{
	LOG(3, "bus %p address 0x%llx length %llu (bytes)",
		bus, address, length);
	int ret;
	struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus,
					address, length);
	if (cmd_ars_cap == NULL) {
		ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		return PMEM2_E_ERRNO;
	}
	ret = ndctl_cmd_submit(cmd_ars_cap);
	if (ret) {
		ERR("ndctl_cmd_submit() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		/* ndctl_cmd_submit() returns -errno */
		goto out_ars_cap;
	}
	struct ndctl_range range;
	ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range);
	if (ret) {
		ERR("ndctl_cmd_ars_cap_get_range() failed");
		/* ndctl_cmd_ars_cap_get_range() returns -errno */
		goto out_ars_cap;
	}
	struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error(
		range.address, range.length, cmd_ars_cap);
	/*
	 * The command allocation can fail -- do not hand a NULL command
	 * to ndctl_cmd_submit() (previously unchecked).
	 */
	if (cmd_clear_error == NULL) {
		ERR("ndctl_bus_cmd_new_clear_error() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		ret = PMEM2_E_ERRNO;
		goto out_ars_cap;
	}
	ret = ndctl_cmd_submit(cmd_clear_error);
	if (ret) {
		ERR("ndctl_cmd_submit() failed (bus '%s')",
			ndctl_bus_get_provider(bus));
		/* ndctl_cmd_submit() returns -errno */
		goto out_clear_error;
	}
	size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error);
	LOG(4, "cleared %zu out of %llu bad blocks", cleared, length);
	ASSERT(cleared <= length);
	if (cleared < length) {
		ERR("failed to clear %llu out of %llu bad blocks",
			length - cleared, length);
		errno = ENXIO; /* ndctl handles such error in this way */
		ret = PMEM2_E_ERRNO;
	} else {
		ret = 0;
	}
out_clear_error:
	ndctl_cmd_unref(cmd_clear_error);
out_ars_cap:
	ndctl_cmd_unref(cmd_ars_cap);
	return ret;
}
/*
 * pmem2_badblock_context_new -- allocate and create a new bad block context
 *
 * Selects the proper badblock iterator for the source: the namespace
 * iterator for fsdax namespaces, the (privileged) region iterator for
 * everything else.  For regular files it also reads the file's extents.
 */
int
pmem2_badblock_context_new(const struct pmem2_source *src,
	struct pmem2_badblock_context **bbctx)
{
	LOG(3, "src %p bbctx %p", src, bbctx);
	ASSERTne(bbctx, NULL);
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support bad blocks");
		return PMEM2_E_NOSUPP;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_FD);
	struct ndctl_ctx *ctx;
	struct ndctl_region *region;
	struct ndctl_namespace *ndns;
	struct pmem2_badblock_context *tbbctx = NULL;
	enum pmem2_file_type pmem2_type;
	int ret = PMEM2_E_UNKNOWN;
	*bbctx = NULL;
	/* ndctl_new() returns -errno on failure */
	errno = ndctl_new(&ctx) * (-1);
	if (errno) {
		ERR("!ndctl_new");
		return PMEM2_E_ERRNO;
	}
	pmem2_type = src->value.ftype;
	ret = pmem2_region_namespace(ctx, src, &region, &ndns);
	if (ret) {
		LOG(1, "getting region and namespace failed");
		goto exit_ndctl_unref;
	}
	tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret);
	if (ret)
		goto exit_ndctl_unref;
	tbbctx->fd = src->value.fd;
	tbbctx->file_type = pmem2_type;
	tbbctx->ctx = ctx;
	if (region == NULL || ndns == NULL) {
		/* no matching device was found -- iteration yields nothing */
		*bbctx = tbbctx;
		return 0;
	}
	if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) {
		tbbctx->ndns = ndns;
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_namespace;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_namespace_get_first_badblock;
	} else {
		unsigned long long ns_beg, ns_size, ns_end;
		ret = badblocks_get_namespace_bounds(
			region, ndns,
			&ns_beg, &ns_size);
		if (ret) {
			LOG(1, "cannot read namespace's bounds");
			goto error_free_all;
		}
		ns_end = ns_beg + ns_size - 1;
		LOG(10,
			"namespace: begin %llu, end %llu size %llu (in 512B sectors)",
			B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size));
		tbbctx->rgn.bus = ndctl_region_get_bus(region);
		tbbctx->rgn.region = region;
		tbbctx->rgn.ns_beg = ns_beg;
		tbbctx->rgn.ns_end = ns_end;
		/* absolute address of the namespace, needed for clearing */
		tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region);
		tbbctx->pmem2_badblock_next_func =
			pmem2_badblock_next_region;
		tbbctx->pmem2_badblock_get_next_func =
			pmem2_region_get_first_badblock;
	}
	if (pmem2_type == PMEM2_FTYPE_REG) {
		/* only regular files have extents */
		ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts);
		if (ret) {
			LOG(1, "getting extents of fd %i failed",
				src->value.fd);
			goto error_free_all;
		}
	}
	/* set the context */
	*bbctx = tbbctx;
	return 0;
error_free_all:
	pmem2_extents_destroy(&tbbctx->exts);
	Free(tbbctx);
exit_ndctl_unref:
	ndctl_unref(ctx);
	return ret;
}
/*
 * pmem2_badblock_context_delete -- delete and free the bad block context
 * (a NULL *bbctx is a no-op); clears *bbctx afterwards
 */
void
pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	ASSERTne(bbctx, NULL);

	struct pmem2_badblock_context *ctx = *bbctx;
	if (ctx == NULL)
		return;

	pmem2_extents_destroy(&ctx->exts);
	ndctl_unref(ctx->ctx);
	Free(ctx);

	*bbctx = NULL;
}
/*
 * pmem2_namespace_get_next_badblock -- (internal) wrapper for
 *                                      ndctl_namespace_get_next_badblock
 *
 * Returns NULL when there are no more bad blocks in the namespace.
 */
static void *
pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	return ndctl_namespace_get_next_badblock(bbctx->ndns);
}
/*
 * pmem2_namespace_get_first_badblock -- (internal) wrapper for
 *                                       ndctl_namespace_get_first_badblock
 *
 * Starts the iteration and switches the context to the 'next' callback,
 * so subsequent calls continue instead of restarting.
 */
static void *
pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock;
	return ndctl_namespace_get_first_badblock(bbctx->ndns);
}
/*
 * pmem2_region_get_next_badblock -- (internal) wrapper for
 *                                   ndctl_region_get_next_badblock
 *
 * Returns NULL when there are no more bad blocks in the region.
 */
static void *
pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	return ndctl_region_get_next_badblock(bbctx->rgn.region);
}
/*
 * pmem2_region_get_first_badblock -- (internal) wrapper for
 *                                    ndctl_region_get_first_badblock
 *
 * Starts the iteration and switches the context to the 'next' callback,
 * so subsequent calls continue instead of restarting.
 */
static void *
pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
	LOG(3, "bbctx %p", bbctx);
	bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock;
	return ndctl_region_get_first_badblock(bbctx->rgn.region);
}
/*
 * pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next()
 *                                  called for ndctl with namespace badblock
 *                                  iterator
 *
 * This function works only for fsdax, but does not require any special
 * permissions.
 */
static int
pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx,
				struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	struct badblock *found = bbctx->pmem2_badblock_get_next_func(bbctx);
	if (found == NULL)
		return PMEM2_E_NO_BAD_BLOCK_FOUND;

	/*
	 * libndctl reports offset and length in 512B sectors, the offset
	 * relative to the beginning of the namespace -- convert to bytes.
	 */
	bb->offset = SEC2B(found->offset);
	bb->length = SEC2B(found->len);

	return 0;
}
/*
 * pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next()
 *                               called for ndctl with region badblock iterator
 *
 * This function works for all types of namespaces, but requires read access to
 * privileged device information.
 */
static int
pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx,
				struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);
	unsigned long long bb_beg, bb_end;
	unsigned long long beg, end;
	struct badblock *bbn;
	unsigned long long ns_beg = bbctx->rgn.ns_beg;
	unsigned long long ns_end = bbctx->rgn.ns_end;
	/* skip region bad blocks that do not intersect our namespace */
	do {
		bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
		if (bbn == NULL)
			return PMEM2_E_NO_BAD_BLOCK_FOUND;
		LOG(10,
			"region bad block: begin %llu end %llu length %u (in 512B sectors)",
			bbn->offset, bbn->offset + bbn->len - 1, bbn->len);
		/*
		 * libndctl returns offset and length of a bad block
		 * both expressed in 512B sectors. Offset is relative
		 * to the beginning of the region.
		 */
		bb_beg = SEC2B(bbn->offset);
		bb_end = bb_beg + SEC2B(bbn->len) - 1;
	} while (bb_beg > ns_end || ns_beg > bb_end);
	/* clamp the bad block to the namespace boundaries */
	beg = (bb_beg > ns_beg) ? bb_beg : ns_beg;
	end = (bb_end < ns_end) ? bb_end : ns_end;
	/*
	 * Form a new bad block structure with offset and length
	 * expressed in bytes and offset relative to the beginning
	 * of the namespace.
	 */
	bb->offset = beg - ns_beg;
	bb->length = end - beg + 1;
	LOG(4,
		"namespace bad block: begin %llu end %llu length %llu (in 512B sectors)",
		B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1);
	return 0;
}
/*
 * pmem2_badblock_next -- get the next bad block
 *
 * For DAX devices, bad blocks are returned as reported by the iterator.
 * For regular files each device-level bad block is intersected with the
 * file's extents and translated to a block-aligned logical file range.
 * Returns 0 and fills *bb, or PMEM2_E_NO_BAD_BLOCK_FOUND.
 */
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
	struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);
	struct pmem2_badblock bbn;
	unsigned long long bb_beg;
	unsigned long long bb_end;
	unsigned long long bb_len;
	unsigned long long bb_off;
	unsigned long long ext_beg;
	unsigned long long ext_end;
	unsigned e;
	int ret;
	if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) {
		/* no matching device was found -- nothing to iterate */
		return PMEM2_E_NO_BAD_BLOCK_FOUND;
	}
	struct extents *exts = bbctx->exts;
	/* DAX devices have no extents */
	if (!exts) {
		ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
		*bb = bbn;
		return ret;
	}
	/*
	 * There is at least one extent.
	 * Loop until:
	 * 1) a bad block overlaps with an extent or
	 * 2) there are no more bad blocks.
	 */
	int bb_overlaps_with_extent = 0;
	do {
		if (bbctx->last_bb.length) {
			/*
			 * We have saved the last bad block to check it
			 * with the next extent saved
			 * in bbctx->first_extent.
			 */
			ASSERTne(bbctx->first_extent, 0);
			bbn = bbctx->last_bb;
			bbctx->last_bb.offset = 0;
			bbctx->last_bb.length = 0;
		} else {
			ASSERTeq(bbctx->first_extent, 0);
			/* look for the next bad block */
			ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
			if (ret)
				return ret;
		}
		bb_beg = bbn.offset;
		bb_end = bb_beg + bbn.length - 1;
		for (e = bbctx->first_extent;
				e < exts->extents_count;
				e++) {
			ext_beg = exts->extents[e].offset_physical;
			ext_end = ext_beg + exts->extents[e].length - 1;
			/* check if the bad block overlaps with the extent */
			if (bb_beg <= ext_end && ext_beg <= bb_end) {
				/* bad block overlaps with the extent */
				bb_overlaps_with_extent = 1;
				if (bb_end > ext_end &&
				    e + 1 < exts->extents_count) {
					/*
					 * The bad block is longer than
					 * the extent and there are
					 * more extents.
					 * Save the current bad block
					 * to check it with the next extent.
					 */
					bbctx->first_extent = e + 1;
					bbctx->last_bb = bbn;
				} else {
					/*
					 * All extents were checked
					 * with the current bad block.
					 */
					bbctx->first_extent = 0;
					bbctx->last_bb.length = 0;
					bbctx->last_bb.offset = 0;
				}
				break;
			}
		}
		/* check all extents with the next bad block */
		if (bb_overlaps_with_extent == 0) {
			bbctx->first_extent = 0;
			bbctx->last_bb.length = 0;
			bbctx->last_bb.offset = 0;
		}
	} while (bb_overlaps_with_extent == 0);
	/* bad block overlaps with an extent -- clamp it to the extent */
	bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg;
	bb_end = (bb_end < ext_end) ? bb_end : ext_end;
	bb_len = bb_end - bb_beg + 1;
	/* translate the physical offset to the logical file offset */
	bb_off = bb_beg + exts->extents[e].offset_logical
			- exts->extents[e].offset_physical;
	LOG(10, "bad block found: physical offset: %llu, length: %llu",
		bb_beg, bb_len);
	/* make sure the offset is block-aligned */
	unsigned long long not_block_aligned = bb_off & (exts->blksize - 1);
	if (not_block_aligned) {
		bb_off -= not_block_aligned;
		bb_len += not_block_aligned;
	}
	/* make sure the length is block-aligned */
	bb_len = ALIGN_UP(bb_len, exts->blksize);
	LOG(4, "bad block found: logical offset: %llu, length: %llu",
		bb_off, bb_len);
	/*
	 * Return the bad block with offset and length
	 * expressed in bytes and offset relative
	 * to the beginning of the file.
	 */
	bb->offset = bb_off;
	bb->length = bb_len;
	return 0;
}
/*
 * pmem2_badblock_clear_fsdax -- (internal) clear one bad block
 *                               in a FSDAX device
 *
 * Punches a hole over the bad range and reallocates it, which makes the
 * file system back the range with fresh, healthy blocks.
 */
static int
pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb)
{
	LOG(3, "fd %i badblock %p", fd, bb);
	ASSERTne(bb, NULL);
	LOG(10,
		"clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)",
		fd, B2SEC(bb->offset), B2SEC(bb->length));
	/* fallocate() takes offset as the off_t type */
	if (bb->offset > (size_t)INT64_MAX) {
		ERR("bad block's offset is greater than INT64_MAX");
		return PMEM2_E_OFFSET_OUT_OF_RANGE;
	}
	/* fallocate() takes length as the off_t type */
	if (bb->length > (size_t)INT64_MAX) {
		ERR("bad block's length is greater than INT64_MAX");
		return PMEM2_E_LENGTH_OUT_OF_RANGE;
	}
	off_t offset = (off_t)bb->offset;
	off_t length = (off_t)bb->length;
	/* deallocate bad blocks */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset, length)) {
		ERR("!fallocate");
		return PMEM2_E_ERRNO;
	}
	/* allocate new blocks */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length)) {
		ERR("!fallocate");
		return PMEM2_E_ERRNO;
	}
	return 0;
}
/*
 * pmem2_badblock_clear_devdax -- (internal) clear one bad block
 *                                in a DAX device
 *
 * Translates the namespace-relative offset to an absolute address
 * (rgn.ns_res) and clears the range through the ndctl bus.
 */
static int
pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx,
	const struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p bb %p", bbctx, bb);
	ASSERTne(bb, NULL);
	ASSERTne(bbctx, NULL);
	ASSERTne(bbctx->rgn.bus, NULL);
	ASSERTne(bbctx->rgn.ns_res, 0);
	LOG(4,
		"clearing a bad block: offset %zu length %zu (in 512B sectors)",
		B2SEC(bb->offset), B2SEC(bb->length));
	int ret = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus,
				bb->offset + bbctx->rgn.ns_res,
				bb->length);
	if (ret) {
		LOG(1,
			"failed to clear a bad block: offset %zu length %zu (in 512B sectors)",
			B2SEC(bb->offset),
			B2SEC(bb->length));
		return ret;
	}
	return 0;
}
/*
 * pmem2_badblock_clear -- clear one bad block, dispatching on the file
 * type recorded when the context was created
 */
int
pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
			const struct pmem2_badblock *bb)
{
	LOG(3, "bbctx %p badblock %p", bbctx, bb);
	ASSERTne(bbctx, NULL);
	ASSERTne(bb, NULL);

	if (bbctx->file_type != PMEM2_FTYPE_DEVDAX) {
		/* only regular files are expected here */
		ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG);
		return pmem2_badblock_clear_fsdax(bbctx->fd, bb);
	}

	return pmem2_badblock_clear_devdax(bbctx, bb);
}
| 19,316 | 24.218016 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/region_namespace_ndctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* region_namespace_ndctl.h -- internal definitions for libpmem2
* common ndctl functions
*/
#ifndef PMDK_REGION_NAMESPACE_NDCTL_H
#define PMDK_REGION_NAMESPACE_NDCTL_H 1
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
/* iterate over every namespace of every region on every bus in 'ctx' */
#define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \
	ndctl_bus_foreach(ctx, bus) \
	ndctl_region_foreach(bus, region) \
	ndctl_namespace_foreach(region, ndns)
/*
 * Finds the ndctl region and namespace matching the given source;
 * NOTE(review): callers check the outputs against NULL, which suggests
 * both may be NULL when no device matches -- confirm in the definition.
 */
int pmem2_region_namespace(struct ndctl_ctx *ctx,
				const struct pmem2_source *src,
				struct ndctl_region **pregion,
				struct ndctl_namespace **pndns);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
| 754 | 21.878788 | 64 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/vm_reservation.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vm_reservation.c -- implementation of virtual memory allocation API
*/
#include "libpmem2.h"
/*
 * pmem2_vm_reservation_new -- creates new virtual memory reservation
 *
 * Stub for platforms without reservation support: all parameters are
 * ignored and PMEM2_E_NOSUPP is always returned.
 */
int
pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
		size_t size, void *address)
{
	return PMEM2_E_NOSUPP;
}
/*
 * pmem2_vm_reservation_delete -- deletes reservation bound to
 *                                structure pmem2_vm_reservation
 *
 * Stub for platforms without reservation support: the parameter is
 * ignored and PMEM2_E_NOSUPP is always returned.
 */
int
pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv)
{
	return PMEM2_E_NOSUPP;
}
| 614 | 20.206897 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/usc_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_windows.c -- pmem2 usc function for windows
*/
#include "alloc.h"
#include "source.h"
#include "out.h"
#include "libpmem2.h"
#include "pmem2_utils.h"
#define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
#define VOLUME_PATH_SIZE sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */)
/*
 * get_volume_handle -- returns volume handle
 *
 * Opens the volume containing the file referenced by 'handle' and stores
 * the volume handle in *volume_handle.  On every failure path
 * *volume_handle is left equal to INVALID_HANDLE_VALUE, so callers can
 * safely test it (previously it was left uninitialized on early errors).
 * Returns 0 on success or a pmem2 error code.
 */
static int
get_volume_handle(HANDLE handle, HANDLE *volume_handle)
{
	wchar_t *volume;
	wchar_t tmp[10];

	/* keep the output well-defined even on the early error paths */
	*volume_handle = INVALID_HANDLE_VALUE;

	/* query the required path length (in characters) first */
	DWORD len =
		GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID);
	if (len == 0) {
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}
	len *= sizeof(wchar_t);
	int err;
	volume = pmem2_malloc(len, &err);
	if (volume == NULL)
		return err;
	if (!GetFinalPathNameByHandleW(handle, volume, len,
		VOLUME_NAME_GUID)) {
		Free(volume);
		ERR("!!GetFinalPathNameByHandleW");
		return pmem2_lasterror_to_err();
	}
	/* truncate the path right after "\\?\Volume{GUID}" */
	ASSERTeq(volume[VOLUME_PATH_SIZE], '\\');
	volume[VOLUME_PATH_SIZE] = '\0';
	*volume_handle = CreateFileW(volume, /* path to the file */
		/* request access to send ioctl to the file */
		FILE_READ_ATTRIBUTES,
		/* do not block access to the file */
		FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
		NULL, /* security attributes */
		OPEN_EXISTING, /* open only if it exists */
		FILE_ATTRIBUTE_NORMAL, /* no attributes */
		NULL); /* used only for new files */
	Free(volume);
	if (*volume_handle == INVALID_HANDLE_VALUE) {
		ERR("!!CreateFileW");
		return pmem2_lasterror_to_err();
	}
	return 0;
}
/*
 * get_device_guid -- (internal) returns the GUID of the storage device
 * backing the given file handle, or an error code when it cannot be read
 */
static int
get_device_guid(HANDLE handle, GUID *guid)
{
	HANDLE vHandle = INVALID_HANDLE_VALUE;
	int ret = get_volume_handle(handle, &vHandle);
	/*
	 * Check the returned status, not the handle -- on early failures
	 * get_volume_handle() may not have set the handle at all, so the
	 * previous comparison read an uninitialized variable.
	 */
	if (ret)
		return ret;
	STORAGE_DEVICE_NUMBER_EX sdn;
	sdn.DeviceNumber = -1;
	DWORD dwBytesReturned = 0;
	if (!DeviceIoControl(vHandle,
		IOCTL_STORAGE_GET_DEVICE_NUMBER_EX,
		NULL, 0,
		&sdn, sizeof(sdn),
		&dwBytesReturned, NULL)) {
		/*
		 * IOCTL_STORAGE_GET_DEVICE_NUMBER_EX is not supported
		 * on this server
		 */
		ERR(
			"Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system");
		CloseHandle(vHandle);
		return PMEM2_E_NOSUPP;
	}
	*guid = sdn.DeviceGuid;
	CloseHandle(vHandle);
	return 0;
}
/*
 * pmem2_source_device_idW -- returns the device GUID of the source as a
 * wide string; when 'id' is NULL only the required buffer size (in bytes)
 * is stored in *len
 */
int
pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id,
	size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
	if (id == NULL) {
		/* size query only */
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}
	if (*len < GUID_SIZE * sizeof(*id)) {
		/* fixed typo in the user-facing message ("to" -> "too") */
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}
	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;
	_snwprintf(id, GUID_SIZE,
		L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
		guid.Data4[1], guid.Data4[2], guid.Data4[3],
		guid.Data4[4], guid.Data4[5], guid.Data4[6],
		guid.Data4[7]);
	return 0;
}
/*
 * pmem2_source_device_idU -- returns the device GUID of the source as a
 * narrow string; when 'id' is NULL only the required buffer size (in
 * bytes) is stored in *len
 */
int
pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len)
{
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not have device id");
		return PMEM2_E_NOSUPP;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
	if (id == NULL) {
		/* size query only */
		*len = GUID_SIZE * sizeof(*id);
		return 0;
	}
	if (*len < GUID_SIZE * sizeof(*id)) {
		/* fixed typo in the user-facing message ("to" -> "too") */
		ERR("id buffer is too small");
		return PMEM2_E_BUFFER_TOO_SMALL;
	}
	GUID guid;
	int ret = get_device_guid(src->value.handle, &guid);
	if (ret)
		return ret;
	if (util_snprintf(id, GUID_SIZE,
		"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
		guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
		guid.Data4[1], guid.Data4[2], guid.Data4[3],
		guid.Data4[4], guid.Data4[5], guid.Data4[6],
		guid.Data4[7]) < 0) {
		ERR("!snprintf");
		return PMEM2_E_ERRNO;
	}
	return 0;
}
/*
 * pmem2_source_device_usc -- returns the unsafe shutdown count of the
 * device backing the source, or PMEM2_E_NOSUPP when the platform does
 * not expose it
 */
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
	LOG(3, "cfg %p, usc %p", src, usc);
	if (src->type == PMEM2_SOURCE_ANON) {
		ERR("Anonymous source does not support unsafe shutdown count");
		return PMEM2_E_NOSUPP;
	}
	ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
	*usc = 0;
	HANDLE vHandle = INVALID_HANDLE_VALUE;
	int err = get_volume_handle(src->value.handle, &vHandle);
	/*
	 * Check the returned status, not the handle -- on early failures
	 * get_volume_handle() may not have set the handle at all, so the
	 * previous comparison read an uninitialized variable.
	 */
	if (err)
		return err;
	STORAGE_PROPERTY_QUERY prop;
	DWORD dwSize;
	prop.PropertyId = StorageDeviceUnsafeShutdownCount;
	prop.QueryType = PropertyExistsQuery;
	prop.AdditionalParameters[0] = 0;
	STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret;
	/* first probe whether the property exists at all */
	BOOL bResult = DeviceIoControl(vHandle,
		IOCTL_STORAGE_QUERY_PROPERTY,
		&prop, sizeof(prop),
		&ret, sizeof(ret),
		(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
	if (!bResult) {
		ERR(
			"Getting unsafe shutdown count is not supported on this system");
		CloseHandle(vHandle);
		return PMEM2_E_NOSUPP;
	}
	/* then query the actual value */
	prop.QueryType = PropertyStandardQuery;
	bResult = DeviceIoControl(vHandle,
		IOCTL_STORAGE_QUERY_PROPERTY,
		&prop, sizeof(prop),
		&ret, sizeof(ret),
		(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
	CloseHandle(vHandle);
	if (!bResult) {
		ERR("!!DeviceIoControl");
		return pmem2_lasterror_to_err();
	}
	*usc = ret.UnsafeShutdownCount;
	return 0;
}
| 5,261 | 22.283186 | 93 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/ravl_interval.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.c -- ravl_interval implementation
*/
#include "alloc.h"
#include "map.h"
#include "ravl_interval.h"
#include "pmem2_utils.h"
#include "sys_util.h"
#include "os_thread.h"
#include "ravl.h"
/*
 * ravl_interval - structure representing two points
 * on the number line
 */
struct ravl_interval {
	struct ravl *tree;		/* tree of ravl_interval_node entries */
	ravl_interval_min *get_min;	/* callback: lower bound of an object */
	ravl_interval_max *get_max;	/* callback: upper bound of an object */
};
/*
 * ravl_interval_node - structure holding min, max functions and address
 */
struct ravl_interval_node {
	void *addr;			/* object the interval describes */
	ravl_interval_min *get_min;
	ravl_interval_max *get_max;
};
/*
 * ravl_interval_compare -- compare intervals by its boundaries,
 * no overlapping allowed
 */
static int
ravl_interval_compare(const void *lhs, const void *rhs)
{
	const struct ravl_interval_node *left = lhs;
	const struct ravl_interval_node *right = rhs;
	/* left lies entirely before right (its end at or before right's start) */
	if (left->get_min(left->addr) < right->get_min(right->addr) &&
			left->get_max(left->addr) <= right->get_min(right->addr))
		return -1;
	/*
	 * NOTE(review): unlike the -1 branch, this branch also fires when
	 * 'left' merely starts inside 'right' (an overlap), not only when
	 * it lies entirely after it -- confirm this asymmetry is intended,
	 * since the header comment says overlaps are not allowed.
	 */
	if (left->get_min(left->addr) > right->get_min(right->addr) &&
			left->get_max(left->addr) >= right->get_min(right->addr))
		return 1;
	return 0;
}
/*
 * ravl_interval_delete - finalize the ravl interval module and free it
 */
void
ravl_interval_delete(struct ravl_interval *ri)
{
	struct ravl *tree = ri->tree;

	ri->tree = NULL;
	ravl_delete(tree);
	Free(ri);
}
/*
 * ravl_interval_new -- initialize the ravl interval module; returns NULL
 * when either the structure or the underlying tree cannot be allocated
 */
struct ravl_interval *
ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max)
{
	int err;
	struct ravl_interval *ri = pmem2_malloc(sizeof(*ri), &err);
	if (err)
		return NULL;

	ri->tree = ravl_new_sized(ravl_interval_compare,
			sizeof(struct ravl_interval_node));
	if (ri->tree == NULL) {
		Free(ri);
		return NULL;
	}

	ri->get_min = get_min;
	ri->get_max = get_max;
	return ri;
}
/*
* ravl_interval_insert -- insert interval entry into the tree
*/
int
ravl_interval_insert(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node rin;
rin.addr = addr;
rin.get_min = ri->get_min;
rin.get_max = ri->get_max;
if (ravl_emplace_copy(ri->tree, &rin))
return PMEM2_E_ERRNO;
return 0;
}
/*
* ravl_interval_remove -- remove interval entry from the tree
*/
int
ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin)
{
struct ravl_node *node = ravl_find(ri->tree, rin,
RAVL_PREDICATE_EQUAL);
if (!node)
return PMEM2_E_MAPPING_NOT_FOUND;
ravl_remove(ri->tree, node);
return 0;
}
/*
 * ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to
 * the current one or at the same place
 */
static struct ravl_interval_node *
ravl_interval_find_prior_or_eq(struct ravl *tree,
		struct ravl_interval_node *rin)
{
	struct ravl_node *n = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL);
	if (n == NULL)
		return NULL;

	struct ravl_interval_node *found = ravl_data(n);

	/* an interval ending at or before the searched start does not overlap */
	if (found->get_max(found->addr) <= rin->get_min(rin->addr))
		return NULL;

	return found;
}
/*
 * ravl_interval_find_later -- look for an overlapping interval that starts
 * after the searched one
 */
static struct ravl_interval_node *
ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin)
{
	struct ravl_node *n = ravl_find(tree, rin, RAVL_PREDICATE_GREATER);

	if (n == NULL)
		return NULL;

	struct ravl_interval_node *found = ravl_data(n);

	/*
	 * A candidate that begins at or past the end of the searched
	 * range does not actually overlap it.
	 */
	if (found->get_min(found->addr) >= rin->get_max(rin->addr))
		return NULL;

	return found;
}
/*
* ravl_interval_find_equal -- find the interval with exact (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find_equal(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_node *node;
node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL);
if (!node)
return NULL;
return ravl_data(node);
}
/*
* ravl_interval_find -- find the earliest interval within (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_interval_node *cur;
cur = ravl_interval_find_prior_or_eq(ri->tree, &range);
if (!cur)
cur = ravl_interval_find_later(ri->tree, &range);
return cur;
}
/*
 * ravl_interval_data -- expose the user data carried by an interval node
 */
void *
ravl_interval_data(struct ravl_interval_node *rin)
{
	return rin->addr;
}
| 4,963 | 21.26009 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/map_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_windows.c -- pmem2_map (Windows)
*/
#include <stdbool.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because specified volume is not a direct access (DAX) volume"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
/*
 * create_mapping -- build a Windows file-mapping object spanning
 * offset + length bytes of the given file; on failure returns NULL
 * with *err holding the Win32 error code
 */
static HANDLE
create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect,
	unsigned long *err)
{
	size_t max_size = length + offset;
	SetLastError(0);
	HANDLE mh = CreateFileMapping(hfile,
		NULL, /* security attributes */
		protect,
		HIDWORD(max_size),
		LODWORD(max_size),
		NULL);

	*err = GetLastError();

	/* reuse of an existing mapping object is treated as a failure */
	if (mh == NULL || *err == ERROR_ALREADY_EXISTS) {
		ERR("!!CreateFileMapping");
		if (mh != NULL)
			CloseHandle(mh);
		return NULL;
	}

	/* if the handle is valid the last error is undefined */
	*err = 0;
	return mh;
}
/*
 * is_direct_access -- check if the specified volume is a
 * direct access (DAX) volume; 1 = DAX, 0 = not DAX, negative = error
 */
static int
is_direct_access(HANDLE fh)
{
	DWORD fs_flags;

	if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL,
			NULL, &fs_flags, NULL, 0)) {
		ERR("!!GetVolumeInformationByHandleW");
		/* always return a negative value */
		return pmem2_lasterror_to_err();
	}

	return (fs_flags & FILE_DAX_VOLUME) ? 1 : 0;
}
/*
 * pmem2_map -- map memory according to provided config
 *
 * Validates length/offset/granularity against the source, creates a
 * Windows file-mapping object, maps a view of it with the configured
 * protection and sharing, verifies the achievable store granularity,
 * and registers the mapping.  On success *map_ptr owns a newly
 * allocated struct pmem2_map.
 */
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
	struct pmem2_map **map_ptr)
{
	LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
	int ret = 0;
	unsigned long err = 0;
	size_t file_size;
	*map_ptr = NULL;

	if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
		ERR(
			"please define the max granularity requested for the mapping");
		return PMEM2_E_GRANULARITY_NOT_SET;
	}

	ret = pmem2_source_size(src, &file_size);
	if (ret)
		return ret;

	size_t src_alignment;
	ret = pmem2_source_alignment(src, &src_alignment);
	if (ret)
		return ret;

	size_t length;
	ret = pmem2_config_validate_length(cfg, file_size, src_alignment);
	if (ret)
		return ret;

	size_t effective_offset;
	ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
	if (ret)
		return ret;

	/* an anonymous source has no meaningful file offset */
	if (src->type == PMEM2_SOURCE_ANON)
		effective_offset = 0;

	/* without user-provided length, map to the end of the file */
	if (cfg->length)
		length = cfg->length;
	else
		length = file_size - effective_offset;

	HANDLE map_handle = INVALID_HANDLE_VALUE;
	if (src->type == PMEM2_SOURCE_HANDLE) {
		map_handle = src->value.handle;
	} else if (src->type == PMEM2_SOURCE_ANON) {
		/* no extra settings */
	} else {
		ASSERT(0);
	}

	DWORD proto = PAGE_READWRITE;
	DWORD access = FILE_MAP_ALL_ACCESS;

	/* Unsupported flag combinations */
	if ((cfg->protection_flag == PMEM2_PROT_NONE) ||
			(cfg->protection_flag == PMEM2_PROT_WRITE) ||
			(cfg->protection_flag == PMEM2_PROT_EXEC) ||
			(cfg->protection_flag == (PMEM2_PROT_WRITE |
					PMEM2_PROT_EXEC))) {
		ERR("Windows does not support "
				"this protection flag combination.");
		return PMEM2_E_NOSUPP;
	}

	/* Translate protection flags into Windows flags */
	if (cfg->protection_flag & PMEM2_PROT_WRITE) {
		if (cfg->protection_flag & PMEM2_PROT_EXEC) {
			proto = PAGE_EXECUTE_READWRITE;
			access = FILE_MAP_READ | FILE_MAP_WRITE |
					FILE_MAP_EXECUTE;
		} else {
			/*
			 * Due to the already done exclusion
			 * of incorrect combinations, PROT_WRITE
			 * implies PROT_READ
			 */
			proto = PAGE_READWRITE;
			access = FILE_MAP_READ | FILE_MAP_WRITE;
		}
	} else if (cfg->protection_flag & PMEM2_PROT_READ) {
		if (cfg->protection_flag & PMEM2_PROT_EXEC) {
			proto = PAGE_EXECUTE_READ;
			access = FILE_MAP_READ | FILE_MAP_EXECUTE;
		} else {
			proto = PAGE_READONLY;
			access = FILE_MAP_READ;
		}
	}

	if (cfg->sharing == PMEM2_PRIVATE) {
		if (cfg->protection_flag & PMEM2_PROT_EXEC) {
			proto = PAGE_EXECUTE_WRITECOPY;
			access = FILE_MAP_EXECUTE | FILE_MAP_COPY;
		} else {
			/*
			 * If FILE_MAP_COPY is set,
			 * protection is changed to read/write
			 */
			proto = PAGE_READONLY;
			access = FILE_MAP_COPY;
		}
	}

	/* create a file mapping handle */
	HANDLE mh = create_mapping(map_handle, effective_offset, length,
			proto, &err);
	if (!mh) {
		if (err == ERROR_ALREADY_EXISTS) {
			ERR("mapping already exists");
			return PMEM2_E_MAPPING_EXISTS;
		} else if (err == ERROR_ACCESS_DENIED) {
			return PMEM2_E_NO_ACCESS;
		}
		return pmem2_lasterror_to_err();
	}

	ret = pmem2_config_validate_addr_alignment(cfg, src);
	if (ret)
		/* BUG FIX: returning directly here leaked the handle 'mh' */
		goto err_close_mapping_handle;

	/* let's get addr from cfg struct */
	LPVOID addr_hint = cfg->addr;

	/* obtain a pointer to the mapping view */
	void *base = MapViewOfFileEx(mh,
		access,
		HIDWORD(effective_offset),
		LODWORD(effective_offset),
		length,
		addr_hint); /* hint address */

	if (base == NULL) {
		ERR("!!MapViewOfFileEx");
		if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
			DWORD ret_windows = GetLastError();
			if (ret_windows == ERROR_INVALID_ADDRESS)
				ret = PMEM2_E_MAPPING_EXISTS;
			else
				ret = pmem2_lasterror_to_err();
		} else {
			ret = pmem2_lasterror_to_err();
		}
		goto err_close_mapping_handle;
	}

	/* the view keeps the mapping alive; the handle is no longer needed */
	if (!CloseHandle(mh)) {
		ERR("!!CloseHandle");
		ret = pmem2_lasterror_to_err();
		goto err_unmap_base;
	}

	enum pmem2_granularity available_min_granularity =
		PMEM2_GRANULARITY_PAGE;
	if (src->type == PMEM2_SOURCE_HANDLE) {
		int direct_access = is_direct_access(src->value.handle);
		if (direct_access < 0) {
			ret = direct_access;
			goto err_unmap_base;
		}

		bool eADR = (pmem2_auto_flush() == 1);
		available_min_granularity =
			get_min_granularity(eADR, direct_access, cfg->sharing);
	} else if (src->type == PMEM2_SOURCE_ANON) {
		available_min_granularity = PMEM2_GRANULARITY_BYTE;
	} else {
		ASSERT(0);
	}

	if (available_min_granularity > cfg->requested_max_granularity) {
		/* renamed from 'err' to avoid shadowing the DWORD above */
		const char *gran_msg = granularity_err_msg
			[cfg->requested_max_granularity]
			[available_min_granularity];
		if (strcmp(gran_msg, GRAN_IMPOSSIBLE) == 0)
			FATAL(
				"unhandled granularity error: available_min_granularity: %d" \
				"requested_max_granularity: %d",
				available_min_granularity,
				cfg->requested_max_granularity);
		ERR("%s", gran_msg);
		ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
		goto err_unmap_base;
	}

	/* prepare pmem2_map structure */
	struct pmem2_map *map;
	map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
	if (!map)
		goto err_unmap_base;

	map->addr = base;
	/*
	 * XXX probably in some cases the reserved length > the content length.
	 * Maybe it is worth to do the research.
	 */
	map->reserved_length = length;
	map->content_length = length;
	map->effective_granularity = available_min_granularity;
	map->source = *src;
	pmem2_set_flush_fns(map);
	pmem2_set_mem_fns(map);

	ret = pmem2_register_mapping(map);
	if (ret)
		goto err_register;

	/* return a pointer to the pmem2_map structure */
	*map_ptr = map;

	return ret;

err_register:
	free(map);
err_unmap_base:
	UnmapViewOfFile(base);
	return ret;

err_close_mapping_handle:
	CloseHandle(mh);
	return ret;
}
/*
 * pmem2_unmap -- unmap the specified region and invalidate the caller's
 * pointer on success
 */
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
	LOG(3, "mapp %p", map_ptr);

	struct pmem2_map *map = *map_ptr;

	int ret = pmem2_unregister_mapping(map);
	if (ret)
		return ret;

	if (UnmapViewOfFile(map->addr) == 0) {
		ERR("!!UnmapViewOfFile");
		return pmem2_lasterror_to_err();
	}

	Free(map);
	*map_ptr = NULL;

	return 0;
}
| 8,611 | 23.123249 | 99 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/extent_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent_linux.c - implementation of the linux fs extent query API
*/
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "file.h"
#include "out.h"
#include "extent.h"
#include "alloc.h"
/*
 * pmem2_extents_create_get -- allocate extents structure and get extents
 * of the given file
 *
 * Uses the FS_IOC_FIEMAP ioctl to read the physical layout of the file
 * referred to by 'fd'.  Directories are rejected; for Device DAX only the
 * block size is filled in (no extents).  On success the caller owns the
 * returned structure and releases it with pmem2_extents_destroy().
 * Returns 0 on success or a negative pmem2 error code.
 */
int
pmem2_extents_create_get(int fd, struct extents **exts)
{
	LOG(3, "fd %i extents %p", fd, exts);

	ASSERT(fd > 2);
	ASSERTne(exts, NULL);

	enum pmem2_file_type pmem2_type;
	struct extents *pexts = NULL;
	struct fiemap *fmap = NULL;
	os_stat_t st;

	if (os_fstat(fd, &st) < 0) {
		ERR("!fstat %d", fd);
		return PMEM2_E_ERRNO;
	}

	int ret = pmem2_get_type_from_stat(&st, &pmem2_type);
	if (ret)
		return ret;

	/* directories do not have any extents */
	if (pmem2_type == PMEM2_FTYPE_DIR) {
		ERR(
			"checking extents does not make sense in case of directories");
		return PMEM2_E_INVALID_FILE_TYPE;
	}

	/* allocate extents structure */
	pexts = pmem2_zalloc(sizeof(struct extents), &ret);
	if (ret)
		return ret;

	/* save block size */
	LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize);
	pexts->blksize = (uint64_t)st.st_blksize;

	/* DAX device does not have any extents */
	if (pmem2_type == PMEM2_FTYPE_DEVDAX) {
		*exts = pexts;
		return 0;
	}

	ASSERTeq(pmem2_type, PMEM2_FTYPE_REG);

	/*
	 * First FIEMAP pass: with fm_extent_count == 0 the kernel only
	 * reports (via fm_mapped_extents) how many extents the file has.
	 */
	fmap = pmem2_zalloc(sizeof(struct fiemap), &ret);
	if (ret)
		goto error_free;

	fmap->fm_start = 0;
	fmap->fm_length = (size_t)st.st_size;
	fmap->fm_flags = 0;
	fmap->fm_extent_count = 0;
	fmap->fm_mapped_extents = 0;

	if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
		ERR("!fiemap ioctl() for fd=%d failed", fd);
		ret = PMEM2_E_ERRNO;
		goto error_free;
	}

	/* grow the buffer so the second pass can store the extent array */
	size_t newsize = sizeof(struct fiemap) +
		fmap->fm_mapped_extents * sizeof(struct fiemap_extent);

	struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret);
	if (ret)
		goto error_free;

	fmap = newfmap;
	memset(fmap->fm_extents, 0, fmap->fm_mapped_extents *
			sizeof(struct fiemap_extent));

	/* second FIEMAP pass: fetch the actual extent descriptors */
	fmap->fm_extent_count = fmap->fm_mapped_extents;
	fmap->fm_mapped_extents = 0;

	if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
		ERR("!fiemap ioctl() for fd=%d failed", fd);
		ret = PMEM2_E_ERRNO;
		goto error_free;
	}

	LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents);

	/* save number of extents */
	pexts->extents_count = fmap->fm_mapped_extents;

	pexts->extents = pmem2_malloc(
			pexts->extents_count * sizeof(struct extent),
			&ret);
	if (ret)
		goto error_free;

	/* save extents */
	unsigned e;
	for (e = 0; e < fmap->fm_mapped_extents; e++) {
		pexts->extents[e].offset_physical =
			fmap->fm_extents[e].fe_physical;
		pexts->extents[e].offset_logical =
			fmap->fm_extents[e].fe_logical;
		pexts->extents[e].length =
			fmap->fm_extents[e].fe_length;

		LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu",
			e,
			pexts->extents[e].offset_physical,
			pexts->extents[e].offset_logical,
			pexts->extents[e].length);
	}

	*exts = pexts;

	Free(fmap);

	return 0;

error_free:
	/* pexts->extents is still NULL unless the final allocation succeeded */
	Free(pexts->extents);
	Free(pexts);
	Free(fmap);

	return ret;
}
/*
 * pmem2_extents_destroy -- free extents structure and clear the pointer
 */
void
pmem2_extents_destroy(struct extents **exts)
{
	LOG(3, "extents %p", exts);

	ASSERTne(exts, NULL);

	struct extents *pexts = *exts;
	if (pexts == NULL)
		return;

	Free(pexts->extents);
	Free(pexts);
	*exts = NULL;
}
| 3,519 | 20.333333 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/flush.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef X86_64_FLUSH_H
#define X86_64_FLUSH_H
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#include "valgrind_internal.h"
#define FLUSH_ALIGN ((uintptr_t)64)
static force_inline void
pmem_clflush(const void *addr)
{
	/* flush (and invalidate) the cache line containing addr */
	_mm_clflush(addr);
}
#ifdef _MSC_VER
/* MSVC provides intrinsics for these instructions */
static force_inline void
pmem_clflushopt(const void *addr)
{
	/* optimized (weakly ordered) flush of the cache line at addr */
	_mm_clflushopt(addr);
}

static force_inline void
pmem_clwb(const void *addr)
{
	/* write back the cache line at addr without invalidating it */
	_mm_clwb(addr);
}
#else
/*
 * The x86 memory instructions are new enough that the compiler
 * intrinsic functions are not always available. The intrinsic
 * functions are defined here in terms of asm statements for now.
 */
static force_inline void
pmem_clflushopt(const void *addr)
{
	/* CLFLUSHOPT spelled out as a 0x66-prefixed CLFLUSH encoding */
	asm volatile(".byte 0x66; clflush %0" : "+m" \
		(*(volatile char *)(addr)));
}
static force_inline void
pmem_clwb(const void *addr)
{
	/* CLWB spelled out as a 0x66-prefixed XSAVEOPT encoding */
	asm volatile(".byte 0x66; xsaveopt %0" : "+m" \
		(*(volatile char *)(addr)));
}
#endif /* _MSC_VER */
typedef void flush_fn(const void *, size_t);
/*
 * flush_clflush_nolog -- flush the CPU cache, using clflush
 */
static force_inline void
flush_clflush_nolog(const void *addr, size_t len)
{
	/*
	 * Visit every cache-line-size (typically 64B) aligned chunk
	 * touching the [addr, addr + len) range.
	 */
	uintptr_t line = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	while (line < end) {
		_mm_clflush((char *)line);
		line += FLUSH_ALIGN;
	}
}
/*
 * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt
 */
static force_inline void
flush_clflushopt_nolog(const void *addr, size_t len)
{
	/*
	 * Visit every cache-line-size (typically 64B) aligned chunk
	 * touching the [addr, addr + len) range.
	 */
	uintptr_t line = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	while (line < end) {
		pmem_clflushopt((char *)line);
		line += FLUSH_ALIGN;
	}
}
/*
 * flush_clwb_nolog -- flush the CPU cache, using clwb
 */
static force_inline void
flush_clwb_nolog(const void *addr, size_t len)
{
	/*
	 * Visit every cache-line-size (typically 64B) aligned chunk
	 * touching the [addr, addr + len) range.
	 */
	uintptr_t line = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = (uintptr_t)addr + len;

	while (line < end) {
		pmem_clwb((char *)line);
		line += FLUSH_ALIGN;
	}
}
/*
 * flush64b_empty -- (internal) do not flush the CPU cache
 *
 * Keeps the 64-byte-flush interface while performing no actual flush;
 * only informs Valgrind's pmemcheck of the "flushed" range.
 */
static force_inline void
flush64b_empty(const void *addr)
{
	/* NOP, but tell pmemcheck about it */
	VALGRIND_DO_FLUSH(addr, 64);
}
#endif
| 2,521 | 20.193277 | 66 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/init.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <string.h>
#include <xmmintrin.h>
#include "auto_flush.h"
#include "cpu.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "os.h"
#include "out.h"
#include "pmem2_arch.h"
#include "valgrind_internal.h"
#define MOVNT_THRESHOLD 256
size_t Movnt_threshold = MOVNT_THRESHOLD;
/*
 * memory_barrier -- (internal) issue the fence instruction
 *
 * SFENCE orders all preceding stores (including non-temporal ones)
 * before any store that follows it.
 */
static void
memory_barrier(void)
{
	LOG(15, NULL);
	_mm_sfence();	/* ensure CLWB or CLFLUSHOPT completes */
}
/*
 * flush_clflush -- (internal) flush the CPU cache, using clflush
 *
 * Logging wrapper around the force-inlined flush_clflush_nolog() loop.
 */
static void
flush_clflush(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clflush_nolog(addr, len);
}
/*
 * flush_clflushopt -- (internal) flush the CPU cache, using clflushopt
 *
 * Logging wrapper around the force-inlined flush_clflushopt_nolog() loop.
 */
static void
flush_clflushopt(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clflushopt_nolog(addr, len);
}
/*
 * flush_clwb -- (internal) flush the CPU cache, using clwb
 *
 * Logging wrapper around the force-inlined flush_clwb_nolog() loop.
 */
static void
flush_clwb(const void *addr, size_t len)
{
	LOG(15, "addr %p len %zu", addr, len);

	flush_clwb_nolog(addr, len);
}
#if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE
#define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL)
#define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL)
#define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \
static void *\
memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memmove_mov_##isa##_noflush(dest, src, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memmove_mov_##isa##_##flush(dest, src, len);\
else if (len < Movnt_threshold)\
memmove_mov_##isa##_##flush(dest, src, len);\
else\
memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\
\
return dest;\
}
#define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memmove_mov_##isa##_noflush(dest, src, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\
else\
memmove_mov_##isa##_empty(dest, src, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE(isa, flush, perfbarrier)\
static void *\
memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memset_mov_##isa##_noflush(dest, c, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memset_mov_##isa##_##flush(dest, c, len);\
else if (len < Movnt_threshold)\
memset_mov_##isa##_##flush(dest, c, len);\
else\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memset_mov_##isa##_noflush(dest, c, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\
else\
memset_mov_##isa##_empty(dest, c, len);\
\
return dest;\
}
#endif
#if SSE2_AVAILABLE
MEMCPY_TEMPLATE(sse2, clflush, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(sse2, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(sse2, _nobarrier)
MEMSET_TEMPLATE(sse2, clflush, _nobarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMSET_TEMPLATE(sse2, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(sse2, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflush, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(sse2, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(sse2, _wcbarrier)
#endif
#if AVX_AVAILABLE
MEMCPY_TEMPLATE(avx, clflush, _nobarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(avx, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(avx, _nobarrier)
MEMSET_TEMPLATE(avx, clflush, _nobarrier)
MEMSET_TEMPLATE(avx, clflushopt, _nobarrier)
MEMSET_TEMPLATE(avx, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(avx, _nobarrier)
MEMCPY_TEMPLATE(avx, clflush, _wcbarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(avx, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(avx, _wcbarrier)
MEMSET_TEMPLATE(avx, clflush, _wcbarrier)
MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(avx, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(avx, _wcbarrier)
#endif
#if AVX512F_AVAILABLE
MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle wa */)
MEMCPY_TEMPLATE(avx512f, clflushopt, /* */)
MEMCPY_TEMPLATE(avx512f, clwb, /* */)
MEMCPY_TEMPLATE_EADR(avx512f, /* */)
MEMSET_TEMPLATE(avx512f, clflush, /* */)
MEMSET_TEMPLATE(avx512f, clflushopt, /* */)
MEMSET_TEMPLATE(avx512f, clwb, /* */)
MEMSET_TEMPLATE_EADR(avx512f, /* */)
#endif
/* which SIMD memcpy/memset implementation was selected at runtime */
enum memcpy_impl {
	MEMCPY_INVALID,
	MEMCPY_SSE2,
	MEMCPY_AVX,
	MEMCPY_AVX512F
};
/*
 * use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible
 *
 * Wires the SSE2 memmove/memset variants matching the previously chosen
 * flush function into 'info'.  With 'wc_workaround' set, the *_wcbarrier
 * flavors are selected, otherwise the *_nobarrier ones.
 */
static void
use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
		int wc_workaround)
{
#if SSE2_AVAILABLE
	*impl = MEMCPY_SSE2;
	if (wc_workaround) {
		info->memmove_nodrain_eadr =
				memmove_nodrain_sse2_eadr_wcbarrier;
		if (info->flush == flush_clflush)
			info->memmove_nodrain =
					memmove_nodrain_sse2_clflush_wcbarrier;
		else if (info->flush == flush_clflushopt)
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflushopt_wcbarrier;
		else if (info->flush == flush_clwb)
			info->memmove_nodrain =
					memmove_nodrain_sse2_clwb_wcbarrier;
		else
			ASSERT(0);

		info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_wcbarrier;
		if (info->flush == flush_clflush)
			info->memset_nodrain =
					memset_nodrain_sse2_clflush_wcbarrier;
		else if (info->flush == flush_clflushopt)
			info->memset_nodrain =
				memset_nodrain_sse2_clflushopt_wcbarrier;
		else if (info->flush == flush_clwb)
			info->memset_nodrain =
					memset_nodrain_sse2_clwb_wcbarrier;
		else
			ASSERT(0);
	} else {
		info->memmove_nodrain_eadr =
				memmove_nodrain_sse2_eadr_nobarrier;
		if (info->flush == flush_clflush)
			info->memmove_nodrain =
					memmove_nodrain_sse2_clflush_nobarrier;
		else if (info->flush == flush_clflushopt)
			info->memmove_nodrain =
				memmove_nodrain_sse2_clflushopt_nobarrier;
		else if (info->flush == flush_clwb)
			info->memmove_nodrain =
					memmove_nodrain_sse2_clwb_nobarrier;
		else
			ASSERT(0);

		info->memset_nodrain_eadr =
				memset_nodrain_sse2_eadr_nobarrier;
		if (info->flush == flush_clflush)
			info->memset_nodrain =
					memset_nodrain_sse2_clflush_nobarrier;
		else if (info->flush == flush_clflushopt)
			info->memset_nodrain =
				memset_nodrain_sse2_clflushopt_nobarrier;
		else if (info->flush == flush_clwb)
			info->memset_nodrain =
					memset_nodrain_sse2_clwb_nobarrier;
		else
			ASSERT(0);
	}
#else
	LOG(3, "sse2 disabled at build time");
#endif
}
/*
 * use_avx_memcpy_memset -- (internal) AVX detected, use it if possible
 *
 * Same selection scheme as use_sse2_memcpy_memset(), but wires in the
 * AVX variants.  Can be disabled at runtime by setting PMEM_AVX=0.
 */
static void
use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
		int wc_workaround)
{
#if AVX_AVAILABLE
	LOG(3, "avx supported");

	char *e = os_getenv("PMEM_AVX");
	if (e != NULL && strcmp(e, "0") == 0) {
		LOG(3, "PMEM_AVX set to 0");
		return;
	}

	LOG(3, "PMEM_AVX enabled");
	*impl = MEMCPY_AVX;

	if (wc_workaround) {
		info->memmove_nodrain_eadr =
				memmove_nodrain_avx_eadr_wcbarrier;
		if (info->flush == flush_clflush)
			info->memmove_nodrain =
					memmove_nodrain_avx_clflush_wcbarrier;
		else if (info->flush == flush_clflushopt)
			info->memmove_nodrain =
				memmove_nodrain_avx_clflushopt_wcbarrier;
		else if (info->flush == flush_clwb)
			info->memmove_nodrain =
					memmove_nodrain_avx_clwb_wcbarrier;
		else
			ASSERT(0);

		info->memset_nodrain_eadr =
				memset_nodrain_avx_eadr_wcbarrier;
		if (info->flush == flush_clflush)
			info->memset_nodrain =
					memset_nodrain_avx_clflush_wcbarrier;
		else if (info->flush == flush_clflushopt)
			info->memset_nodrain =
				memset_nodrain_avx_clflushopt_wcbarrier;
		else if (info->flush == flush_clwb)
			info->memset_nodrain =
					memset_nodrain_avx_clwb_wcbarrier;
		else
			ASSERT(0);
	} else {
		info->memmove_nodrain_eadr =
				memmove_nodrain_avx_eadr_nobarrier;
		if (info->flush == flush_clflush)
			info->memmove_nodrain =
					memmove_nodrain_avx_clflush_nobarrier;
		else if (info->flush == flush_clflushopt)
			info->memmove_nodrain =
				memmove_nodrain_avx_clflushopt_nobarrier;
		else if (info->flush == flush_clwb)
			info->memmove_nodrain =
					memmove_nodrain_avx_clwb_nobarrier;
		else
			ASSERT(0);

		info->memset_nodrain_eadr =
				memset_nodrain_avx_eadr_nobarrier;
		if (info->flush == flush_clflush)
			info->memset_nodrain =
					memset_nodrain_avx_clflush_nobarrier;
		else if (info->flush == flush_clflushopt)
			info->memset_nodrain =
				memset_nodrain_avx_clflushopt_nobarrier;
		else if (info->flush == flush_clwb)
			info->memset_nodrain =
					memset_nodrain_avx_clwb_nobarrier;
		else
			ASSERT(0);
	}
#else
	LOG(3, "avx supported, but disabled at build time");
#endif
}
/*
 * use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible
 *
 * Wires the AVX512F memmove/memset variants matching the chosen flush
 * function into 'info'.  There is no wc-barrier flavor for this path.
 * Can be disabled at runtime by setting PMEM_AVX512F=0.
 */
static void
use_avx512f_memcpy_memset(struct pmem2_arch_info *info,
		enum memcpy_impl *impl)
{
#if AVX512F_AVAILABLE
	LOG(3, "avx512f supported");

	char *e = os_getenv("PMEM_AVX512F");
	if (e != NULL && strcmp(e, "0") == 0) {
		LOG(3, "PMEM_AVX512F set to 0");
		return;
	}

	LOG(3, "PMEM_AVX512F enabled");
	*impl = MEMCPY_AVX512F;

	info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr;
	if (info->flush == flush_clflush)
		info->memmove_nodrain = memmove_nodrain_avx512f_clflush;
	else if (info->flush == flush_clflushopt)
		info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt;
	else if (info->flush == flush_clwb)
		info->memmove_nodrain = memmove_nodrain_avx512f_clwb;
	else
		ASSERT(0);

	info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr;
	if (info->flush == flush_clflush)
		info->memset_nodrain = memset_nodrain_avx512f_clflush;
	else if (info->flush == flush_clflushopt)
		info->memset_nodrain = memset_nodrain_avx512f_clflushopt;
	else if (info->flush == flush_clwb)
		info->memset_nodrain = memset_nodrain_avx512f_clwb;
	else
		ASSERT(0);
#else
	LOG(3, "avx512f supported, but disabled at build time");
#endif
}
/*
 * pmem_cpuinfo_to_funcs -- (internal) configure libpmem based on CPUID
 *
 * Picks the best available flush instruction (clflush < clflushopt < clwb,
 * each overridable via PMEM_NO_CLFLUSHOPT / PMEM_NO_CLWB) and then selects
 * the SIMD memcpy/memset implementations, honoring PMEM_NO_MOVNT,
 * PMEM_WC_WORKAROUND and the per-ISA environment switches.
 */
static void
pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl)
{
	LOG(3, NULL);

	if (is_cpu_clflush_present()) {
		LOG(3, "clflush supported");

		info->flush = flush_clflush;
		info->flush_has_builtin_fence = 1;
		info->fence = memory_barrier;
	}

	if (is_cpu_clflushopt_present()) {
		LOG(3, "clflushopt supported");

		char *e = os_getenv("PMEM_NO_CLFLUSHOPT");
		if (e && strcmp(e, "1") == 0) {
			LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt");
		} else {
			info->flush = flush_clflushopt;
			info->flush_has_builtin_fence = 0;
			info->fence = memory_barrier;
		}
	}

	if (is_cpu_clwb_present()) {
		LOG(3, "clwb supported");

		char *e = os_getenv("PMEM_NO_CLWB");
		if (e && strcmp(e, "1") == 0) {
			LOG(3, "PMEM_NO_CLWB forced no clwb");
		} else {
			info->flush = flush_clwb;
			info->flush_has_builtin_fence = 0;
			info->fence = memory_barrier;
		}
	}

	/*
	 * XXX Disable this work around for Intel CPUs with optimized
	 * WC eviction.
	 */
	int wc_workaround = is_cpu_genuine_intel();

	char *ptr = os_getenv("PMEM_WC_WORKAROUND");
	if (ptr) {
		if (strcmp(ptr, "1") == 0) {
			LOG(3, "WC workaround forced to 1");
			wc_workaround = 1;
		} else if (strcmp(ptr, "0") == 0) {
			LOG(3, "WC workaround forced to 0");
			wc_workaround = 0;
		} else {
			LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)",
				ptr);
		}
	}
	LOG(3, "WC workaround = %d", wc_workaround);

	ptr = os_getenv("PMEM_NO_MOVNT");
	if (ptr && strcmp(ptr, "1") == 0) {
		LOG(3, "PMEM_NO_MOVNT forced no movnt");
	} else {
		/* later selections override earlier ones (sse2 < avx < avx512f) */
		use_sse2_memcpy_memset(info, impl, wc_workaround);

		if (is_cpu_avx_present())
			use_avx_memcpy_memset(info, impl, wc_workaround);

		if (is_cpu_avx512f_present())
			use_avx512f_memcpy_memset(info, impl);
	}
}
/*
 * pmem2_arch_init -- initialize architecture-specific list of pmem operations
 *
 * Fills 'info' from CPUID/environment and logs the final selection.
 * FATALs if no flush instruction was detected at all.
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, NULL);

	enum memcpy_impl impl = MEMCPY_INVALID;

	pmem_cpuinfo_to_funcs(info, &impl);

	/*
	 * For testing, allow overriding the default threshold
	 * for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*()
	 * and pmem_memset_*().
	 * It has no effect if movnt is not supported or disabled.
	 */
	const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD");
	if (ptr) {
		long long val = atoll(ptr);

		if (val < 0) {
			LOG(3, "Invalid PMEM_MOVNT_THRESHOLD");
		} else {
			LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val);
			Movnt_threshold = (size_t)val;
		}
	}

	if (info->flush == flush_clwb)
		LOG(3, "using clwb");
	else if (info->flush == flush_clflushopt)
		LOG(3, "using clflushopt");
	else if (info->flush == flush_clflush)
		LOG(3, "using clflush");
	else
		/* pmem_cpuinfo_to_funcs() found no flush instruction */
		FATAL("invalid deep flush function address");

	if (impl == MEMCPY_AVX512F)
		LOG(3, "using movnt AVX512F");
	else if (impl == MEMCPY_AVX)
		LOG(3, "using movnt AVX");
	else if (impl == MEMCPY_SSE2)
		LOG(3, "using movnt SSE2");
}
| 13,899 | 25.275992 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/avx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
#ifndef PMEM_AVX_H
#define PMEM_AVX_H
#include <immintrin.h>
#include "util.h"
/*
* avx_zeroupper -- _mm256_zeroupper wrapper
*
* _mm256_zeroupper clears upper parts of avx registers.
*
* It's needed for 2 reasons:
* - it improves performance of non-avx code after avx
* - it works around problem discovered by Valgrind
*
* In optimized builds gcc inserts VZEROUPPER automatically before
* calling non-avx code (or at the end of the function). But in release
* builds it doesn't, so if we don't do this by ourselves, then when
* someone memcpy'ies uninitialized data, Valgrind complains whenever
* someone reads those registers.
*
* One notable example is loader, which tries to detect whether it
* needs to save whole ymm registers by looking at their current
* (possibly uninitialized) value.
*
* Valgrind complains like that:
* Conditional jump or move depends on uninitialised value(s)
* at 0x4015CC9: _dl_runtime_resolve_avx_slow
* (in /lib/x86_64-linux-gnu/ld-2.24.so)
* by 0x10B531: test_realloc_api (obj_basic_integration.c:185)
* by 0x10F1EE: main (obj_basic_integration.c:594)
*
* Note: We have to be careful to not read AVX registers after this
* intrinsic, because of this stupid gcc bug:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735
*/
static force_inline void
avx_zeroupper(void)
{
	/* see the block comment above for why this is issued explicitly */
	_mm256_zeroupper();
}
static force_inline __m128i
m256_get16b(__m256i ymm)
{
	/* return the low 128-bit lane of the 256-bit register */
	return _mm256_extractf128_si256(ymm, 0);
}
#ifdef _MSC_VER
/* MSVC path: extract through the low 128-bit lane (m256_get16b) */
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
	/* low 8 bytes of the register */
	return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
	/* low 4 bytes of the register */
	return (uint32_t)m256_get8b(ymm);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
	/* low 2 bytes of the register */
	return (uint16_t)m256_get8b(ymm);
}
#else
/* GCC/Clang path: use the 256-bit element-extract intrinsics directly */
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
	/* low 8 bytes of the register */
	return (uint64_t)_mm256_extract_epi64(ymm, 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
	/* low 4 bytes of the register */
	return (uint32_t)_mm256_extract_epi32(ymm, 0);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
	/* low 2 bytes of the register */
	return (uint16_t)_mm256_extract_epi16(ymm, 0);
}
#endif
#endif
| 2,238 | 24.735632 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy_memset.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef MEMCPY_MEMSET_H
#define MEMCPY_MEMSET_H
#include <stddef.h>
#include <xmmintrin.h>
#include "pmem2_arch.h"
typedef void barrier_fn(void);
typedef void flush64b_fn(const void *);
/*
 * barrier_after_ntstores -- (barrier_fn) issue sfence to serialize
 * non-temporal stores, for configurations whose pmem_drain does not
 * contain an sfence itself (see the inline comment).
 */
static inline void
barrier_after_ntstores(void)
{
	/*
	 * In this configuration pmem_drain does not contain sfence, so we have
	 * to serialize non-temporal store instructions.
	 */
	_mm_sfence();
}
/*
 * no_barrier_after_ntstores -- (barrier_fn) intentionally empty; used when
 * pmem_drain already contains the sfence (see the inline comment).
 */
static inline void
no_barrier_after_ntstores(void)
{
	/*
	 * In this configuration pmem_drain contains sfence, so we don't have
	 * to serialize non-temporal store instructions
	 */
}
/*
 * noflush -- (flush_fn) no-op flush of the range [addr, addr + len)
 */
static inline void
noflush(const void *addr, size_t len)
{
	/* NOP, not even pmemcheck annotation */
}
/*
 * noflush64b -- (flush64b_fn) no-op flush of a single 64-byte line
 */
static inline void
noflush64b(const void *addr)
{
	/* NOP, not even pmemcheck annotation */
}
typedef void perf_barrier_fn(void);
/*
 * wc_barrier -- (perf_barrier_fn) sfence issued between batches of
 * non-temporal stores so no more data is in flight than the CPU's
 * write-combining buffers can hold; see rationale below.
 */
static force_inline void
wc_barrier(void)
{
	/*
	 * Currently, for SSE2 and AVX code paths, use of non-temporal stores
	 * on all generations of CPUs must be limited to the number of
	 * write-combining buffers (12) because otherwise, suboptimal eviction
	 * policy might impact performance when writing more data than WC
	 * buffers can simultaneously hold.
	 *
	 * The AVX512 code path is not affected, probably because we are
	 * overwriting whole cache lines.
	 */
	_mm_sfence();
}
/*
 * no_barrier -- (perf_barrier_fn) intentionally empty
 */
static force_inline void
no_barrier(void)
{
}
#ifndef AVX512F_AVAILABLE
/*
* XXX not supported in MSVC version we currently use.
* Enable Windows tests pmem2_mem_ext when MSVC we
* use will support AVX512F.
*/
#ifdef _MSC_VER
#define AVX512F_AVAILABLE 0
#else
#define AVX512F_AVAILABLE 1
#endif
#endif
#ifndef AVX_AVAILABLE
#define AVX_AVAILABLE 1
#endif
#ifndef SSE2_AVAILABLE
#define SSE2_AVAILABLE 1
#endif
#if SSE2_AVAILABLE
void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_mov_sse2_empty(char *dest, const char *src, size_t len);
void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_sse2_clflush(char *dest, int c, size_t len);
void memset_mov_sse2_clflushopt(char *dest, int c, size_t len);
void memset_mov_sse2_clwb(char *dest, int c, size_t len);
void memset_mov_sse2_empty(char *dest, int c, size_t len);
void memset_mov_sse2_noflush(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX_AVAILABLE
void memmove_mov_avx_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_avx_clflush(char *dest, int c, size_t len);
void memset_mov_avx_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx_clwb(char *dest, int c, size_t len);
void memset_mov_avx_empty(char *dest, int c, size_t len);
void memset_mov_avx_noflush(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX512F_AVAILABLE
void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len);
void memset_mov_avx512f_clflush(char *dest, int c, size_t len);
void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx512f_clwb(char *dest, int c, size_t len);
void memset_mov_avx512f_empty(char *dest, int c, size_t len);
void memset_mov_avx512f_noflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx512f_clwb(char *dest, int c, size_t len);
void memset_movnt_avx512f_empty(char *dest, int c, size_t len);
void memset_movnt_avx512f_noflush(char *dest, int c, size_t len);
#endif
extern size_t Movnt_threshold;
/*
* SSE2/AVX1 only:
*
* How much data WC buffers can hold at the same time, after which sfence
* is needed to flush them.
*
* For some reason sfence affects performance of reading from DRAM, so we have
* to prefetch the source data earlier.
*/
#define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */)
/*
* How much to prefetch initially.
* Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE.
*/
#define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */)
/*
 * prefetch -- prefetch one cache line at addr into all cache levels (T0)
 */
static force_inline void
prefetch(const char *addr)
{
	_mm_prefetch(addr, _MM_HINT_T0);
}
/*
 * prefetch_ini_fw -- initial forward prefetch: pull in up to
 * INI_PREFETCH_SIZE bytes starting at src, one cache line at a time
 */
static force_inline void
prefetch_ini_fw(const char *src, size_t len)
{
	size_t pref = MIN(len, INI_PREFETCH_SIZE);
	for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
		prefetch(src + i);
}
/*
 * prefetch_ini_bw -- initial backward prefetch: pull in up to
 * INI_PREFETCH_SIZE bytes going down from src, one cache line at a time
 */
static force_inline void
prefetch_ini_bw(const char *src, size_t len)
{
	size_t pref = MIN(len, INI_PREFETCH_SIZE);
	for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
		prefetch(src - i);
}
/*
 * prefetch_next_fw -- prefetch the next PERF_BARRIER_SIZE-byte window
 * starting INI_PREFETCH_SIZE ahead of src, clamped to srcend
 */
static force_inline void
prefetch_next_fw(const char *src, const char *srcend)
{
	const char *begin = src + INI_PREFETCH_SIZE;
	const char *end = begin + PERF_BARRIER_SIZE;
	if (end > srcend)
		end = srcend;
	for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE)
		prefetch(addr);
}
/*
 * prefetch_next_bw -- prefetch the next PERF_BARRIER_SIZE-byte window
 * starting INI_PREFETCH_SIZE below src, clamped to srcbegin
 */
static force_inline void
prefetch_next_bw(const char *src, const char *srcbegin)
{
	const char *begin = src - INI_PREFETCH_SIZE;
	const char *end = begin - PERF_BARRIER_SIZE;
	if (end < srcbegin)
		end = srcbegin;
	for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE)
		prefetch(addr);
}
#endif
| 9,351 | 33.131387 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_nt_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
#include "out.h"
#include "valgrind_internal.h"
/*
 * mm_stream_si128 -- non-temporal 16B store to ((__m128i *)dest)[idx].
 * NOTE(review): the barrier() after every streaming store is not part of
 * upstream-style unrolled NT loops and looks like checkpoint/persist
 * instrumentation added by this fork -- confirm intent; it is on the
 * hottest path.
 */
static force_inline void
mm_stream_si128(char *dest, unsigned idx, __m128i src)
{
	_mm_stream_si128((__m128i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt4x64b -- fill 4 cache lines (256 B) at dest with xmm using
 * 16 non-temporal 16-byte stores; dest must be 64-byte aligned
 * (alignment established by the caller, memset_movnt_sse2)
 */
static force_inline void
memset_movnt4x64b(char *dest, __m128i xmm)
{
	mm_stream_si128(dest, 0, xmm);
	mm_stream_si128(dest, 1, xmm);
	mm_stream_si128(dest, 2, xmm);
	mm_stream_si128(dest, 3, xmm);
	mm_stream_si128(dest, 4, xmm);
	mm_stream_si128(dest, 5, xmm);
	mm_stream_si128(dest, 6, xmm);
	mm_stream_si128(dest, 7, xmm);
	mm_stream_si128(dest, 8, xmm);
	mm_stream_si128(dest, 9, xmm);
	mm_stream_si128(dest, 10, xmm);
	mm_stream_si128(dest, 11, xmm);
	mm_stream_si128(dest, 12, xmm);
	mm_stream_si128(dest, 13, xmm);
	mm_stream_si128(dest, 14, xmm);
	mm_stream_si128(dest, 15, xmm);
}
/* memset_movnt2x64b -- fill 2 cache lines (128 B) with 8 NT 16B stores */
static force_inline void
memset_movnt2x64b(char *dest, __m128i xmm)
{
	mm_stream_si128(dest, 0, xmm);
	mm_stream_si128(dest, 1, xmm);
	mm_stream_si128(dest, 2, xmm);
	mm_stream_si128(dest, 3, xmm);
	mm_stream_si128(dest, 4, xmm);
	mm_stream_si128(dest, 5, xmm);
	mm_stream_si128(dest, 6, xmm);
	mm_stream_si128(dest, 7, xmm);
}
/* memset_movnt1x64b -- fill 1 cache line (64 B) with 4 NT 16B stores */
static force_inline void
memset_movnt1x64b(char *dest, __m128i xmm)
{
	mm_stream_si128(dest, 0, xmm);
	mm_stream_si128(dest, 1, xmm);
	mm_stream_si128(dest, 2, xmm);
	mm_stream_si128(dest, 3, xmm);
}
/* memset_movnt1x32b -- fill 32 B with 2 NT 16B stores */
static force_inline void
memset_movnt1x32b(char *dest, __m128i xmm)
{
	mm_stream_si128(dest, 0, xmm);
	mm_stream_si128(dest, 1, xmm);
}
/* memset_movnt1x16b -- fill 16 B with a single NT 16B store */
static force_inline void
memset_movnt1x16b(char *dest, __m128i xmm)
{
	_mm_stream_si128((__m128i *)dest, xmm);
}
/* memset_movnt1x8b -- fill 8 B with a single NT 8B store (movnti) */
static force_inline void
memset_movnt1x8b(char *dest, __m128i xmm)
{
	uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm);
	_mm_stream_si64((long long *)dest, (long long)x);
}
/* memset_movnt1x4b -- fill 4 B with a single NT 4B store (movnti) */
static force_inline void
memset_movnt1x4b(char *dest, __m128i xmm)
{
	uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm);
	_mm_stream_si32((int *)dest, (int)x);
}
/*
 * memset_movnt_sse2 -- memset(dest, c, len) using SSE2 non-temporal stores.
 *
 * flush        - flush used for the unaligned head and the non-NT tail
 * barrier      - fence issued once at the very end, after all NT stores
 * perf_barrier - fence issued between PERF_BARRIER_SIZE batches (WC-buffer
 *                drain; see wc_barrier in memcpy_memset.h)
 *
 * The whole destination range is reported to pmemcheck via
 * VALGRIND_DO_FLUSH at the end.
 */
static force_inline void
memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush,
	barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	char *orig_dest = dest;
	size_t orig_len = len;
	__m128i xmm = _mm_set1_epi8((char)c);
	/* align dest up to a 64B boundary, filling the head with mov+flush */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_sse2(dest, xmm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk loop: 3 x 256B per iteration, perf_barrier between batches */
	while (len >= PERF_BARRIER_SIZE) {
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		if (len)
			perf_barrier();
	}
	/* remaining full 256B / 128B / 64B chunks */
	while (len >= 4 * 64) {
		memset_movnt4x64b(dest, xmm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, xmm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, xmm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, xmm);
		else if (len == 16)
			memset_movnt1x16b(dest, xmm);
		else if (len == 8)
			memset_movnt1x8b(dest, xmm);
		else if (len == 4)
			memset_movnt1x4b(dest, xmm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* odd-sized tail: regular stores + explicit flush */
	memset_small_sse2(dest, xmm, len, flush);
end:
	barrier();
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/* variants without perf_barrier */
/*
 * Exported entry points: one per flush flavor. clflush variants pair with
 * barrier_after_ntstores, clflushopt/clwb with no_barrier_after_ntstores
 * (the drain step supplies the sfence in the latter configurations).
 */
void
memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
		no_barrier);
}
void
memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_empty_nolog,
		barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
		barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
		no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
		no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
/*
 * Same flush flavors as above, but with wc_barrier between bulk batches
 * to drain the CPU's write-combining buffers.
 */
void
memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
		wc_barrier);
}
void
memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_empty_nolog,
		barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
		barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
		no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
		no_barrier_after_ntstores, wc_barrier);
}
| 5,912 | 20.580292 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_nt_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx.h"
#include "out.h"
#include "valgrind_internal.h"
/*
 * mm256_stream_si256 -- non-temporal 32B store to ((__m256i *)dest)[idx].
 * NOTE(review): the barrier() after every streaming store looks like
 * checkpoint/persist instrumentation added by this fork -- confirm intent;
 * it is on the hottest path.
 */
static force_inline void
mm256_stream_si256(char *dest, unsigned idx, __m256i src)
{
	_mm256_stream_si256((__m256i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt8x64b -- fill 8 cache lines (512 B) at dest with ymm using
 * 16 non-temporal 32-byte stores; dest must be 64-byte aligned
 * (alignment established by the caller, memset_movnt_avx)
 */
static force_inline void
memset_movnt8x64b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
	mm256_stream_si256(dest, 1, ymm);
	mm256_stream_si256(dest, 2, ymm);
	mm256_stream_si256(dest, 3, ymm);
	mm256_stream_si256(dest, 4, ymm);
	mm256_stream_si256(dest, 5, ymm);
	mm256_stream_si256(dest, 6, ymm);
	mm256_stream_si256(dest, 7, ymm);
	mm256_stream_si256(dest, 8, ymm);
	mm256_stream_si256(dest, 9, ymm);
	mm256_stream_si256(dest, 10, ymm);
	mm256_stream_si256(dest, 11, ymm);
	mm256_stream_si256(dest, 12, ymm);
	mm256_stream_si256(dest, 13, ymm);
	mm256_stream_si256(dest, 14, ymm);
	mm256_stream_si256(dest, 15, ymm);
}
/* memset_movnt4x64b -- fill 4 cache lines (256 B) with 8 NT 32B stores */
static force_inline void
memset_movnt4x64b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
	mm256_stream_si256(dest, 1, ymm);
	mm256_stream_si256(dest, 2, ymm);
	mm256_stream_si256(dest, 3, ymm);
	mm256_stream_si256(dest, 4, ymm);
	mm256_stream_si256(dest, 5, ymm);
	mm256_stream_si256(dest, 6, ymm);
	mm256_stream_si256(dest, 7, ymm);
}
/* memset_movnt2x64b -- fill 2 cache lines (128 B) with 4 NT 32B stores */
static force_inline void
memset_movnt2x64b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
	mm256_stream_si256(dest, 1, ymm);
	mm256_stream_si256(dest, 2, ymm);
	mm256_stream_si256(dest, 3, ymm);
}
/* memset_movnt1x64b -- fill 1 cache line (64 B) with 2 NT 32B stores */
static force_inline void
memset_movnt1x64b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
	mm256_stream_si256(dest, 1, ymm);
}
/* memset_movnt1x32b -- fill 32 B with a single NT 32B store */
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
	mm256_stream_si256(dest, 0, ymm);
}
/* memset_movnt1x16b -- fill 16 B: NT 16B store of the low xmm lane */
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
	__m128i xmm0 = m256_get16b(ymm);
	_mm_stream_si128((__m128i *)dest, xmm0);
}
/* memset_movnt1x8b -- fill 8 B: NT 8B store of the low 8 bytes */
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
	uint64_t x = m256_get8b(ymm);
	_mm_stream_si64((long long *)dest, (long long)x);
}
/* memset_movnt1x4b -- fill 4 B: NT 4B store of the low 4 bytes */
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
	uint32_t x = m256_get4b(ymm);
	_mm_stream_si32((int *)dest, (int)x);
}
/*
 * memset_movnt_avx -- memset(dest, c, len) using AVX non-temporal stores.
 *
 * flush        - flush used for the unaligned head and the non-NT tail
 * barrier      - fence issued once at the very end, after all NT stores
 * perf_barrier - fence issued between PERF_BARRIER_SIZE batches (WC-buffer
 *                drain; see wc_barrier in memcpy_memset.h)
 *
 * Ends with avx_zeroupper() (no AVX register may be read after it) and
 * reports the whole range to pmemcheck via VALGRIND_DO_FLUSH.
 */
static force_inline void
memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush,
	barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	char *orig_dest = dest;
	size_t orig_len = len;
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* align dest up to a 64B boundary, filling the head with mov+flush */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk loop: 512B + 256B per iteration, perf_barrier between batches */
	while (len >= PERF_BARRIER_SIZE) {
		memset_movnt8x64b(dest, ymm);
		dest += 8 * 64;
		len -= 8 * 64;
		memset_movnt4x64b(dest, ymm);
		dest += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		if (len)
			perf_barrier();
	}
	/* remaining full 512B / 256B / 128B / 64B chunks */
	if (len >= 8 * 64) {
		memset_movnt8x64b(dest, ymm);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_movnt4x64b(dest, ymm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, ymm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, ymm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, ymm);
		else if (len == 16)
			memset_movnt1x16b(dest, ymm);
		else if (len == 8)
			memset_movnt1x8b(dest, ymm);
		else if (len == 4)
			memset_movnt1x4b(dest, ymm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* odd-sized tail: regular stores + explicit flush */
	memset_small_avx(dest, ymm, len, flush);
end:
	avx_zeroupper();
	barrier();
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/* variants without perf_barrier */
/*
 * Exported entry points: one per flush flavor. clflush variants pair with
 * barrier_after_ntstores, clflushopt/clwb with no_barrier_after_ntstores
 * (the drain step supplies the sfence in the latter configurations).
 */
void
memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
		no_barrier);
}
void
memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_empty_nolog,
		barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflush_nolog,
		barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
		no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clwb_nolog,
		no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
/*
 * Same flush flavors as above, but with wc_barrier between bulk batches
 * to drain the CPU's write-combining buffers.
 */
void
memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
		wc_barrier);
}
void
memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_empty_nolog,
		barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflush_nolog,
		barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
		no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx(dest, c, len, flush_clwb_nolog,
		no_barrier_after_ntstores, wc_barrier);
}
| 6,151 | 20.43554 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_t_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx512f.h"
/*
 * mm512_store_si512 -- regular (temporal) aligned 64B store to
 * ((__m512i *)dest)[idx]; persistence is handled by the explicit
 * flush64b calls in the callers, so no barrier is needed here
 */
static force_inline void
mm512_store_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_store_si512((__m512i *)dest + idx, src);
}
/*
 * memset_mov32x64b -- fill 32 cache lines (2 KiB) with temporal 64B
 * stores, then flush each line with flush64b; stores and flushes are
 * batched separately. dest must be 64-byte aligned.
 */
static force_inline void
memset_mov32x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	mm512_store_si512(dest, 1, zmm);
	mm512_store_si512(dest, 2, zmm);
	mm512_store_si512(dest, 3, zmm);
	mm512_store_si512(dest, 4, zmm);
	mm512_store_si512(dest, 5, zmm);
	mm512_store_si512(dest, 6, zmm);
	mm512_store_si512(dest, 7, zmm);
	mm512_store_si512(dest, 8, zmm);
	mm512_store_si512(dest, 9, zmm);
	mm512_store_si512(dest, 10, zmm);
	mm512_store_si512(dest, 11, zmm);
	mm512_store_si512(dest, 12, zmm);
	mm512_store_si512(dest, 13, zmm);
	mm512_store_si512(dest, 14, zmm);
	mm512_store_si512(dest, 15, zmm);
	mm512_store_si512(dest, 16, zmm);
	mm512_store_si512(dest, 17, zmm);
	mm512_store_si512(dest, 18, zmm);
	mm512_store_si512(dest, 19, zmm);
	mm512_store_si512(dest, 20, zmm);
	mm512_store_si512(dest, 21, zmm);
	mm512_store_si512(dest, 22, zmm);
	mm512_store_si512(dest, 23, zmm);
	mm512_store_si512(dest, 24, zmm);
	mm512_store_si512(dest, 25, zmm);
	mm512_store_si512(dest, 26, zmm);
	mm512_store_si512(dest, 27, zmm);
	mm512_store_si512(dest, 28, zmm);
	mm512_store_si512(dest, 29, zmm);
	mm512_store_si512(dest, 30, zmm);
	mm512_store_si512(dest, 31, zmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
	flush64b(dest + 16 * 64);
	flush64b(dest + 17 * 64);
	flush64b(dest + 18 * 64);
	flush64b(dest + 19 * 64);
	flush64b(dest + 20 * 64);
	flush64b(dest + 21 * 64);
	flush64b(dest + 22 * 64);
	flush64b(dest + 23 * 64);
	flush64b(dest + 24 * 64);
	flush64b(dest + 25 * 64);
	flush64b(dest + 26 * 64);
	flush64b(dest + 27 * 64);
	flush64b(dest + 28 * 64);
	flush64b(dest + 29 * 64);
	flush64b(dest + 30 * 64);
	flush64b(dest + 31 * 64);
}
/*
 * memset_mov16x64b -- fill 16 cache lines (1 KiB) with temporal 64B
 * stores, then flush each line with flush64b
 */
static force_inline void
memset_mov16x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	mm512_store_si512(dest, 1, zmm);
	mm512_store_si512(dest, 2, zmm);
	mm512_store_si512(dest, 3, zmm);
	mm512_store_si512(dest, 4, zmm);
	mm512_store_si512(dest, 5, zmm);
	mm512_store_si512(dest, 6, zmm);
	mm512_store_si512(dest, 7, zmm);
	mm512_store_si512(dest, 8, zmm);
	mm512_store_si512(dest, 9, zmm);
	mm512_store_si512(dest, 10, zmm);
	mm512_store_si512(dest, 11, zmm);
	mm512_store_si512(dest, 12, zmm);
	mm512_store_si512(dest, 13, zmm);
	mm512_store_si512(dest, 14, zmm);
	mm512_store_si512(dest, 15, zmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
}
/* memset_mov8x64b -- fill 8 cache lines with 64B stores, then flush each */
static force_inline void
memset_mov8x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	mm512_store_si512(dest, 1, zmm);
	mm512_store_si512(dest, 2, zmm);
	mm512_store_si512(dest, 3, zmm);
	mm512_store_si512(dest, 4, zmm);
	mm512_store_si512(dest, 5, zmm);
	mm512_store_si512(dest, 6, zmm);
	mm512_store_si512(dest, 7, zmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
}
/* memset_mov4x64b -- fill 4 cache lines with 64B stores, then flush each */
static force_inline void
memset_mov4x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	mm512_store_si512(dest, 1, zmm);
	mm512_store_si512(dest, 2, zmm);
	mm512_store_si512(dest, 3, zmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memset_mov2x64b -- fill 2 cache lines with 64B stores, then flush each */
static force_inline void
memset_mov2x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	mm512_store_si512(dest, 1, zmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memset_mov1x64b -- fill 1 cache line with one 64B store, then flush it */
static force_inline void
memset_mov1x64b(char *dest, __m512i zmm, flush64b_fn flush64b)
{
	mm512_store_si512(dest, 0, zmm);
	flush64b(dest + 0 * 64);
}
/*
 * memset_mov_avx512f -- memset(dest, c, len) using AVX-512F temporal
 * stores with an explicit per-cacheline flush (flush64b); the bulk is
 * written 64B-aligned in descending chunk sizes, head/tail via
 * memset_small_avx512f with the range flush. Ends with avx_zeroupper().
 */
static force_inline void
memset_mov_avx512f(char *dest, int c, size_t len,
	flush_fn flush, flush64b_fn flush64b)
{
	__m512i zmm = _mm512_set1_epi8((char)c);
	/* See comment in memset_movnt_avx512f */
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* align dest up to a 64B boundary, filling the head with mov+flush */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx512f(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk: descending chunk sizes, 2 KiB down to 64 B */
	while (len >= 32 * 64) {
		memset_mov32x64b(dest, zmm, flush64b);
		dest += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memset_mov16x64b(dest, zmm, flush64b);
		dest += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memset_mov8x64b(dest, zmm, flush64b);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_mov4x64b(dest, zmm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, zmm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, zmm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cacheline tail */
	if (len)
		memset_small_avx512f(dest, ymm, len, flush);
	avx_zeroupper();
}
/*
 * Exported entry points: one per flush flavor; each pairs the range flush
 * (head/tail) with the matching single-cacheline flush (bulk).
 */
void
memset_mov_avx512f_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, noflush, noflush64b)
;
}
void
memset_mov_avx512f_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_empty_nolog, flush64b_empty);
}
void
memset_mov_avx512f_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
void
memset_mov_avx512f_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clflushopt_nolog,
		pmem_clflushopt);
}
void
memset_mov_avx512f_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx512f(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 6,851 | 22.958042 | 69 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_nt_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx512f.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
/*
 * mm512_stream_si512 -- non-temporal 64B store to ((__m512i *)dest)[idx].
 * NOTE(review): the barrier() after every streaming store looks like
 * checkpoint/persist instrumentation added by this fork -- confirm intent;
 * it is on the hottest path.
 */
static force_inline void
mm512_stream_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_stream_si512((__m512i *)dest + idx, src);
	barrier();
}
/*
 * memset_movnt32x64b -- fill 32 cache lines (2 KiB) with non-temporal
 * 64B stores; dest must be 64-byte aligned
 */
static force_inline void
memset_movnt32x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
	mm512_stream_si512(dest, 1, zmm);
	mm512_stream_si512(dest, 2, zmm);
	mm512_stream_si512(dest, 3, zmm);
	mm512_stream_si512(dest, 4, zmm);
	mm512_stream_si512(dest, 5, zmm);
	mm512_stream_si512(dest, 6, zmm);
	mm512_stream_si512(dest, 7, zmm);
	mm512_stream_si512(dest, 8, zmm);
	mm512_stream_si512(dest, 9, zmm);
	mm512_stream_si512(dest, 10, zmm);
	mm512_stream_si512(dest, 11, zmm);
	mm512_stream_si512(dest, 12, zmm);
	mm512_stream_si512(dest, 13, zmm);
	mm512_stream_si512(dest, 14, zmm);
	mm512_stream_si512(dest, 15, zmm);
	mm512_stream_si512(dest, 16, zmm);
	mm512_stream_si512(dest, 17, zmm);
	mm512_stream_si512(dest, 18, zmm);
	mm512_stream_si512(dest, 19, zmm);
	mm512_stream_si512(dest, 20, zmm);
	mm512_stream_si512(dest, 21, zmm);
	mm512_stream_si512(dest, 22, zmm);
	mm512_stream_si512(dest, 23, zmm);
	mm512_stream_si512(dest, 24, zmm);
	mm512_stream_si512(dest, 25, zmm);
	mm512_stream_si512(dest, 26, zmm);
	mm512_stream_si512(dest, 27, zmm);
	mm512_stream_si512(dest, 28, zmm);
	mm512_stream_si512(dest, 29, zmm);
	mm512_stream_si512(dest, 30, zmm);
	mm512_stream_si512(dest, 31, zmm);
}
/*
 * memset_movnt16x64b -- fill 16 cache lines (1 KiB) with non-temporal
 * 64B stores
 */
static force_inline void
memset_movnt16x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
	mm512_stream_si512(dest, 1, zmm);
	mm512_stream_si512(dest, 2, zmm);
	mm512_stream_si512(dest, 3, zmm);
	mm512_stream_si512(dest, 4, zmm);
	mm512_stream_si512(dest, 5, zmm);
	mm512_stream_si512(dest, 6, zmm);
	mm512_stream_si512(dest, 7, zmm);
	mm512_stream_si512(dest, 8, zmm);
	mm512_stream_si512(dest, 9, zmm);
	mm512_stream_si512(dest, 10, zmm);
	mm512_stream_si512(dest, 11, zmm);
	mm512_stream_si512(dest, 12, zmm);
	mm512_stream_si512(dest, 13, zmm);
	mm512_stream_si512(dest, 14, zmm);
	mm512_stream_si512(dest, 15, zmm);
}
/* memset_movnt8x64b -- fill 8 cache lines with NT 64B stores */
static force_inline void
memset_movnt8x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
	mm512_stream_si512(dest, 1, zmm);
	mm512_stream_si512(dest, 2, zmm);
	mm512_stream_si512(dest, 3, zmm);
	mm512_stream_si512(dest, 4, zmm);
	mm512_stream_si512(dest, 5, zmm);
	mm512_stream_si512(dest, 6, zmm);
	mm512_stream_si512(dest, 7, zmm);
}
/* memset_movnt4x64b -- fill 4 cache lines with NT 64B stores */
static force_inline void
memset_movnt4x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
	mm512_stream_si512(dest, 1, zmm);
	mm512_stream_si512(dest, 2, zmm);
	mm512_stream_si512(dest, 3, zmm);
}
/* memset_movnt2x64b -- fill 2 cache lines with NT 64B stores */
static force_inline void
memset_movnt2x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
	mm512_stream_si512(dest, 1, zmm);
}
/* memset_movnt1x64b -- fill 1 cache line with a single NT 64B store */
static force_inline void
memset_movnt1x64b(char *dest, __m512i zmm)
{
	mm512_stream_si512(dest, 0, zmm);
}
/* memset_movnt1x32b -- fill 32 B with a single NT 32B store */
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
	_mm256_stream_si256((__m256i *)dest, ymm);
}
/* memset_movnt1x16b -- fill 16 B: NT 16B store of the low xmm lane */
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
	__m128i xmm = _mm256_extracti128_si256(ymm, 0);
	_mm_stream_si128((__m128i *)dest, xmm);
}
/* memset_movnt1x8b -- fill 8 B: NT 8B store of the low 8 bytes */
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
	uint64_t x = m256_get8b(ymm);
	_mm_stream_si64((long long *)dest, (long long)x);
}
/* memset_movnt1x4b -- fill 4 B: NT 4B store of the low 4 bytes */
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
	uint32_t x = m256_get4b(ymm);
	_mm_stream_si32((int *)dest, (int)x);
}
/*
 * memset_movnt_avx512f -- memset(dest, c, len) using AVX-512F
 * non-temporal stores.
 *
 * flush   - flush used for the unaligned head and the non-NT tail
 * barrier - fence issued once at the very end, after all NT stores
 *
 * No perf_barrier here -- the AVX512 path is not affected by the
 * WC-buffer issue (see wc_barrier in memcpy_memset.h). Ends with
 * avx_zeroupper() and reports the whole range via VALGRIND_DO_FLUSH.
 */
static force_inline void
memset_movnt_avx512f(char *dest, int c, size_t len, flush_fn flush,
	barrier_fn barrier)
{
	char *orig_dest = dest;
	size_t orig_len = len;
	__m512i zmm = _mm512_set1_epi8((char)c);
	/*
	 * Can't use _mm512_extracti64x4_epi64, because some versions of gcc
	 * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887
	 */
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* align dest up to a 64B boundary, filling the head with mov+flush */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx512f(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* bulk: descending chunk sizes, 2 KiB down to 64 B */
	while (len >= 32 * 64) {
		memset_movnt32x64b(dest, zmm);
		dest += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memset_movnt16x64b(dest, zmm);
		dest += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memset_movnt8x64b(dest, zmm);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_movnt4x64b(dest, zmm);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_movnt2x64b(dest, zmm);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_movnt1x64b(dest, zmm);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memset_movnt1x32b(dest, ymm);
		else if (len == 16)
			memset_movnt1x16b(dest, ymm);
		else if (len == 8)
			memset_movnt1x8b(dest, ymm);
		else if (len == 4)
			memset_movnt1x4b(dest, ymm);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* odd-sized tail: regular stores + explicit flush */
	memset_small_avx512f(dest, ymm, len, flush);
end:
	avx_zeroupper();
	barrier();
	VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/* memset_movnt_avx512f_noflush -- NT memset, no flushing, fenced at the end */
void
memset_movnt_avx512f_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, noflush, barrier_after_ntstores);
}
/* memset_movnt_avx512f_empty -- NT memset with no-op flush, fenced at the end */
void
memset_movnt_avx512f_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_empty_nolog,
			barrier_after_ntstores);
}
/* memset_movnt_avx512f_clflush -- NT memset, tail flushed with CLFLUSH */
void
memset_movnt_avx512f_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clflush_nolog,
			barrier_after_ntstores);
}
/* memset_movnt_avx512f_clflushopt -- tail via CLFLUSHOPT; no extra fence */
void
memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores);
}
/* memset_movnt_avx512f_clwb -- tail via CLWB; no extra fence */
void
memset_movnt_avx512f_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_movnt_avx512f(dest, c, len, flush_clwb_nolog,
			no_barrier_after_ntstores);
}
| 6,397 | 21.607774 | 71 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_t_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
/* mm_store_si128 -- aligned 16 B store at (dest + idx * 16) */
static force_inline void
mm_store_si128(char *dest, unsigned idx, __m128i src)
{
	_mm_store_si128((__m128i *)dest + idx, src);
}
/* memset_mov4x64b -- fill 4 cache lines with regular stores, then flush them */
static force_inline void
memset_mov4x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	mm_store_si128(dest, 0, xmm);
	mm_store_si128(dest, 1, xmm);
	mm_store_si128(dest, 2, xmm);
	mm_store_si128(dest, 3, xmm);
	mm_store_si128(dest, 4, xmm);
	mm_store_si128(dest, 5, xmm);
	mm_store_si128(dest, 6, xmm);
	mm_store_si128(dest, 7, xmm);
	mm_store_si128(dest, 8, xmm);
	mm_store_si128(dest, 9, xmm);
	mm_store_si128(dest, 10, xmm);
	mm_store_si128(dest, 11, xmm);
	mm_store_si128(dest, 12, xmm);
	mm_store_si128(dest, 13, xmm);
	mm_store_si128(dest, 14, xmm);
	mm_store_si128(dest, 15, xmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memset_mov2x64b -- fill 2 cache lines with regular stores, then flush them */
static force_inline void
memset_mov2x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	mm_store_si128(dest, 0, xmm);
	mm_store_si128(dest, 1, xmm);
	mm_store_si128(dest, 2, xmm);
	mm_store_si128(dest, 3, xmm);
	mm_store_si128(dest, 4, xmm);
	mm_store_si128(dest, 5, xmm);
	mm_store_si128(dest, 6, xmm);
	mm_store_si128(dest, 7, xmm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memset_mov1x64b -- fill 1 cache line with regular stores, then flush it */
static force_inline void
memset_mov1x64b(char *dest, __m128i xmm, flush64b_fn flush64b)
{
	mm_store_si128(dest, 0, xmm);
	mm_store_si128(dest, 1, xmm);
	mm_store_si128(dest, 2, xmm);
	mm_store_si128(dest, 3, xmm);
	flush64b(dest + 0 * 64);
}
/*
 * memset_mov_sse2 -- memset len bytes at dest to (char)c using regular
 * (temporal) SSE2 stores; flush policy is supplied by the caller.
 */
static force_inline void
memset_mov_sse2(char *dest, int c, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	__m128i xmm = _mm_set1_epi8((char)c);
	/* head: reach the next cache-line boundary with the small-path memset */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_sse2(dest, xmm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 4 * 64) {
		memset_mov4x64b(dest, xmm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, xmm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, xmm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: whatever is left inside the last cache line */
	if (len)
		memset_small_sse2(dest, xmm, len, flush);
}
/* memset_mov_sse2_noflush -- temporal-store memset, no flushing at all */
void
memset_mov_sse2_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, noflush, noflush64b);
}
/* memset_mov_sse2_empty -- temporal-store memset with no-op flush callbacks */
void
memset_mov_sse2_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_empty_nolog, flush64b_empty);
}
/* memset_mov_sse2_clflush -- temporal-store memset flushed with CLFLUSH */
void
memset_mov_sse2_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
/* memset_mov_sse2_clflushopt -- temporal-store memset flushed with CLFLUSHOPT */
void
memset_mov_sse2_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* memset_mov_sse2_clwb -- temporal-store memset flushed with CLWB */
void
memset_mov_sse2_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_sse2(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 3,304 | 20.461039 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_sse2.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMSET_SSE2_H
#define PMEM2_MEMSET_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "out.h"
/*
 * memset_small_sse2_noflush -- memset up to 64 bytes without flushing.
 * Uses pairs of (possibly overlapping) stores anchored at the start and
 * at (dest + len - width) so every size bucket needs a fixed number of
 * stores and no loop.
 */
static force_inline void
memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len)
{
	ASSERT(len <= 64);
	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;
	if (len > 48) {
		/* 49..64 */
		_mm_storeu_si128((__m128i *)(dest + 0), xmm);
		_mm_storeu_si128((__m128i *)(dest + 16), xmm);
		_mm_storeu_si128((__m128i *)(dest + 32), xmm);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
		return;
	}
	/* 33..48 */
	_mm_storeu_si128((__m128i *)(dest + 0), xmm);
	_mm_storeu_si128((__m128i *)(dest + 16), xmm);
	_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
	return;
le32:
	if (len > 16) {
		/* 17..32 */
		_mm_storeu_si128((__m128i *)(dest + 0), xmm);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm);
		return;
	}
	/* 9..16 */
	uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm);
	*(ua_uint64_t *)dest = d8;
	*(ua_uint64_t *)(dest + len - 8) = d8;
	return;
le8:
	if (len <= 2)
		goto le2;
	if (len > 4) {
		/* 5..8 */
		uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm);
		*(ua_uint32_t *)dest = d4;
		*(ua_uint32_t *)(dest + len - 4) = d4;
		return;
	}
	/* 3..4 */
	uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
	*(ua_uint16_t *)dest = d2;
	*(ua_uint16_t *)(dest + len - 2) = d2;
	return;
le2:
	if (len == 2) {
		uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm);
		*(ua_uint16_t *)dest = d2;
		return;
	}
	/* len == 1 (len == 0 never reaches here from the callers above) */
	*(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm);
}
/*
 * memset_small_sse2 -- memset up to 64 bytes and flush the range with the
 * caller-supplied flush callback.
 */
static force_inline void
memset_small_sse2(char *dest, __m128i xmm, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memset also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memset_nodrain_generic(dest, (uint8_t)_mm_cvtsi128_si32(xmm),
				len, PMEM2_F_MEM_NOFLUSH, NULL);
	} else {
		memset_small_sse2_noflush(dest, xmm, len);
	}
	flush(dest, len);
}
#endif
| 2,213 | 20.085714 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memset/memset_t_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx.h"
/* mm256_store_si256 -- aligned 32 B store at (dest + idx * 32) */
static force_inline void
mm256_store_si256(char *dest, unsigned idx, __m256i src)
{
	_mm256_store_si256((__m256i *)dest + idx, src);
}
/* memset_mov8x64b -- fill 8 cache lines with regular AVX stores, then flush */
static force_inline void
memset_mov8x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	mm256_store_si256(dest, 0, ymm);
	mm256_store_si256(dest, 1, ymm);
	mm256_store_si256(dest, 2, ymm);
	mm256_store_si256(dest, 3, ymm);
	mm256_store_si256(dest, 4, ymm);
	mm256_store_si256(dest, 5, ymm);
	mm256_store_si256(dest, 6, ymm);
	mm256_store_si256(dest, 7, ymm);
	mm256_store_si256(dest, 8, ymm);
	mm256_store_si256(dest, 9, ymm);
	mm256_store_si256(dest, 10, ymm);
	mm256_store_si256(dest, 11, ymm);
	mm256_store_si256(dest, 12, ymm);
	mm256_store_si256(dest, 13, ymm);
	mm256_store_si256(dest, 14, ymm);
	mm256_store_si256(dest, 15, ymm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
}
/* memset_mov4x64b -- fill 4 cache lines with regular AVX stores, then flush */
static force_inline void
memset_mov4x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	mm256_store_si256(dest, 0, ymm);
	mm256_store_si256(dest, 1, ymm);
	mm256_store_si256(dest, 2, ymm);
	mm256_store_si256(dest, 3, ymm);
	mm256_store_si256(dest, 4, ymm);
	mm256_store_si256(dest, 5, ymm);
	mm256_store_si256(dest, 6, ymm);
	mm256_store_si256(dest, 7, ymm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memset_mov2x64b -- fill 2 cache lines with regular AVX stores, then flush */
static force_inline void
memset_mov2x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	mm256_store_si256(dest, 0, ymm);
	mm256_store_si256(dest, 1, ymm);
	mm256_store_si256(dest, 2, ymm);
	mm256_store_si256(dest, 3, ymm);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memset_mov1x64b -- fill 1 cache line with regular AVX stores, then flush */
static force_inline void
memset_mov1x64b(char *dest, __m256i ymm, flush64b_fn flush64b)
{
	mm256_store_si256(dest, 0, ymm);
	mm256_store_si256(dest, 1, ymm);
	flush64b(dest + 0 * 64);
}
/*
 * memset_mov_avx -- memset len bytes at dest to (char)c using regular
 * (temporal) AVX stores; flush policy is supplied by the caller.
 */
static force_inline void
memset_mov_avx(char *dest, int c, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	__m256i ymm = _mm256_set1_epi8((char)c);
	/* head: reach the next cache-line boundary with the small-path memset */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memset_small_avx(dest, ymm, cnt, flush);
		dest += cnt;
		len -= cnt;
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 8 * 64) {
		memset_mov8x64b(dest, ymm, flush64b);
		dest += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memset_mov4x64b(dest, ymm, flush64b);
		dest += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memset_mov2x64b(dest, ymm, flush64b);
		dest += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memset_mov1x64b(dest, ymm, flush64b);
		dest += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: remainder inside the last cache line */
	if (len)
		memset_small_avx(dest, ymm, len, flush);
	/* leave the AVX state clean to avoid SSE/AVX transition penalties */
	avx_zeroupper();
}
/* memset_mov_avx_noflush -- AVX temporal-store memset, no flushing at all */
void
memset_mov_avx_noflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, noflush, noflush64b);
}
/* memset_mov_avx_empty -- AVX temporal-store memset with no-op flushes */
void
memset_mov_avx_empty(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_empty_nolog, flush64b_empty);
}
/* memset_mov_avx_clflush -- AVX temporal-store memset flushed with CLFLUSH */
void
memset_mov_avx_clflush(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clflush_nolog, pmem_clflush);
}
/* memset_mov_avx_clflushopt -- AVX temporal-store memset via CLFLUSHOPT */
void
memset_mov_avx_clflushopt(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* memset_mov_avx_clwb -- AVX temporal-store memset flushed with CLWB */
void
memset_mov_avx_clwb(char *dest, int c, size_t len)
{
	LOG(15, "dest %p c %d len %zu", dest, c, len);
	memset_mov_avx(dest, c, len, flush_clwb_nolog, pmem_clwb);
}
| 3,890 | 20.73743 | 65 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "out.h"
/* mm_loadu_si128 -- unaligned 16 B load from (src + idx * 16) */
static force_inline __m128i
mm_loadu_si128(const char *src, unsigned idx)
{
	return _mm_loadu_si128((const __m128i *)src + idx);
}
/* mm_store_si128 -- aligned 16 B store at (dest + idx * 16) */
static force_inline void
mm_store_si128(char *dest, unsigned idx, __m128i src)
{
	_mm_store_si128((__m128i *)dest + idx, src);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B); all loads are issued
 * before any store so src and dest may overlap within the block.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	__m128i xmm8 = mm_loadu_si128(src, 8);
	__m128i xmm9 = mm_loadu_si128(src, 9);
	__m128i xmm10 = mm_loadu_si128(src, 10);
	__m128i xmm11 = mm_loadu_si128(src, 11);
	__m128i xmm12 = mm_loadu_si128(src, 12);
	__m128i xmm13 = mm_loadu_si128(src, 13);
	__m128i xmm14 = mm_loadu_si128(src, 14);
	__m128i xmm15 = mm_loadu_si128(src, 15);
	mm_store_si128(dest, 0, xmm0);
	mm_store_si128(dest, 1, xmm1);
	mm_store_si128(dest, 2, xmm2);
	mm_store_si128(dest, 3, xmm3);
	mm_store_si128(dest, 4, xmm4);
	mm_store_si128(dest, 5, xmm5);
	mm_store_si128(dest, 6, xmm6);
	mm_store_si128(dest, 7, xmm7);
	mm_store_si128(dest, 8, xmm8);
	mm_store_si128(dest, 9, xmm9);
	mm_store_si128(dest, 10, xmm10);
	mm_store_si128(dest, 11, xmm11);
	mm_store_si128(dest, 12, xmm12);
	mm_store_si128(dest, 13, xmm13);
	mm_store_si128(dest, 14, xmm14);
	mm_store_si128(dest, 15, xmm15);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memmove_mov2x64b -- copy 2 cache lines (128 B), loads before stores */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	mm_store_si128(dest, 0, xmm0);
	mm_store_si128(dest, 1, xmm1);
	mm_store_si128(dest, 2, xmm2);
	mm_store_si128(dest, 3, xmm3);
	mm_store_si128(dest, 4, xmm4);
	mm_store_si128(dest, 5, xmm5);
	mm_store_si128(dest, 6, xmm6);
	mm_store_si128(dest, 7, xmm7);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memmove_mov1x64b -- copy 1 cache line (64 B), loads before stores */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	mm_store_si128(dest, 0, xmm0);
	mm_store_si128(dest, 1, xmm1);
	mm_store_si128(dest, 2, xmm2);
	mm_store_si128(dest, 3, xmm3);
	flush64b(dest + 0 * 64);
}
/*
 * memmove_mov_sse_fw -- forward (low-to-high address) copy; safe when
 * dest does not fall inside (src, src + len).
 */
static force_inline void
memmove_mov_sse_fw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* head: align dest to a cache-line boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_sse2(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: remainder inside the last cache line */
	if (len)
		memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_mov_sse_bw -- backward (high-to-low address) copy used when a
 * forward pass would overwrite not-yet-copied source bytes.
 */
static force_inline void
memmove_mov_sse_bw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* start just past the end and work down */
	dest += len;
	src += len;
	/* head: bring dest down to a cache-line boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_sse2(dest, src, cnt, flush);
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* tail: the lowest-addressed remainder */
	if (len)
		memmove_small_sse2(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_sse2 -- pick the copy direction so overlapping ranges are
 * handled correctly, then delegate to the forward or backward loop.
 */
static force_inline void
memmove_mov_sse2(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/*
	 * The unsigned difference is < len only when dest lands inside
	 * (src, src + len), i.e. a forward copy would clobber source
	 * bytes that have not been read yet.
	 */
	int copy_backward = (uintptr_t)dest - (uintptr_t)src < len;

	if (copy_backward)
		memmove_mov_sse_bw(dest, src, len, flush, flush64b);
	else
		memmove_mov_sse_fw(dest, src, len, flush, flush64b);
}
/* memmove_mov_sse2_noflush -- SSE2 temporal-store memmove, no flushing */
void
memmove_mov_sse2_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, noflush, noflush64b);
}
/* memmove_mov_sse2_empty -- SSE2 temporal-store memmove, no-op flushes */
void
memmove_mov_sse2_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_empty_nolog, flush64b_empty);
}
/* memmove_mov_sse2_clflush -- SSE2 temporal-store memmove via CLFLUSH */
void
memmove_mov_sse2_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
/* memmove_mov_sse2_clflushopt -- SSE2 temporal-store memmove via CLFLUSHOPT */
void
memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* memmove_mov_sse2_clwb -- SSE2 temporal-store memmove via CLWB */
void
memmove_mov_sse2_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_sse2(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
| 5,820 | 22.566802 | 69 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_avx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMCPY_AVX_H
#define PMEM2_MEMCPY_AVX_H
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "out.h"
/*
 * memmove_small_avx_noflush -- copy up to 64 bytes without flushing.
 * All loads happen before all stores within each size bucket, and the
 * second store of each pair is anchored at (len - width), so any overlap
 * between src and dest is handled.
 */
static force_inline void
memmove_small_avx_noflush(char *dest, const char *src, size_t len)
{
	ASSERT(len <= 64);
	if (len <= 8)
		goto le8;
	if (len <= 32)
		goto le32;
	/* 33..64 */
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
	__m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32));
	_mm256_storeu_si256((__m256i *)dest, ymm0);
	_mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1);
	return;
le32:
	if (len > 16) {
		/* 17..32 */
		__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
		__m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16));
		_mm_storeu_si128((__m128i *)dest, xmm0);
		_mm_storeu_si128((__m128i *)(dest + len - 16), xmm1);
		return;
	}
	/* 9..16 */
	ua_uint64_t d80 = *(ua_uint64_t *)src;
	ua_uint64_t d81 = *(ua_uint64_t *)(src + len - 8);
	*(ua_uint64_t *)dest = d80;
	*(ua_uint64_t *)(dest + len - 8) = d81;
	return;
le8:
	if (len <= 2)
		goto le2;
	if (len > 4) {
		/* 5..8 */
		ua_uint32_t d40 = *(ua_uint32_t *)src;
		ua_uint32_t d41 = *(ua_uint32_t *)(src + len - 4);
		*(ua_uint32_t *)dest = d40;
		*(ua_uint32_t *)(dest + len - 4) = d41;
		return;
	}
	/* 3..4 */
	ua_uint16_t d20 = *(ua_uint16_t *)src;
	ua_uint16_t d21 = *(ua_uint16_t *)(src + len - 2);
	*(ua_uint16_t *)dest = d20;
	*(ua_uint16_t *)(dest + len - 2) = d21;
	return;
le2:
	if (len == 2) {
		*(ua_uint16_t *)dest = *(ua_uint16_t *)src;
		return;
	}
	/* len == 1 */
	*(uint8_t *)dest = *(uint8_t *)src;
}
/*
 * memmove_small_avx -- copy up to 64 bytes and flush the destination
 * range with the caller-supplied flush callback.
 */
static force_inline void
memmove_small_avx(char *dest, const char *src, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memcpy also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH,
				NULL);
	} else {
		memmove_small_avx_noflush(dest, src, len);
	}
	flush(dest, len);
}
#endif
| 2,173 | 20.524752 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx.h"
/* mm256_loadu_si256 -- unaligned 32 B load from (src + idx * 32) */
static force_inline __m256i
mm256_loadu_si256(const char *src, unsigned idx)
{
	return _mm256_loadu_si256((const __m256i *)src + idx);
}
/* mm256_store_si256 -- aligned 32 B store at (dest + idx * 32) */
static force_inline void
mm256_store_si256(char *dest, unsigned idx, __m256i src)
{
	_mm256_store_si256((__m256i *)dest + idx, src);
}
/*
 * memmove_mov8x64b -- copy 8 cache lines (512 B); all loads are issued
 * before any store so src and dest may overlap within the block.
 */
static force_inline void
memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	__m256i ymm8 = mm256_loadu_si256(src, 8);
	__m256i ymm9 = mm256_loadu_si256(src, 9);
	__m256i ymm10 = mm256_loadu_si256(src, 10);
	__m256i ymm11 = mm256_loadu_si256(src, 11);
	__m256i ymm12 = mm256_loadu_si256(src, 12);
	__m256i ymm13 = mm256_loadu_si256(src, 13);
	__m256i ymm14 = mm256_loadu_si256(src, 14);
	__m256i ymm15 = mm256_loadu_si256(src, 15);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	mm256_store_si256(dest, 2, ymm2);
	mm256_store_si256(dest, 3, ymm3);
	mm256_store_si256(dest, 4, ymm4);
	mm256_store_si256(dest, 5, ymm5);
	mm256_store_si256(dest, 6, ymm6);
	mm256_store_si256(dest, 7, ymm7);
	mm256_store_si256(dest, 8, ymm8);
	mm256_store_si256(dest, 9, ymm9);
	mm256_store_si256(dest, 10, ymm10);
	mm256_store_si256(dest, 11, ymm11);
	mm256_store_si256(dest, 12, ymm12);
	mm256_store_si256(dest, 13, ymm13);
	mm256_store_si256(dest, 14, ymm14);
	mm256_store_si256(dest, 15, ymm15);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
}
/* memmove_mov4x64b -- copy 4 cache lines (256 B), loads before stores */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	mm256_store_si256(dest, 2, ymm2);
	mm256_store_si256(dest, 3, ymm3);
	mm256_store_si256(dest, 4, ymm4);
	mm256_store_si256(dest, 5, ymm5);
	mm256_store_si256(dest, 6, ymm6);
	mm256_store_si256(dest, 7, ymm7);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memmove_mov2x64b -- copy 2 cache lines (128 B), loads before stores */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	mm256_store_si256(dest, 2, ymm2);
	mm256_store_si256(dest, 3, ymm3);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memmove_mov1x64b -- copy 1 cache line (64 B), loads before stores */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	mm256_store_si256(dest, 0, ymm0);
	mm256_store_si256(dest, 1, ymm1);
	flush64b(dest + 0 * 64);
}
/*
 * memmove_mov_avx_fw -- forward (low-to-high address) copy; safe when
 * dest does not fall inside (src, src + len).
 */
static force_inline void
memmove_mov_avx_fw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* head: align dest to a cache-line boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 8 * 64) {
		memmove_mov8x64b(dest, src, flush64b);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* tail: remainder inside the last cache line */
	if (len)
		memmove_small_avx(dest, src, len, flush);
}
/*
 * memmove_mov_avx_bw -- backward (high-to-low address) copy used when a
 * forward pass would overwrite not-yet-copied source bytes.
 */
static force_inline void
memmove_mov_avx_bw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* start just past the end and work down */
	dest += len;
	src += len;
	/* head: bring dest down to a cache-line boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx(dest, src, cnt, flush);
	}
	/* body: whole cache lines, largest chunk first */
	while (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_mov8x64b(dest, src, flush64b);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* tail: the lowest-addressed remainder */
	if (len)
		memmove_small_avx(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_avx -- pick the copy direction so overlapping ranges are
 * handled correctly, delegate to the forward or backward loop, and
 * clear the upper AVX state before returning.
 */
static force_inline void
memmove_mov_avx(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/*
	 * The unsigned difference is < len only when dest lands inside
	 * (src, src + len), i.e. a forward copy would clobber source
	 * bytes that have not been read yet.
	 */
	int copy_backward = (uintptr_t)dest - (uintptr_t)src < len;

	if (copy_backward)
		memmove_mov_avx_bw(dest, src, len, flush, flush64b);
	else
		memmove_mov_avx_fw(dest, src, len, flush, flush64b);

	avx_zeroupper();
}
/* memmove_mov_avx_noflush -- AVX temporal-store memmove, no flushing */
void
memmove_mov_avx_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, noflush, noflush64b);
}
/* memmove_mov_avx_empty -- AVX temporal-store memmove, no-op flushes */
void
memmove_mov_avx_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_empty_nolog, flush64b_empty);
}
/* memmove_mov_avx_clflush -- AVX temporal-store memmove via CLFLUSH */
void
memmove_mov_avx_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
/* memmove_mov_avx_clflushopt -- AVX temporal-store memmove via CLFLUSHOPT */
void
memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
/* memmove_mov_avx_clwb -- AVX temporal-store memmove via CLWB */
void
memmove_mov_avx_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
| 6,705 | 22.780142 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx512f.h"
/* mm512_loadu_si512 -- unaligned 64 B load from (src + idx * 64) */
static force_inline __m512i
mm512_loadu_si512(const char *src, unsigned idx)
{
	return _mm512_loadu_si512((const __m512i *)src + idx);
}
/* mm512_store_si512 -- aligned 64 B store at (dest + idx * 64) */
static force_inline void
mm512_store_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_store_si512((__m512i *)dest + idx, src);
}
/*
 * memmove_mov32x64b -- copy 32 cache lines (2 KiB); all loads are issued
 * before any store so src and dest may overlap within the block.
 */
static force_inline void
memmove_mov32x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	__m512i zmm16 = mm512_loadu_si512(src, 16);
	__m512i zmm17 = mm512_loadu_si512(src, 17);
	__m512i zmm18 = mm512_loadu_si512(src, 18);
	__m512i zmm19 = mm512_loadu_si512(src, 19);
	__m512i zmm20 = mm512_loadu_si512(src, 20);
	__m512i zmm21 = mm512_loadu_si512(src, 21);
	__m512i zmm22 = mm512_loadu_si512(src, 22);
	__m512i zmm23 = mm512_loadu_si512(src, 23);
	__m512i zmm24 = mm512_loadu_si512(src, 24);
	__m512i zmm25 = mm512_loadu_si512(src, 25);
	__m512i zmm26 = mm512_loadu_si512(src, 26);
	__m512i zmm27 = mm512_loadu_si512(src, 27);
	__m512i zmm28 = mm512_loadu_si512(src, 28);
	__m512i zmm29 = mm512_loadu_si512(src, 29);
	__m512i zmm30 = mm512_loadu_si512(src, 30);
	__m512i zmm31 = mm512_loadu_si512(src, 31);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	mm512_store_si512(dest, 8, zmm8);
	mm512_store_si512(dest, 9, zmm9);
	mm512_store_si512(dest, 10, zmm10);
	mm512_store_si512(dest, 11, zmm11);
	mm512_store_si512(dest, 12, zmm12);
	mm512_store_si512(dest, 13, zmm13);
	mm512_store_si512(dest, 14, zmm14);
	mm512_store_si512(dest, 15, zmm15);
	mm512_store_si512(dest, 16, zmm16);
	mm512_store_si512(dest, 17, zmm17);
	mm512_store_si512(dest, 18, zmm18);
	mm512_store_si512(dest, 19, zmm19);
	mm512_store_si512(dest, 20, zmm20);
	mm512_store_si512(dest, 21, zmm21);
	mm512_store_si512(dest, 22, zmm22);
	mm512_store_si512(dest, 23, zmm23);
	mm512_store_si512(dest, 24, zmm24);
	mm512_store_si512(dest, 25, zmm25);
	mm512_store_si512(dest, 26, zmm26);
	mm512_store_si512(dest, 27, zmm27);
	mm512_store_si512(dest, 28, zmm28);
	mm512_store_si512(dest, 29, zmm29);
	mm512_store_si512(dest, 30, zmm30);
	mm512_store_si512(dest, 31, zmm31);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
	flush64b(dest + 16 * 64);
	flush64b(dest + 17 * 64);
	flush64b(dest + 18 * 64);
	flush64b(dest + 19 * 64);
	flush64b(dest + 20 * 64);
	flush64b(dest + 21 * 64);
	flush64b(dest + 22 * 64);
	flush64b(dest + 23 * 64);
	flush64b(dest + 24 * 64);
	flush64b(dest + 25 * 64);
	flush64b(dest + 26 * 64);
	flush64b(dest + 27 * 64);
	flush64b(dest + 28 * 64);
	flush64b(dest + 29 * 64);
	flush64b(dest + 30 * 64);
	flush64b(dest + 31 * 64);
}
/* memmove_mov16x64b -- copy 16 cache lines (1 KiB), loads before stores */
static force_inline void
memmove_mov16x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	__m512i zmm8 = mm512_loadu_si512(src, 8);
	__m512i zmm9 = mm512_loadu_si512(src, 9);
	__m512i zmm10 = mm512_loadu_si512(src, 10);
	__m512i zmm11 = mm512_loadu_si512(src, 11);
	__m512i zmm12 = mm512_loadu_si512(src, 12);
	__m512i zmm13 = mm512_loadu_si512(src, 13);
	__m512i zmm14 = mm512_loadu_si512(src, 14);
	__m512i zmm15 = mm512_loadu_si512(src, 15);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	mm512_store_si512(dest, 8, zmm8);
	mm512_store_si512(dest, 9, zmm9);
	mm512_store_si512(dest, 10, zmm10);
	mm512_store_si512(dest, 11, zmm11);
	mm512_store_si512(dest, 12, zmm12);
	mm512_store_si512(dest, 13, zmm13);
	mm512_store_si512(dest, 14, zmm14);
	mm512_store_si512(dest, 15, zmm15);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
	flush64b(dest + 8 * 64);
	flush64b(dest + 9 * 64);
	flush64b(dest + 10 * 64);
	flush64b(dest + 11 * 64);
	flush64b(dest + 12 * 64);
	flush64b(dest + 13 * 64);
	flush64b(dest + 14 * 64);
	flush64b(dest + 15 * 64);
}
/* memmove_mov8x64b -- copy 8 cache lines (512 B), loads before stores */
static force_inline void
memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	mm512_store_si512(dest, 4, zmm4);
	mm512_store_si512(dest, 5, zmm5);
	mm512_store_si512(dest, 6, zmm6);
	mm512_store_si512(dest, 7, zmm7);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
	flush64b(dest + 4 * 64);
	flush64b(dest + 5 * 64);
	flush64b(dest + 6 * 64);
	flush64b(dest + 7 * 64);
}
/* memmove_mov4x64b -- copy 4 cache lines (256 B), loads before stores */
static force_inline void
memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	mm512_store_si512(dest, 2, zmm2);
	mm512_store_si512(dest, 3, zmm3);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
	flush64b(dest + 2 * 64);
	flush64b(dest + 3 * 64);
}
/* memmove_mov2x64b -- copy 2 cache lines (128 B), loads before stores */
static force_inline void
memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	mm512_store_si512(dest, 0, zmm0);
	mm512_store_si512(dest, 1, zmm1);
	flush64b(dest + 0 * 64);
	flush64b(dest + 1 * 64);
}
/* memmove_mov1x64b -- copy 1 cache line (64 B) */
static force_inline void
memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	mm512_store_si512(dest, 0, zmm0);
	flush64b(dest + 0 * 64);
}
/*
 * memmove_mov_avx512f_fw -- forward (low-to-high address) temporal
 * AVX-512 copy.
 *
 * First copies up to 63 bytes so that dest becomes cache-line
 * aligned, then moves the bulk in progressively smaller cache-line
 * multiples (32/16/8/4/2/1 lines), and finishes any sub-cache-line
 * remainder with memmove_small_avx512f.
 */
static force_inline void
memmove_mov_avx512f_fw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	/* number of bytes dest is past the previous 64 B boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt; /* bytes needed to reach the next boundary */
		if (cnt > len)
			cnt = len;
		memmove_small_avx512f(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* bulk: 32 cache lines (2 KiB) per iteration */
	while (len >= 32 * 64) {
		memmove_mov32x64b(dest, src, flush64b);
		dest += 32 * 64;
		src += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memmove_mov16x64b(dest, src, flush64b);
		dest += 16 * 64;
		src += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memmove_mov8x64b(dest, src, flush64b);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_mov4x64b(dest, src, flush64b);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_mov2x64b(dest, src, flush64b);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_mov1x64b(dest, src, flush64b);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	/* sub-cache-line tail */
	if (len)
		memmove_small_avx512f(dest, src, len, flush);
}
/*
 * memmove_mov_avx512f_bw -- backward (high-to-low address) temporal
 * AVX-512 copy.  Selected by memmove_mov_avx512f when a forward pass
 * could overwrite source bytes that have not been read yet.  Both
 * pointers are advanced to the end of their buffers, then the copy
 * proceeds downwards: alignment head, cache-line-multiple chunks,
 * sub-cache-line tail.
 */
static force_inline void
memmove_mov_avx512f_bw(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	dest += len;
	src += len;
	/* peel bytes until dest is cache-line aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx512f(dest, src, cnt, flush);
	}
	/* bulk: 32 cache lines (2 KiB) per iteration, downwards */
	while (len >= 32 * 64) {
		dest -= 32 * 64;
		src -= 32 * 64;
		len -= 32 * 64;
		memmove_mov32x64b(dest, src, flush64b);
	}
	if (len >= 16 * 64) {
		dest -= 16 * 64;
		src -= 16 * 64;
		len -= 16 * 64;
		memmove_mov16x64b(dest, src, flush64b);
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_mov8x64b(dest, src, flush64b);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_mov4x64b(dest, src, flush64b);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_mov2x64b(dest, src, flush64b);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_mov1x64b(dest, src, flush64b);
	}
	/* sub-cache-line tail at the very beginning of the buffers */
	if (len)
		memmove_small_avx512f(dest - len, src - len, len, flush);
}
/*
 * memmove_mov_avx512f -- common driver for the temporal AVX-512 copy.
 *
 * The single unsigned comparison exploits wrap-around: the distance
 * from src up to dest is smaller than len exactly when dest lies
 * inside [src, src + len), i.e. when a forward pass would clobber
 * source bytes before they are read -- then the backward pass is
 * taken; otherwise the forward pass is safe.
 */
static force_inline void
memmove_mov_avx512f(char *dest, const char *src, size_t len,
		flush_fn flush, flush64b_fn flush64b)
{
	if ((uintptr_t)dest - (uintptr_t)src < len)
		memmove_mov_avx512f_bw(dest, src, len, flush, flush64b);
	else
		memmove_mov_avx512f_fw(dest, src, len, flush, flush64b);

	/* leave the AVX upper state clean for subsequent SSE code */
	avx_zeroupper();
}
/*
 * Exported entry points for the temporal (mov-based) AVX-512 memmove.
 * Each variant binds the common copy loop to one cache-flush
 * mechanism: no flush, empty flush, clflush, clflushopt or clwb.
 */
void
memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, noflush, noflush64b);
}
void
memmove_mov_avx512f_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_empty_nolog, flush64b_empty);
}
void
memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clflush_nolog, pmem_clflush);
}
void
memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clflushopt_nolog,
			pmem_clflushopt);
}
void
memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_mov_avx512f(dest, src, len, flush_clwb_nolog, pmem_clwb);
}
| 11,422 | 25.020501 | 72 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_sse2.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMCPY_SSE2_H
#define PMEM2_MEMCPY_SSE2_H
#include <xmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "out.h"
/*
 * memmove_small_sse2_noflush -- copy at most 64 bytes without
 * flushing.
 *
 * Each size class reads the whole source (head chunks plus one
 * overlapping tail chunk) into registers/locals before writing
 * anything, so the copy stays correct when src and dest overlap.
 * The tail chunk is positioned at (len - chunk_size), which makes a
 * fixed number of loads/stores cover any length in the class.
 */
static force_inline void
memmove_small_sse2_noflush(char *dest, const char *src, size_t len)
{
	ASSERT(len <= 64);

	if (len > 48) {
		/* 49..64 bytes: three 16 B heads + overlapping 16 B tail */
		__m128i head0 = _mm_loadu_si128((__m128i *)src);
		__m128i head1 = _mm_loadu_si128((__m128i *)(src + 16));
		__m128i head2 = _mm_loadu_si128((__m128i *)(src + 32));
		__m128i tail = _mm_loadu_si128((__m128i *)(src + len - 16));

		_mm_storeu_si128((__m128i *)dest, head0);
		_mm_storeu_si128((__m128i *)(dest + 16), head1);
		_mm_storeu_si128((__m128i *)(dest + 32), head2);
		_mm_storeu_si128((__m128i *)(dest + len - 16), tail);
	} else if (len > 32) {
		/* 33..48 bytes: two 16 B heads + overlapping 16 B tail */
		__m128i head0 = _mm_loadu_si128((__m128i *)src);
		__m128i head1 = _mm_loadu_si128((__m128i *)(src + 16));
		__m128i tail = _mm_loadu_si128((__m128i *)(src + len - 16));

		_mm_storeu_si128((__m128i *)dest, head0);
		_mm_storeu_si128((__m128i *)(dest + 16), head1);
		_mm_storeu_si128((__m128i *)(dest + len - 16), tail);
	} else if (len > 16) {
		/* 17..32 bytes: 16 B head + overlapping 16 B tail */
		__m128i head = _mm_loadu_si128((__m128i *)src);
		__m128i tail = _mm_loadu_si128((__m128i *)(src + len - 16));

		_mm_storeu_si128((__m128i *)dest, head);
		_mm_storeu_si128((__m128i *)(dest + len - 16), tail);
	} else if (len > 8) {
		/* 9..16 bytes: two possibly overlapping 8 B words */
		uint64_t head = *(ua_uint64_t *)src;
		uint64_t tail = *(ua_uint64_t *)(src + len - 8);

		*(ua_uint64_t *)dest = head;
		*(ua_uint64_t *)(dest + len - 8) = tail;
	} else if (len > 4) {
		/* 5..8 bytes: two possibly overlapping 4 B words */
		uint32_t head = *(ua_uint32_t *)src;
		uint32_t tail = *(ua_uint32_t *)(src + len - 4);

		*(ua_uint32_t *)dest = head;
		*(ua_uint32_t *)(dest + len - 4) = tail;
	} else if (len > 2) {
		/* 3..4 bytes: two possibly overlapping 2 B words */
		uint16_t head = *(ua_uint16_t *)src;
		uint16_t tail = *(ua_uint16_t *)(src + len - 2);

		*(ua_uint16_t *)dest = head;
		*(ua_uint16_t *)(dest + len - 2) = tail;
	} else if (len == 2) {
		*(ua_uint16_t *)dest = *(ua_uint16_t *)src;
	} else {
		*(uint8_t *)dest = *(uint8_t *)src;
	}
}
/*
 * memmove_small_sse2 -- copy up to 64 bytes and flush the destination
 * with the caller-supplied flush callback.  Under pmemcheck the
 * optimized overlapping-store path is replaced with the generic
 * memmove (see the comment below for why).
 */
static force_inline void
memmove_small_sse2(char *dest, const char *src, size_t len, flush_fn flush)
{
	/*
	 * pmemcheck complains about "overwritten stores before they were made
	 * persistent" for overlapping stores (last instruction in each code
	 * path) in the optimized version.
	 * libc's memcpy also does that, so we can't use it here.
	 */
	if (On_pmemcheck) {
		memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH,
				NULL);
	} else {
		memmove_small_sse2_noflush(dest, src, len);
	}
	flush(dest, len);
}
#endif
| 2,726 | 22.307692 | 75 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx.h"
#include "valgrind_internal.h"
/* mm256_loadu_si256 -- unaligned 256-bit load of the idx-th 32 B chunk */
static force_inline __m256i
mm256_loadu_si256(const char *src, unsigned idx)
{
	return _mm256_loadu_si256((const __m256i *)src + idx);
}
/*
 * mm256_stream_si256 -- non-temporal 256-bit store of the idx-th 32 B
 * chunk.  barrier() is a project helper (memcpy_memset.h); NOTE(review):
 * presumably a compiler barrier keeping the NT stores in program order --
 * confirm against its definition.
 */
static force_inline void
mm256_stream_si256(char *dest, unsigned idx, __m256i src)
{
	_mm256_stream_si256((__m256i *)dest + idx, src);
	barrier();
}
/*
 * memmove_movnt8x64b -- copy 8 cache lines (512 B) with non-temporal
 * AVX stores.  All 16 source registers are loaded before any store is
 * issued, so the chunk is copied correctly for overlapping ranges.
 */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	__m256i ymm8 = mm256_loadu_si256(src, 8);
	__m256i ymm9 = mm256_loadu_si256(src, 9);
	__m256i ymm10 = mm256_loadu_si256(src, 10);
	__m256i ymm11 = mm256_loadu_si256(src, 11);
	__m256i ymm12 = mm256_loadu_si256(src, 12);
	__m256i ymm13 = mm256_loadu_si256(src, 13);
	__m256i ymm14 = mm256_loadu_si256(src, 14);
	__m256i ymm15 = mm256_loadu_si256(src, 15);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
	mm256_stream_si256(dest, 4, ymm4);
	mm256_stream_si256(dest, 5, ymm5);
	mm256_stream_si256(dest, 6, ymm6);
	mm256_stream_si256(dest, 7, ymm7);
	mm256_stream_si256(dest, 8, ymm8);
	mm256_stream_si256(dest, 9, ymm9);
	mm256_stream_si256(dest, 10, ymm10);
	mm256_stream_si256(dest, 11, ymm11);
	mm256_stream_si256(dest, 12, ymm12);
	mm256_stream_si256(dest, 13, ymm13);
	mm256_stream_si256(dest, 14, ymm14);
	mm256_stream_si256(dest, 15, ymm15);
}
/* memmove_movnt4x64b -- 4 cache lines (256 B), same load-first scheme */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	__m256i ymm4 = mm256_loadu_si256(src, 4);
	__m256i ymm5 = mm256_loadu_si256(src, 5);
	__m256i ymm6 = mm256_loadu_si256(src, 6);
	__m256i ymm7 = mm256_loadu_si256(src, 7);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
	mm256_stream_si256(dest, 4, ymm4);
	mm256_stream_si256(dest, 5, ymm5);
	mm256_stream_si256(dest, 6, ymm6);
	mm256_stream_si256(dest, 7, ymm7);
}
/* memmove_movnt2x64b -- 2 cache lines (128 B) */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	__m256i ymm2 = mm256_loadu_si256(src, 2);
	__m256i ymm3 = mm256_loadu_si256(src, 3);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
	mm256_stream_si256(dest, 2, ymm2);
	mm256_stream_si256(dest, 3, ymm3);
}
/* memmove_movnt1x64b -- a single cache line (64 B) */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m256i ymm0 = mm256_loadu_si256(src, 0);
	__m256i ymm1 = mm256_loadu_si256(src, 1);
	mm256_stream_si256(dest, 0, ymm0);
	mm256_stream_si256(dest, 1, ymm1);
}
/* memmove_movnt1x32b -- one 32 B chunk via a single NT 256-bit store */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m256i ymm0 = _mm256_loadu_si256((__m256i *)src);
	mm256_stream_si256(dest, 0, ymm0);
}
/* memmove_movnt1x16b -- one 16 B chunk via a single NT 128-bit store */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	_mm_stream_si128((__m128i *)dest, xmm0);
}
/* memmove_movnt1x8b -- one 8 B word via movnti */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
}
/* memmove_movnt1x4b -- one 4 B word via movnti */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
}
/*
 * memmove_movnt_avx_fw -- forward (low-to-high address) non-temporal
 * AVX copy.  Aligns dest to a cache line, streams the bulk in
 * cache-line multiples with a periodic perf_barrier() between bursts,
 * and finishes the tail with the smallest NT store that fits (or a
 * temporal small copy for odd sizes).
 */
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	/* bytes needed to bring dest to a 64 B boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_avx(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	const char *srcend = src + len;
	/* prefetch helpers are project-defined (memcpy_memset.h) */
	prefetch_ini_fw(src, len);
	/*
	 * Main loop: PERF_BARRIER_SIZE = (8 + 4) * 64 bytes per
	 * iteration, with a caller-selected barrier between bursts of
	 * NT stores (skipped after the final burst).
	 */
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_fw(src, srcend);
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		if (len)
			perf_barrier();
	}
	if (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	memmove_small_avx(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx_bw -- backward (high-to-low address) non-temporal
 * AVX copy; selected by memmove_movnt_avx when a forward pass could
 * overwrite unread source bytes.  Mirrors the forward variant but
 * starts at the end of both buffers and decrements before each chunk.
 */
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	dest += len;
	src += len;
	/* peel bytes until dest is cache-line aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx(dest, src, cnt, flush);
	}
	const char *srcbegin = src - len;
	prefetch_ini_bw(src, len);
	/* PERF_BARRIER_SIZE-byte bursts with a barrier between them */
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_bw(src, srcbegin);
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
		if (len)
			perf_barrier();
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	dest -= len;
	src -= len;
	memmove_small_avx(dest, src, len, flush);
end:
	avx_zeroupper();
}
/*
 * memmove_movnt_avx -- common driver for the non-temporal AVX copy.
 * The unsigned wrap-around comparison picks the backward pass exactly
 * when dest lies within len bytes above src (forward would clobber
 * unread source).  After the copy it issues the caller's fence for
 * the NT stores and tells Valgrind/pmemcheck the range was flushed.
 */
static force_inline void
memmove_movnt_avx(char *dest, const char *src, size_t len, flush_fn flush,
		barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx_fw(dest, src, len, flush, perf_barrier);
	else
		memmove_movnt_avx_bw(dest, src, len, flush, perf_barrier);
	barrier();
	VALGRIND_DO_FLUSH(dest, len);
}
/*
 * Exported entry points: each binds the non-temporal AVX copy to one
 * flush strategy and either no perf_barrier or the write-combining
 * barrier between NT bursts.
 */
/* variants without perf_barrier */
void
memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores,
			no_barrier);
}
void
memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
void
memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
void
memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
| 10,092 | 21.731982 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "valgrind_internal.h"
/* mm_loadu_si128 -- unaligned 128-bit load of the idx-th 16 B chunk */
static force_inline __m128i
mm_loadu_si128(const char *src, unsigned idx)
{
	return _mm_loadu_si128((const __m128i *)src + idx);
}
/*
 * mm_stream_si128 -- non-temporal 128-bit store of the idx-th 16 B
 * chunk.  barrier() is a project helper (memcpy_memset.h); NOTE(review):
 * presumably a compiler barrier keeping NT stores in program order --
 * confirm against its definition.
 */
static force_inline void
mm_stream_si128(char *dest, unsigned idx, __m128i src)
{
	_mm_stream_si128((__m128i *)dest + idx, src);
	barrier();
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) with non-temporal
 * SSE2 stores.  All 16 source registers are loaded before any store,
 * so the chunk is copied correctly for overlapping ranges.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	__m128i xmm8 = mm_loadu_si128(src, 8);
	__m128i xmm9 = mm_loadu_si128(src, 9);
	__m128i xmm10 = mm_loadu_si128(src, 10);
	__m128i xmm11 = mm_loadu_si128(src, 11);
	__m128i xmm12 = mm_loadu_si128(src, 12);
	__m128i xmm13 = mm_loadu_si128(src, 13);
	__m128i xmm14 = mm_loadu_si128(src, 14);
	__m128i xmm15 = mm_loadu_si128(src, 15);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
	mm_stream_si128(dest, 4, xmm4);
	mm_stream_si128(dest, 5, xmm5);
	mm_stream_si128(dest, 6, xmm6);
	mm_stream_si128(dest, 7, xmm7);
	mm_stream_si128(dest, 8, xmm8);
	mm_stream_si128(dest, 9, xmm9);
	mm_stream_si128(dest, 10, xmm10);
	mm_stream_si128(dest, 11, xmm11);
	mm_stream_si128(dest, 12, xmm12);
	mm_stream_si128(dest, 13, xmm13);
	mm_stream_si128(dest, 14, xmm14);
	mm_stream_si128(dest, 15, xmm15);
}
/* memmove_movnt2x64b -- 2 cache lines (128 B), same load-first scheme */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	__m128i xmm4 = mm_loadu_si128(src, 4);
	__m128i xmm5 = mm_loadu_si128(src, 5);
	__m128i xmm6 = mm_loadu_si128(src, 6);
	__m128i xmm7 = mm_loadu_si128(src, 7);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
	mm_stream_si128(dest, 4, xmm4);
	mm_stream_si128(dest, 5, xmm5);
	mm_stream_si128(dest, 6, xmm6);
	mm_stream_si128(dest, 7, xmm7);
}
/* memmove_movnt1x64b -- a single cache line (64 B) */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	__m128i xmm2 = mm_loadu_si128(src, 2);
	__m128i xmm3 = mm_loadu_si128(src, 3);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
	mm_stream_si128(dest, 2, xmm2);
	mm_stream_si128(dest, 3, xmm3);
}
/* memmove_movnt1x32b -- one 32 B chunk (two NT 128-bit stores) */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	__m128i xmm1 = mm_loadu_si128(src, 1);
	mm_stream_si128(dest, 0, xmm0);
	mm_stream_si128(dest, 1, xmm1);
}
/* memmove_movnt1x16b -- one 16 B chunk (single NT 128-bit store) */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = mm_loadu_si128(src, 0);
	mm_stream_si128(dest, 0, xmm0);
}
/* memmove_movnt1x8b -- one 8 B word via movnti */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
}
/* memmove_movnt1x4b -- one 4 B word via movnti */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
}
/*
 * memmove_movnt_sse_fw -- forward (low-to-high address) non-temporal
 * SSE2 copy.  Aligns dest to a cache line, streams the bulk in
 * cache-line multiples with a periodic perf_barrier() between bursts,
 * and finishes the tail with the smallest NT store that fits (or a
 * temporal small copy for odd sizes).
 */
static force_inline void
memmove_movnt_sse_fw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	/* bytes needed to bring dest to a 64 B boundary */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		memmove_small_sse2(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	const char *srcend = src + len;
	/* prefetch helpers are project-defined (memcpy_memset.h) */
	prefetch_ini_fw(src, len);
	/*
	 * Main loop: PERF_BARRIER_SIZE = (4 + 4 + 4) * 64 bytes per
	 * iteration, with a caller-selected barrier between bursts of
	 * NT stores (skipped after the final burst).
	 */
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_fw(src, srcend);
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		if (len)
			perf_barrier();
	}
	while (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		return;
	}
nonnt:
	memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_movnt_sse_bw -- backward (high-to-low address) non-temporal
 * SSE2 copy; selected by memmove_movnt_sse2 when a forward pass could
 * overwrite unread source bytes.  Mirrors the forward variant but
 * starts at the end of both buffers and decrements before each chunk.
 */
static force_inline void
memmove_movnt_sse_bw(char *dest, const char *src, size_t len, flush_fn flush,
		perf_barrier_fn perf_barrier)
{
	dest += len;
	src += len;
	/* peel bytes until dest is cache-line aligned */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_sse2(dest, src, cnt, flush);
	}
	const char *srcbegin = src - len;
	prefetch_ini_bw(src, len);
	/* PERF_BARRIER_SIZE-byte bursts with a barrier between them */
	while (len >= PERF_BARRIER_SIZE) {
		prefetch_next_bw(src, srcbegin);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
		COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
		if (len)
			perf_barrier();
	}
	while (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		return;
	}
nonnt:
	dest -= len;
	src -= len;
	memmove_small_sse2(dest, src, len, flush);
}
/*
 * memmove_movnt_sse2 -- common driver for the non-temporal SSE2 copy.
 *
 * The unsigned difference (uintptr_t)dest - (uintptr_t)src wraps, so a
 * single compare against len is true exactly when dest lies inside
 * [src, src + len) -- i.e. when a forward pass would overwrite source
 * bytes that were not read yet; then the backward pass is used.
 * After the copy the caller-selected fence orders the NT stores and
 * Valgrind/pmemcheck is told the destination range was flushed.
 */
static force_inline void
memmove_movnt_sse2(char *dest, const char *src, size_t len, flush_fn flush,
		barrier_fn barrier, perf_barrier_fn perf_barrier)
{
	if ((uintptr_t)dest - (uintptr_t)src < len)
		memmove_movnt_sse_bw(dest, src, len, flush, perf_barrier);
	else
		memmove_movnt_sse_fw(dest, src, len, flush, perf_barrier);

	barrier();

	VALGRIND_DO_FLUSH(dest, len);
}
/*
 * Exported entry points: each binds the non-temporal SSE2 copy to one
 * flush strategy and either no perf_barrier or the write-combining
 * barrier between NT bursts.
 */
/* variants without perf_barrier */
void
memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores,
			no_barrier);
}
void
memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, no_barrier);
}
void
memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
void
memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores,
			wc_barrier);
}
void
memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
void
memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_sse2(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores, wc_barrier);
}
| 9,636 | 21.463869 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_avx512f.h"
#include "valgrind_internal.h"
/* mm512_loadu_si512 -- unaligned 512-bit load of the idx-th 64 B chunk */
static force_inline __m512i
mm512_loadu_si512(const char *src, unsigned idx)
{
	return _mm512_loadu_si512((const __m512i *)src + idx);
}
/*
 * mm512_stream_si512 -- non-temporal 512-bit store of the idx-th 64 B
 * chunk.  barrier() is a project helper (memcpy_memset.h); NOTE(review):
 * presumably a compiler barrier keeping NT stores in program order --
 * confirm against its definition.
 */
static force_inline void
mm512_stream_si512(char *dest, unsigned idx, __m512i src)
{
	_mm512_stream_si512((__m512i *)dest + idx, src);
	barrier();
}
static force_inline void
memmove_movnt32x64b(char *dest, const char *src)
{
__m512i zmm0 = mm512_loadu_si512(src, 0);
__m512i zmm1 = mm512_loadu_si512(src, 1);
__m512i zmm2 = mm512_loadu_si512(src, 2);
__m512i zmm3 = mm512_loadu_si512(src, 3);
__m512i zmm4 = mm512_loadu_si512(src, 4);
__m512i zmm5 = mm512_loadu_si512(src, 5);
__m512i zmm6 = mm512_loadu_si512(src, 6);
__m512i zmm7 = mm512_loadu_si512(src, 7);
__m512i zmm8 = mm512_loadu_si512(src, 8);
__m512i zmm9 = mm512_loadu_si512(src, 9);
__m512i zmm10 = mm512_loadu_si512(src, 10);
__m512i zmm11 = mm512_loadu_si512(src, 11);
__m512i zmm12 = mm512_loadu_si512(src, 12);
__m512i zmm13 = mm512_loadu_si512(src, 13);
__m512i zmm14 = mm512_loadu_si512(src, 14);
__m512i zmm15 = mm512_loadu_si512(src, 15);
__m512i zmm16 = mm512_loadu_si512(src, 16);
__m512i zmm17 = mm512_loadu_si512(src, 17);
__m512i zmm18 = mm512_loadu_si512(src, 18);
__m512i zmm19 = mm512_loadu_si512(src, 19);
__m512i zmm20 = mm512_loadu_si512(src, 20);
__m512i zmm21 = mm512_loadu_si512(src, 21);
__m512i zmm22 = mm512_loadu_si512(src, 22);
__m512i zmm23 = mm512_loadu_si512(src, 23);
__m512i zmm24 = mm512_loadu_si512(src, 24);
__m512i zmm25 = mm512_loadu_si512(src, 25);
__m512i zmm26 = mm512_loadu_si512(src, 26);
__m512i zmm27 = mm512_loadu_si512(src, 27);
__m512i zmm28 = mm512_loadu_si512(src, 28);
__m512i zmm29 = mm512_loadu_si512(src, 29);
__m512i zmm30 = mm512_loadu_si512(src, 30);
__m512i zmm31 = mm512_loadu_si512(src, 31);
mm512_stream_si512(dest, 0, zmm0);
mm512_stream_si512(dest, 1, zmm1);
mm512_stream_si512(dest, 2, zmm2);
mm512_stream_si512(dest, 3, zmm3);
mm512_stream_si512(dest, 4, zmm4);
mm512_stream_si512(dest, 5, zmm5);
mm512_stream_si512(dest, 6, zmm6);
mm512_stream_si512(dest, 7, zmm7);
mm512_stream_si512(dest, 8, zmm8);
mm512_stream_si512(dest, 9, zmm9);
mm512_stream_si512(dest, 10, zmm10);
mm512_stream_si512(dest, 11, zmm11);
mm512_stream_si512(dest, 12, zmm12);
mm512_stream_si512(dest, 13, zmm13);
mm512_stream_si512(dest, 14, zmm14);
mm512_stream_si512(dest, 15, zmm15);
mm512_stream_si512(dest, 16, zmm16);
mm512_stream_si512(dest, 17, zmm17);
mm512_stream_si512(dest, 18, zmm18);
mm512_stream_si512(dest, 19, zmm19);
mm512_stream_si512(dest, 20, zmm20);
mm512_stream_si512(dest, 21, zmm21);
mm512_stream_si512(dest, 22, zmm22);
mm512_stream_si512(dest, 23, zmm23);
mm512_stream_si512(dest, 24, zmm24);
mm512_stream_si512(dest, 25, zmm25);
mm512_stream_si512(dest, 26, zmm26);
mm512_stream_si512(dest, 27, zmm27);
mm512_stream_si512(dest, 28, zmm28);
mm512_stream_si512(dest, 29, zmm29);
mm512_stream_si512(dest, 30, zmm30);
mm512_stream_si512(dest, 31, zmm31);
}
static force_inline void
memmove_movnt16x64b(char *dest, const char *src)
{
__m512i zmm0 = mm512_loadu_si512(src, 0);
__m512i zmm1 = mm512_loadu_si512(src, 1);
__m512i zmm2 = mm512_loadu_si512(src, 2);
__m512i zmm3 = mm512_loadu_si512(src, 3);
__m512i zmm4 = mm512_loadu_si512(src, 4);
__m512i zmm5 = mm512_loadu_si512(src, 5);
__m512i zmm6 = mm512_loadu_si512(src, 6);
__m512i zmm7 = mm512_loadu_si512(src, 7);
__m512i zmm8 = mm512_loadu_si512(src, 8);
__m512i zmm9 = mm512_loadu_si512(src, 9);
__m512i zmm10 = mm512_loadu_si512(src, 10);
__m512i zmm11 = mm512_loadu_si512(src, 11);
__m512i zmm12 = mm512_loadu_si512(src, 12);
__m512i zmm13 = mm512_loadu_si512(src, 13);
__m512i zmm14 = mm512_loadu_si512(src, 14);
__m512i zmm15 = mm512_loadu_si512(src, 15);
mm512_stream_si512(dest, 0, zmm0);
mm512_stream_si512(dest, 1, zmm1);
mm512_stream_si512(dest, 2, zmm2);
mm512_stream_si512(dest, 3, zmm3);
mm512_stream_si512(dest, 4, zmm4);
mm512_stream_si512(dest, 5, zmm5);
mm512_stream_si512(dest, 6, zmm6);
mm512_stream_si512(dest, 7, zmm7);
mm512_stream_si512(dest, 8, zmm8);
mm512_stream_si512(dest, 9, zmm9);
mm512_stream_si512(dest, 10, zmm10);
mm512_stream_si512(dest, 11, zmm11);
mm512_stream_si512(dest, 12, zmm12);
mm512_stream_si512(dest, 13, zmm13);
mm512_stream_si512(dest, 14, zmm14);
mm512_stream_si512(dest, 15, zmm15);
}
/*
 * memmove_movnt8x64b -- copy 8 cache lines (512 B) using non-temporal
 * AVX-512 stores; all loads are issued before the stores.
 */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	__m512i zmm4 = mm512_loadu_si512(src, 4);
	__m512i zmm5 = mm512_loadu_si512(src, 5);
	__m512i zmm6 = mm512_loadu_si512(src, 6);
	__m512i zmm7 = mm512_loadu_si512(src, 7);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
	mm512_stream_si512(dest, 4, zmm4);
	mm512_stream_si512(dest, 5, zmm5);
	mm512_stream_si512(dest, 6, zmm6);
	mm512_stream_si512(dest, 7, zmm7);
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) using non-temporal
 * AVX-512 stores; all loads are issued before the stores.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	__m512i zmm2 = mm512_loadu_si512(src, 2);
	__m512i zmm3 = mm512_loadu_si512(src, 3);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
	mm512_stream_si512(dest, 2, zmm2);
	mm512_stream_si512(dest, 3, zmm3);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) using non-temporal
 * AVX-512 stores.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	__m512i zmm1 = mm512_loadu_si512(src, 1);
	mm512_stream_si512(dest, 0, zmm0);
	mm512_stream_si512(dest, 1, zmm1);
}
/*
 * memmove_movnt1x64b -- copy one cache line (64 B) using a single
 * non-temporal AVX-512 store.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m512i zmm0 = mm512_loadu_si512(src, 0);
	mm512_stream_si512(dest, 0, zmm0);
}
/*
 * memmove_movnt1x32b -- copy 32 bytes (half a cache line) with a single
 * non-temporal AVX store; the load tolerates an unaligned src.
 */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	/* const-qualified cast: do not silently discard const from src */
	__m256i ymm0 = _mm256_loadu_si256((const __m256i *)src);

	_mm256_stream_si256((__m256i *)dest, ymm0);
}
/*
 * memmove_movnt1x16b -- copy 16 bytes with a single non-temporal SSE
 * store; the load tolerates an unaligned src.
 */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	/*
	 * Renamed ymm0 -> xmm0: this is a 128-bit __m128i value, so the
	 * xmm name matches the register class actually used.  Cast keeps
	 * the const qualifier of src.
	 */
	__m128i xmm0 = _mm_loadu_si128((const __m128i *)src);

	_mm_stream_si128((__m128i *)dest, xmm0);
}
/*
 * memmove_movnt1x8b -- copy 8 bytes with a non-temporal store.
 */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	/*
	 * Read through a const-qualified pointer instead of casting the
	 * const away.  NOTE(review): src is only known to be char-aligned
	 * here; the 8-byte dereference relies on x86 tolerating unaligned
	 * loads -- confirm this matches the project's build settings.
	 */
	_mm_stream_si64((long long *)dest, *(const long long *)src);
}
/*
 * memmove_movnt1x4b -- copy 4 bytes with a non-temporal store.
 */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	/*
	 * Read through a const-qualified pointer instead of casting the
	 * const away.  NOTE(review): src may be unaligned here; relies on
	 * x86 unaligned-load tolerance.
	 */
	_mm_stream_si32((int *)dest, *(const int *)src);
}
/*
 * memmove_movnt_avx512f_fw -- forward (ascending-address) non-temporal
 * copy.  Aligns dest up to a 64-byte boundary with a regular small copy,
 * streams the bulk in progressively smaller cache-line-multiple chunks,
 * then handles the sub-cache-line tail.
 */
static force_inline void
memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len,
		flush_fn flush)
{
	/* head: bytes up to the next 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		/* unaligned head uses the regular (flushed) small copy */
		memmove_small_avx512f(dest, src, cnt, flush);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* bulk: dest is now 64-byte aligned; stream whole cache lines */
	while (len >= 32 * 64) {
		memmove_movnt32x64b(dest, src);
		dest += 32 * 64;
		src += 32 * 64;
		len -= 32 * 64;
	}
	if (len >= 16 * 64) {
		memmove_movnt16x64b(dest, src);
		dest += 16 * 64;
		src += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* non-power-of-2 (or < 4 B) tail: regular copy + explicit flush */
	memmove_small_avx512f(dest, src, len, flush);
end:
	/* reset upper AVX register state before returning */
	avx_zeroupper();
}
/*
 * memmove_movnt_avx512f_bw -- backward (descending-address) non-temporal
 * copy, used when dest overlaps src from above.  Mirrors the forward
 * variant: pointers start one-past-the-end and are decremented before
 * each chunk is copied.
 */
static force_inline void
memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len,
		flush_fn flush)
{
	/* start past the end; copy proceeds toward lower addresses */
	dest += len;
	src += len;
	/* head: bytes down to the previous 64-byte boundary of dest */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		memmove_small_avx512f(dest, src, cnt, flush);
	}
	/* bulk: dest is now 64-byte aligned; stream whole cache lines */
	while (len >= 32 * 64) {
		dest -= 32 * 64;
		src -= 32 * 64;
		len -= 32 * 64;
		memmove_movnt32x64b(dest, src);
	}
	if (len >= 16 * 64) {
		dest -= 16 * 64;
		src -= 16 * 64;
		len -= 16 * 64;
		memmove_movnt16x64b(dest, src);
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	/* non-power-of-2 (or < 4 B) tail: regular copy + explicit flush */
	dest -= len;
	src -= len;
	memmove_small_avx512f(dest, src, len, flush);
end:
	/* reset upper AVX register state before returning */
	avx_zeroupper();
}
/*
 * memmove_movnt_avx512f -- non-temporal AVX-512 memmove dispatcher:
 * picks the copy direction so overlapping ranges are handled correctly,
 * then issues the caller-selected barrier and informs Valgrind.
 */
static force_inline void
memmove_movnt_avx512f(char *dest, const char *src, size_t len, flush_fn flush,
		barrier_fn barrier)
{
	/*
	 * Unsigned wrap-around makes this comparison true both when
	 * dest < src and when the ranges do not overlap at all; in either
	 * case a forward copy cannot clobber not-yet-read source bytes.
	 */
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx512f_fw(dest, src, len, flush);
	else
		memmove_movnt_avx512f_bw(dest, src, len, flush);
	barrier();
	VALGRIND_DO_FLUSH(dest, len);
}
/*
 * Public entry points: the same non-temporal copy combined with the
 * flush function and post-store barrier selected for each flush
 * mechanism (noflush/empty/clflush use barrier_after_ntstores;
 * clflushopt/clwb use no_barrier_after_ntstores).
 */
void
memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, noflush, barrier_after_ntstores);
}
void
memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_empty_nolog,
			barrier_after_ntstores);
}
void
memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clflush_nolog,
			barrier_after_ntstores);
}
void
memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clflushopt_nolog,
			no_barrier_after_ntstores);
}
void
memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len)
{
	LOG(15, "dest %p src %p len %zu", dest, src, len);
	memmove_movnt_avx512f(dest, src, len, flush_clwb_nolog,
			no_barrier_after_ntstores);
}
| 11,246 | 23.45 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/aarch64/arm_cacheops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* ARM inline assembly to flush and invalidate caches
* clwb => dc cvac
* clflushopt => dc civac
* fence => dmb ish
* sfence => dmb ishst
*/
/*
* Cache instructions on ARM:
* ARMv8.0-a DC CVAC - cache clean to Point of Coherency
* Meant for thread synchronization, usually implies
* real memory flush but may mean less.
* ARMv8.2-a DC CVAP - cache clean to Point of Persistency
* Meant exactly for our use.
* ARMv8.5-a DC CVADP - cache clean to Point of Deep Persistency
* As of mid-2019 not on any commercially available CPU.
* Any of the above may be disabled for EL0, but it's probably safe to consider
* that a system configuration error.
* Other flags include I (like "DC CIVAC") that invalidates the cache line, but
* we don't want that.
*
* Memory fences:
* * DMB [ISH] MFENCE
* * DMB [ISH]ST SFENCE
* * DMB [ISH]LD LFENCE
*
* Memory domains (cache coherency):
* * non-shareable - local to a single core
* * inner shareable (ISH) - a group of CPU clusters/sockets/other hardware
* Linux requires that anything within one operating system/hypervisor
* is within the same Inner Shareable domain.
* * outer shareable (OSH) - one or more separate ISH domains
* * full system (SY) - anything that can possibly access memory
* Docs: ARM DDI 0487E.a page B2-144.
*
* Exception (privilege) levels:
* * EL0 - userspace (ring 3)
* * EL1 - kernel (ring 0)
* * EL2 - hypervisor (ring -1)
* * EL3 - "secure world" (ring -3)
*/
#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H
#include <stdlib.h>
/*
 * arm_clean_va_to_poc -- clean (write back) the cache line holding p to
 * the Point of Coherency ("dc cvac"); per the header comment above,
 * this is the clwb analogue -- it does not invalidate the line.
 */
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}
/*
 * arm_store_memory_barrier -- store-store barrier over the Inner
 * Shareable domain ("dmb ishst"); per the header comment above, the
 * ARM analogue of sfence.
 */
static inline void
arm_store_memory_barrier(void)
{
	asm volatile("dmb ishst" : : : "memory");
}
#endif
| 1,988 | 30.571429 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmem2/ppc64/init.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, IBM Corporation */
/* Copyright 2019-2020, Intel Corporation */
#include <errno.h>
#include <sys/mman.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
* Older assemblers versions do not support the latest versions of L, e.g.
* Binutils 2.34.
* Workaround this by using longs.
*/
#define __SYNC(l) ".long (0x7c0004AC | ((" #l ") << 21))"
#define __DCBF(ra, rb, l) ".long (0x7c0000AC | ((" #l ") << 21)" \
" | ((" #ra ") << 16) | ((" #rb ") << 11))"
/*
 * ppc_fence -- full memory barrier (phwsync) used to order prior
 * cache-line flushes; installed as the fence callback below.
 */
static void
ppc_fence(void)
{
	LOG(15, NULL);
	/*
	 * Force a memory barrier to flush out all cache lines.
	 * Uses a heavyweight sync in order to guarantee the memory ordering
	 * even with a data cache flush.
	 * According to the POWER ISA 3.1, phwsync (aka. sync (L=4)) is treated
	 * as a hwsync by processors compatible with previous versions of the
	 * POWER ISA.
	 */
	asm volatile(__SYNC(4) : : : "memory");
}
/*
 * ppc_flush -- write back every cache line covering [addr, addr + size)
 * using dcbstps (dcbf L=6); installed as the flush callback below.
 */
static void
ppc_flush(const void *addr, size_t size)
{
	LOG(15, "addr %p size %zu", addr, size);
	uintptr_t uptr = (uintptr_t)addr;
	uintptr_t end = uptr + size;
	/* round down the address to the start of its cache line */
	uptr &= ~(CACHELINE_SIZE - 1);
	while (uptr < end) {
		/*
		 * Flush the data cache block.
		 * According to the POWER ISA 3.1, dcbstps (aka. dcbf (L=6))
		 * behaves as dcbf (L=0) on previous processors.
		 */
		asm volatile(__DCBF(0, %0, 6) : :"r"(uptr) : "memory");
		uptr += CACHELINE_SIZE;
	}
}
/*
 * pmem2_arch_init -- install the PPC64 fence and flush callbacks into
 * the arch-info structure.
 */
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
	LOG(3, "libpmem*: PPC64 support");
	info->fence = ppc_fence;
	info->flush = ppc_flush;
}
| 1,594 | 22.80597 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/getopt/getopt.c | /*
* *Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "getopt.h"
#include <stddef.h>
#include <string.h>
#include <stdio.h>
char* optarg;
int optopt;
/* The variable optind [...] shall be initialized to 1 by the system. */
int optind = 1;
int opterr;
static char* optcursor = NULL;
static char *first = NULL;
/* rotates argv array */
/* rotates argv array: left-rotate by one, moving argv[0] behind argv[argc-1] */
static void rotate(char **argv, int argc) {
  if (argc <= 1)
    return;
  char *head = argv[0];
  for (int i = 0; i < argc - 1; ++i)
    argv[i] = argv[i + 1];
  argv[argc - 1] = head;
}
/* Implemented based on [1] and [2] for optional arguments.
optopt is handled FreeBSD-style, per [3].
Other GNU and FreeBSD extensions are purely accidental.
[1] https://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html
[2] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
[3] https://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE
*/
/*
 * getopt -- POSIX-style option parser with GNU-like argv permutation:
 * non-option arguments are rotated to the end of argv so that option
 * parsing can continue past them.  Mutates the globals optarg, optind,
 * opterr, optopt and the file-scope optcursor/first state.
 */
int getopt(int argc, char* const argv[], const char* optstring) {
  int optchar = -1;
  const char* optdecl = NULL;
  optarg = NULL;
  opterr = 0;
  optopt = 0;
  /* Unspecified, but we need it to avoid overrunning the argv bounds. */
  if (optind >= argc)
    goto no_more_optchars;
  /* If, when getopt() is called argv[optind] is a null pointer, getopt()
     shall return -1 without changing optind. */
  if (argv[optind] == NULL)
    goto no_more_optchars;
  /* If, when getopt() is called *argv[optind] is not the character '-',
     permute argv to move non options to the end */
  if (*argv[optind] != '-') {
    if (argc - optind <= 1)
      goto no_more_optchars;
    if (!first)
      first = argv[optind];
    /* rotate non-options away; stop once we wrap back to the first one */
    do {
      rotate((char **)(argv + optind), argc - optind);
    } while (*argv[optind] != '-' && argv[optind] != first);
    if (argv[optind] == first)
      goto no_more_optchars;
  }
  /* If, when getopt() is called argv[optind] points to the string "-",
     getopt() shall return -1 without changing optind. */
  if (strcmp(argv[optind], "-") == 0)
    goto no_more_optchars;
  /* If, when getopt() is called argv[optind] points to the string "--",
     getopt() shall return -1 after incrementing optind. */
  if (strcmp(argv[optind], "--") == 0) {
    ++optind;
    /* restore the rotation so permuted non-options follow the "--" */
    if (first) {
      do {
        rotate((char **)(argv + optind), argc - optind);
      } while (argv[optind] != first);
    }
    goto no_more_optchars;
  }
  /* optcursor walks the grouped short options within one argv element */
  if (optcursor == NULL || *optcursor == '\0')
    optcursor = argv[optind] + 1;
  optchar = *optcursor;
  /* FreeBSD: The variable optopt saves the last known option character
     returned by getopt(). */
  optopt = optchar;
  /* The getopt() function shall return the next option character (if one is
     found) from argv that matches a character in optstring, if there is
     one that matches. */
  optdecl = strchr(optstring, optchar);
  if (optdecl) {
    /* [I]f a character is followed by a colon, the option takes an
       argument. */
    if (optdecl[1] == ':') {
      optarg = ++optcursor;
      if (*optarg == '\0') {
        /* GNU extension: Two colons mean an option takes an
           optional arg; if there is text in the current argv-element
           (i.e., in the same word as the option name itself, for example,
           "-oarg"), then it is returned in optarg, otherwise optarg is set
           to zero. */
        if (optdecl[2] != ':') {
          /* If the option was the last character in the string pointed to by
             an element of argv, then optarg shall contain the next element
             of argv, and optind shall be incremented by 2. If the resulting
             value of optind is greater than argc, this indicates a missing
             option-argument, and getopt() shall return an error indication.
             Otherwise, optarg shall point to the string following the
             option character in that element of argv, and optind shall be
             incremented by 1.
          */
          if (++optind < argc) {
            optarg = argv[optind];
          } else {
            /* If it detects a missing option-argument, it shall return the
               colon character ( ':' ) if the first character of optstring
               was a colon, or a question-mark character ( '?' ) otherwise.
            */
            optarg = NULL;
            fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar);
            optchar = (optstring[0] == ':') ? ':' : '?';
          }
        } else {
          optarg = NULL;
        }
      }
      optcursor = NULL;
    }
  } else {
    fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar);
    /* If getopt() encounters an option character that is not contained in
       optstring, it shall return the question-mark ( '?' ) character. */
    optchar = '?';
  }
  /* advance to the next argv element once this group is exhausted */
  if (optcursor == NULL || *++optcursor == '\0')
    ++optind;
  return optchar;
no_more_optchars:
  optcursor = NULL;
  first = NULL;
  return -1;
}
/* Implementation based on [1].
[1] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html
*/
/*
 * getopt_long -- long-option ("--name[=value]") parser; falls back to
 * getopt() for short options and shares its argv-permutation globals.
 * Supports unambiguous prefix matching against longopts.
 */
int getopt_long(int argc, char* const argv[], const char* optstring,
  const struct option* longopts, int* longindex) {
  const struct option* o = longopts;
  const struct option* match = NULL;
  int num_matches = 0;
  size_t argument_name_length = 0;
  const char* current_argument = NULL;
  int retval = -1;
  optarg = NULL;
  optopt = 0;
  if (optind >= argc)
    return -1;
  /* If, when getopt() is called argv[optind] is a null pointer, getopt_long()
     shall return -1 without changing optind. */
  if (argv[optind] == NULL)
    goto no_more_optchars;
  /* If, when getopt_long() is called *argv[optind] is not the character '-',
     permute argv to move non options to the end */
  if (*argv[optind] != '-') {
    if (argc - optind <= 1)
      goto no_more_optchars;
    if (!first)
      first = argv[optind];
    /* rotate non-options away; stop once we wrap back to the first one */
    do {
      rotate((char **)(argv + optind), argc - optind);
    } while (*argv[optind] != '-' && argv[optind] != first);
    if (argv[optind] == first)
      goto no_more_optchars;
  }
  if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0)
    return getopt(argc, argv, optstring);
  /* It's an option; starts with -- and is longer than two chars. */
  current_argument = argv[optind] + 2;
  argument_name_length = strcspn(current_argument, "=");
  /* prefix-match against all long options; exact match wins outright */
  for (; o->name; ++o) {
    if (strncmp(o->name, current_argument, argument_name_length) == 0) {
      match = o;
      ++num_matches;
      if (strlen(o->name) == argument_name_length) {
        /* found match is exactly the one which we are looking for */
        num_matches = 1;
        break;
      }
    }
  }
  if (num_matches == 1) {
    /* If longindex is not NULL, it points to a variable which is set to the
       index of the long option relative to longopts. */
    if (longindex)
      *longindex = (int)(match - longopts);
    /* If flag is NULL, then getopt_long() shall return val.
       Otherwise, getopt_long() returns 0, and flag shall point to a variable
       which shall be set to val if the option is found, but left unchanged if
       the option is not found. */
    if (match->flag)
      *(match->flag) = match->val;
    retval = match->flag ? 0 : match->val;
    if (match->has_arg != no_argument) {
      optarg = strchr(argv[optind], '=');
      if (optarg != NULL)
        ++optarg;
      if (match->has_arg == required_argument) {
        /* Only scan the next argv for required arguments. Behavior is not
           specified, but has been observed with Ubuntu and Mac OSX. */
        if (optarg == NULL && ++optind < argc) {
          optarg = argv[optind];
        }
        if (optarg == NULL)
          retval = ':';
      }
    } else if (strchr(argv[optind], '=')) {
      /* An argument was provided to a non-argument option.
         I haven't seen this specified explicitly, but both GNU and BSD-based
         implementations show this behavior.
      */
      retval = '?';
    }
  } else {
    /* Unknown option or ambiguous match. */
    retval = '?';
    if (num_matches == 0) {
      fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]);
    } else {
      fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]);
    }
  }
  ++optind;
  return retval;
no_more_optchars:
  first = NULL;
  return -1;
}
| 9,866 | 32.561224 | 91 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/getopt/getopt.h | /*
* *Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H
#if defined(__cplusplus)
extern "C" {
#endif
#define no_argument 0
#define required_argument 1
#define optional_argument 2
extern char* optarg;
extern int optind, opterr, optopt;
struct option {
const char* name;
int has_arg;
int* flag;
int val;
};
int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
const char* optstring, const struct option* longopts, int* longindex);
#if defined(__cplusplus)
}
#endif
#endif // INCLUDED_GETOPT_PORT_H
| 2,137 | 35.237288 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/include/win_mmap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
PMDK_SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry;
HANDLE FileHandle;
HANDLE FileMappingHandle;
void *BaseAddress;
void *EndAddress;
DWORD Access;
os_off_t Offset;
size_t FileLen;
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
extern SRWLOCK FileMappingQLock;
extern PMDK_SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
| 2,871 | 34.02439 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/include/platform.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
* This is included to each source file using "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non unicode characters */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
* The inline keyword is available only in VC++.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
* missing definitions
*/
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ);
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
/*
 * sigemptyset -- initialize the set to exclude all signals; returns 0
 */
__inline int
sigemptyset(sigset_t *set)
{
	*set = 0;
	return 0;
}
/*
 * sigfillset -- initialize the set to include all signals; returns 0
 */
__inline int
sigfillset(sigset_t *set)
{
	*set = ~0;
	return 0;
}
/*
 * sigaddset -- add signum to the set (bit signum-1); returns 0, or -1
 * with errno = EINVAL if signum is out of the valid 1..NSIG-1 range
 */
__inline int
sigaddset(sigset_t *set, int signum)
{
	if (signum <= 0 || signum >= NSIG) {
		errno = EINVAL;
		return -1;
	}
	*set |= (1ULL << (signum - 1));
	return 0;
}
/*
 * sigdelset -- remove signum from the set; returns 0, or -1 with
 * errno = EINVAL if signum is out of range
 */
__inline int
sigdelset(sigset_t *set, int signum)
{
	if (signum <= 0 || signum >= NSIG) {
		errno = EINVAL;
		return -1;
	}
	*set &= ~(1ULL << (signum - 1));
	return 0;
}
/*
 * sigismember -- return 1 if signum is in the set, 0 if not, or -1 with
 * errno = EINVAL if signum is out of range
 */
__inline int
sigismember(const sigset_t *set, int signum)
{
	if (signum <= 0 || signum >= NSIG) {
		errno = EINVAL;
		return -1;
	}
	return ((*set & (1ULL << (signum - 1))) ? 1 : 0);
}
/* sched.h */
/*
* sched_yield -- yield the processor
*/
/*
 * sched_yield -- POSIX shim over the Win32 SwitchToThread() call
 */
__inline int
sched_yield(void)
{
	SwitchToThread();
	return 0;		/* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
| 5,431 | 22.929515 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/include/endian.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
#endif /* ENDIAN_H */
| 696 | 20.121212 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/include/sys/file.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
| 1,750 | 45.078947 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/windows/include/sys/param.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1

/* round x up to the nearest multiple of y (y must be nonzero) */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* number of y-sized pieces needed to hold x (ceiling division) */
#define howmany(x, y) (((x) + ((y) - 1)) / (y))

#define BPB 8 /* bits per byte */

/* bitmap helpers: b is a byte array, i is a bit index */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)

/*
 * NOTE: these function-like macros evaluate their arguments more than
 * once -- do not pass expressions with side effects (e.g. MIN(i++, j)).
 */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif /* SYS_PARAM_H */
| 612 | 24.541667 | 64 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemblk.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemblk.h -- definitions of libpmemblk entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmemblk provides support for arrays of atomically-writable blocks.
*
* See libpmemblk(7) for details.
*/
#ifndef LIBPMEMBLK_H
#define LIBPMEMBLK_H 1
#include <sys/types.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmemblk_open pmemblk_openW
#define pmemblk_create pmemblk_createW
#define pmemblk_check pmemblk_checkW
#define pmemblk_check_version pmemblk_check_versionW
#define pmemblk_errormsg pmemblk_errormsgW
#define pmemblk_ctl_get pmemblk_ctl_getW
#define pmemblk_ctl_set pmemblk_ctl_setW
#define pmemblk_ctl_exec pmemblk_ctl_execW
#else
#define pmemblk_open pmemblk_openU
#define pmemblk_create pmemblk_createU
#define pmemblk_check pmemblk_checkU
#define pmemblk_check_version pmemblk_check_versionU
#define pmemblk_errormsg pmemblk_errormsgU
#define pmemblk_ctl_get pmemblk_ctl_getU
#define pmemblk_ctl_set pmemblk_ctl_setU
#define pmemblk_ctl_exec pmemblk_ctl_execU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* opaque type, internal to libpmemblk
*/
typedef struct pmemblk PMEMblkpool;
/*
* PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version
* of the libpmemblk API as provided by this header file. Applications can
* verify that the version available at run-time is compatible with the version
* used at compile-time by passing these defines to pmemblk_check_version().
*/
#define PMEMBLK_MAJOR_VERSION 1
#define PMEMBLK_MINOR_VERSION 1
#ifndef _WIN32
const char *pmemblk_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmemblk_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmemblk_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
/* XXX - unify minimum pool size for both OS-es */
#ifndef _WIN32
#if defined(__x86_64__) || defined(__M_X64__) || defined(__aarch64__)
/* minimum pool size: 16MiB + 8KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8))
#elif defined(__PPC64__)
/* minimum pool size: 16MiB + 128KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 128))
#else
#error unable to recognize ISA at compile time
#endif
#else
/* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */
#define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64))
#endif
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#define PMEMBLK_MIN_BLK ((size_t)512)
#ifndef _WIN32
PMEMblkpool *pmemblk_open(const char *path, size_t bsize);
#else
PMEMblkpool *pmemblk_openU(const char *path, size_t bsize);
PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize);
#endif
#ifndef _WIN32
PMEMblkpool *pmemblk_create(const char *path, size_t bsize,
size_t poolsize, mode_t mode);
#else
PMEMblkpool *pmemblk_createU(const char *path, size_t bsize,
size_t poolsize, mode_t mode);
PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize,
size_t poolsize, mode_t mode);
#endif
#ifndef _WIN32
int pmemblk_check(const char *path, size_t bsize);
#else
int pmemblk_checkU(const char *path, size_t bsize);
int pmemblk_checkW(const wchar_t *path, size_t bsize);
#endif
void pmemblk_close(PMEMblkpool *pbp);
size_t pmemblk_bsize(PMEMblkpool *pbp);
size_t pmemblk_nblock(PMEMblkpool *pbp);
int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno);
int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno);
int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno);
int pmemblk_set_error(PMEMblkpool *pbp, long long blockno);
/*
* Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the
* default for that function. The replacement functions must not make calls
* back into libpmemblk.
*/
void pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
#ifndef _WIN32
const char *pmemblk_errormsg(void);
#else
const char *pmemblk_errormsgU(void);
const wchar_t *pmemblk_errormsgW(void);
#endif
#ifndef _WIN32
/* EXPERIMENTAL */
int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg);
#else
int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg);
int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmemblk.h */
| 5,183 | 30.418182 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmempool.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* libpmempool.h -- definitions of libpmempool entry points
*
* See libpmempool(7) for details.
*/
#ifndef LIBPMEMPOOL_H
#define LIBPMEMPOOL_H 1
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmempool_check_status pmempool_check_statusW
#define pmempool_check_args pmempool_check_argsW
#define pmempool_check_init pmempool_check_initW
#define pmempool_check pmempool_checkW
#define pmempool_sync pmempool_syncW
#define pmempool_transform pmempool_transformW
#define pmempool_rm pmempool_rmW
#define pmempool_check_version pmempool_check_versionW
#define pmempool_errormsg pmempool_errormsgW
#define pmempool_feature_enable pmempool_feature_enableW
#define pmempool_feature_disable pmempool_feature_disableW
#define pmempool_feature_query pmempool_feature_queryW
#else
#define pmempool_check_status pmempool_check_statusU
#define pmempool_check_args pmempool_check_argsU
#define pmempool_check_init pmempool_check_initU
#define pmempool_check pmempool_checkU
#define pmempool_sync pmempool_syncU
#define pmempool_transform pmempool_transformU
#define pmempool_rm pmempool_rmU
#define pmempool_check_version pmempool_check_versionU
#define pmempool_errormsg pmempool_errormsgU
#define pmempool_feature_enable pmempool_feature_enableU
#define pmempool_feature_disable pmempool_feature_disableU
#define pmempool_feature_query pmempool_feature_queryU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* PMEMPOOL CHECK */
/*
 * pool types -- the kind of pool a check context operates on
 */
enum pmempool_pool_type {
	PMEMPOOL_POOL_TYPE_DETECT,	/* detect the pool type automatically */
	PMEMPOOL_POOL_TYPE_LOG,		/* pmemlog pool */
	PMEMPOOL_POOL_TYPE_BLK,		/* pmemblk pool */
	PMEMPOOL_POOL_TYPE_OBJ,		/* pmemobj pool */
	PMEMPOOL_POOL_TYPE_BTT,		/* BTT (block translation table) layout */
	PMEMPOOL_POOL_TYPE_RESERVED1, /* used to be cto */
};
/*
* perform repairs
*/
#define PMEMPOOL_CHECK_REPAIR (1U << 0)
/*
* emulate repairs
*/
#define PMEMPOOL_CHECK_DRY_RUN (1U << 1)
/*
* perform hazardous repairs
*/
#define PMEMPOOL_CHECK_ADVANCED (1U << 2)
/*
* do not ask before repairs
*/
#define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3)
/*
* generate info statuses
*/
#define PMEMPOOL_CHECK_VERBOSE (1U << 4)
/*
* generate string format statuses
*/
#define PMEMPOOL_CHECK_FORMAT_STR (1U << 5)
/*
 * types of check statuses reported by pmempool_check()
 */
enum pmempool_check_msg_type {
	PMEMPOOL_CHECK_MSG_TYPE_INFO,		/* informational message */
	PMEMPOOL_CHECK_MSG_TYPE_ERROR,		/* an error was encountered */
	PMEMPOOL_CHECK_MSG_TYPE_QUESTION,	/* a question requiring an answer */
};
/*
 * check result types -- final verdict returned by pmempool_check_end()
 */
enum pmempool_check_result {
	PMEMPOOL_CHECK_RESULT_CONSISTENT,	/* no inconsistencies found */
	PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT,	/* inconsistencies found, not repaired */
	PMEMPOOL_CHECK_RESULT_REPAIRED,		/* inconsistencies found and repaired */
	PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR,	/* inconsistencies cannot be repaired */
	PMEMPOOL_CHECK_RESULT_ERROR,		/* the check itself failed */
	PMEMPOOL_CHECK_RESULT_SYNC_REQ,		/* synchronization is required (see name) */
};
/*
* check context
*/
typedef struct pmempool_check_ctx PMEMpoolcheck;
/*
* finalize the check and get the result
*/
enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc);
/* PMEMPOOL RM */
#define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */
#define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */
#define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */
/*
* LIBPMEMPOOL SYNC
*/
/*
* fix bad blocks - it requires creating or reading special recovery files
*/
#define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0)
/*
* do not apply changes, only check if operation is viable
*/
#define PMEMPOOL_SYNC_DRY_RUN (1U << 1)
/*
* LIBPMEMPOOL TRANSFORM
*/
/*
* do not apply changes, only check if operation is viable
*/
#define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1)
/*
* PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current version
* of the libpmempool API as provided by this header file. Applications can
* verify that the version available at run-time is compatible with the version
* used at compile-time by passing these defines to pmempool_check_version().
*/
#define PMEMPOOL_MAJOR_VERSION 1
#define PMEMPOOL_MINOR_VERSION 3
/*
 * check status -- a single status message produced by pmempool_check();
 * for QUESTION-type statuses an answer is carried in str.answer
 * (presumably supplied by the caller -- confirm with pmempool-check(3))
 */
struct pmempool_check_statusU {
	enum pmempool_check_msg_type type;	/* kind of this status message */
	struct {
		const char *msg;	/* status message text */
		const char *answer;	/* answer to a QUESTION status */
	} str;
};
#ifndef _WIN32
#define pmempool_check_status pmempool_check_statusU
#else
/* wide-character (Windows) variant of struct pmempool_check_statusU */
struct pmempool_check_statusW {
	enum pmempool_check_msg_type type;	/* kind of this status message */
	struct {
		const wchar_t *msg;	/* status message text */
		const wchar_t *answer;	/* answer to a QUESTION status */
	} str;
};
#endif
/*
 * check context arguments -- passed to pmempool_check_init()
 */
struct pmempool_check_argsU {
	const char *path;		/* path of the pool/poolset to check */
	const char *backup_path;	/* backup destination (NOTE(review):
					   presumably optional -- confirm with
					   pmempool-check(3)) */
	enum pmempool_pool_type pool_type;	/* expected pool type, or DETECT */
	unsigned flags;			/* combination of PMEMPOOL_CHECK_* flags */
};
#ifndef _WIN32
#define pmempool_check_args pmempool_check_argsU
#else
/* wide-character (Windows) variant of struct pmempool_check_argsU */
struct pmempool_check_argsW {
	const wchar_t *path;		/* path of the pool/poolset to check */
	const wchar_t *backup_path;	/* backup destination */
	enum pmempool_pool_type pool_type;	/* expected pool type, or DETECT */
	unsigned flags;			/* combination of PMEMPOOL_CHECK_* flags */
};
#endif
/*
* initialize a check context
*/
#ifndef _WIN32
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size);
#else
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size);
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size);
#endif
/*
* start / resume the check
*/
#ifndef _WIN32
struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc);
#else
struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc);
struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc);
#endif
/*
* LIBPMEMPOOL SYNC & TRANSFORM
*/
/*
* Synchronize data between replicas within a poolset.
*
* EXPERIMENTAL
*/
#ifndef _WIN32
int pmempool_sync(const char *poolset_file, unsigned flags);
#else
int pmempool_syncU(const char *poolset_file, unsigned flags);
int pmempool_syncW(const wchar_t *poolset_file, unsigned flags);
#endif
/*
* Modify internal structure of a poolset.
*
* EXPERIMENTAL
*/
#ifndef _WIN32
int pmempool_transform(const char *poolset_file_src,
const char *poolset_file_dst, unsigned flags);
#else
int pmempool_transformU(const char *poolset_file_src,
const char *poolset_file_dst, unsigned flags);
int pmempool_transformW(const wchar_t *poolset_file_src,
const wchar_t *poolset_file_dst, unsigned flags);
#endif
/* PMEMPOOL feature enable, disable, query */
/*
 * feature types -- pool features that can be toggled or queried with
 * pmempool_feature_enable(), pmempool_feature_disable() and
 * pmempool_feature_query()
 */
enum pmempool_feature {
	PMEMPOOL_FEAT_SINGLEHDR,	/* SINGLEHDR pool-header feature */
	PMEMPOOL_FEAT_CKSUM_2K,		/* CKSUM_2K checksum feature */
	PMEMPOOL_FEAT_SHUTDOWN_STATE,	/* SHUTDOWN_STATE (unsafe-shutdown) feature */
	PMEMPOOL_FEAT_CHECK_BAD_BLOCKS,	/* CHECK_BAD_BLOCKS feature */
};
/* PMEMPOOL FEATURE ENABLE */
#ifndef _WIN32
int pmempool_feature_enable(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_enableW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL FEATURE DISABLE */
#ifndef _WIN32
int pmempool_feature_disable(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_disableW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL FEATURE QUERY */
#ifndef _WIN32
int pmempool_feature_query(const char *path, enum pmempool_feature feature,
unsigned flags);
#else
int pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
unsigned flags);
int pmempool_feature_queryW(const wchar_t *path,
enum pmempool_feature feature, unsigned flags);
#endif
/* PMEMPOOL RM */
#ifndef _WIN32
int pmempool_rm(const char *path, unsigned flags);
#else
int pmempool_rmU(const char *path, unsigned flags);
int pmempool_rmW(const wchar_t *path, unsigned flags);
#endif
#ifndef _WIN32
const char *pmempool_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmempool_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmempool_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
#ifndef _WIN32
const char *pmempool_errormsg(void);
#else
const char *pmempool_errormsgU(void);
const wchar_t *pmempool_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmempool.h */
| 8,009 | 22.910448 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/librpmem.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL)
*
* This library provides low-level support for remote access to persistent
* memory utilizing RDMA-capable RNICs.
*
* See librpmem(7) for details.
*/
#ifndef LIBRPMEM_H
#define LIBRPMEM_H 1
#include <sys/types.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct rpmem_pool RPMEMpool;
#define RPMEM_POOL_HDR_SIG_LEN 8
#define RPMEM_POOL_HDR_UUID_LEN 16 /* uuid byte length */
#define RPMEM_POOL_USER_FLAGS_LEN 16
/*
 * rpmem_pool_attr -- attributes of a remote pool, exchanged between the
 * initiator and the target by rpmem_create()/rpmem_open()/rpmem_set_attr()
 */
struct rpmem_pool_attr {
	char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
	uint32_t major; /* format major version number */
	uint32_t compat_features; /* mask: compatible "may" features */
	uint32_t incompat_features; /* mask: "must support" features */
	uint32_t ro_compat_features; /* mask: force RO if unsupported */
	unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
	unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
	unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
	unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
	unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
};
RPMEMpool *rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr);
RPMEMpool *rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr);
int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr);
int rpmem_close(RPMEMpool *rpp);
#define RPMEM_PERSIST_RELAXED (1U << 0)
#define RPMEM_FLUSH_RELAXED (1U << 0)
int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane,
unsigned flags);
int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags);
int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags);
int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length,
unsigned lane);
int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane);
#define RPMEM_REMOVE_FORCE 0x1
#define RPMEM_REMOVE_POOL_SET 0x2
int rpmem_remove(const char *target, const char *pool_set, int flags);
/*
* RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of
* the librpmem API as provided by this header file. Applications can verify
* that the version available at run-time is compatible with the version used
* at compile-time by passing these defines to rpmem_check_version().
*/
#define RPMEM_MAJOR_VERSION 1
#define RPMEM_MINOR_VERSION 3
const char *rpmem_check_version(unsigned major_required,
unsigned minor_required);
const char *rpmem_errormsg(void);
/* minimum size of a pool */
#define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KB */
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#ifdef __cplusplus
}
#endif
#endif /* librpmem.h */
| 3,197 | 31.30303 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj.h -- definitions of libpmemobj entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmemobj provides a pmem-resident transactional object store.
*
* See libpmemobj(7) for details.
*/
#ifndef LIBPMEMOBJ_H
#define LIBPMEMOBJ_H 1
#include <libpmemobj/action.h>
#include <libpmemobj/atomic.h>
#include <libpmemobj/ctl.h>
#include <libpmemobj/iterator.h>
#include <libpmemobj/lists_atomic.h>
#include <libpmemobj/pool.h>
#include <libpmemobj/thread.h>
#include <libpmemobj/tx.h>
#endif /* libpmemobj.h */
| 662 | 23.555556 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemlog.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemlog.h -- definitions of libpmemlog entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmemlog provides support for pmem-resident log files.
*
* See libpmemlog(7) for details.
*/
#ifndef LIBPMEMLOG_H
#define LIBPMEMLOG_H 1
#include <sys/types.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmemlog_open pmemlog_openW
#define pmemlog_create pmemlog_createW
#define pmemlog_check pmemlog_checkW
#define pmemlog_check_version pmemlog_check_versionW
#define pmemlog_errormsg pmemlog_errormsgW
#define pmemlog_ctl_get pmemlog_ctl_getW
#define pmemlog_ctl_set pmemlog_ctl_setW
#define pmemlog_ctl_exec pmemlog_ctl_execW
#else
#define pmemlog_open pmemlog_openU
#define pmemlog_create pmemlog_createU
#define pmemlog_check pmemlog_checkU
#define pmemlog_check_version pmemlog_check_versionU
#define pmemlog_errormsg pmemlog_errormsgU
#define pmemlog_ctl_get pmemlog_ctl_getU
#define pmemlog_ctl_set pmemlog_ctl_setU
#define pmemlog_ctl_exec pmemlog_ctl_execU
#endif
#else
#include <sys/uio.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* opaque type, internal to libpmemlog
*/
typedef struct pmemlog PMEMlogpool;
/*
* PMEMLOG_MAJOR_VERSION and PMEMLOG_MINOR_VERSION provide the current
* version of the libpmemlog API as provided by this header file.
* Applications can verify that the version available at run-time
* is compatible with the version used at compile-time by passing
* these defines to pmemlog_check_version().
*/
#define PMEMLOG_MAJOR_VERSION 1
#define PMEMLOG_MINOR_VERSION 1
#ifndef _WIN32
const char *pmemlog_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmemlog_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmemlog_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
/*
* support for PMEM-resident log files...
*/
#define PMEMLOG_MIN_POOL ((size_t)(1024 * 1024 * 2)) /* min pool size: 2MiB */
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define PMEMLOG_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#ifndef _WIN32
PMEMlogpool *pmemlog_open(const char *path);
#else
PMEMlogpool *pmemlog_openU(const char *path);
PMEMlogpool *pmemlog_openW(const wchar_t *path);
#endif
#ifndef _WIN32
PMEMlogpool *pmemlog_create(const char *path, size_t poolsize, mode_t mode);
#else
PMEMlogpool *pmemlog_createU(const char *path, size_t poolsize, mode_t mode);
PMEMlogpool *pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode);
#endif
#ifndef _WIN32
int pmemlog_check(const char *path);
#else
int pmemlog_checkU(const char *path);
int pmemlog_checkW(const wchar_t *path);
#endif
void pmemlog_close(PMEMlogpool *plp);
size_t pmemlog_nbyte(PMEMlogpool *plp);
int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count);
int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt);
long long pmemlog_tell(PMEMlogpool *plp);
void pmemlog_rewind(PMEMlogpool *plp);
void pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg),
void *arg);
/*
* Passing NULL to pmemlog_set_funcs() tells libpmemlog to continue to use the
* default for that function. The replacement functions must not make calls
* back into libpmemlog.
*/
void pmemlog_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
#ifndef _WIN32
const char *pmemlog_errormsg(void);
#else
const char *pmemlog_errormsgU(void);
const wchar_t *pmemlog_errormsgW(void);
#endif
#ifndef _WIN32
/* EXPERIMENTAL */
int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg);
int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg);
int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg);
#else
int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg);
int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg);
int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg);
int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg);
int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg);
int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmemlog.h */
| 4,540 | 28.679739 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmem.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmem.h -- definitions of libpmem entry points
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmem provides support for using raw pmem directly.
*
* See libpmem(7) for details.
*/
#ifndef LIBPMEM_H
#define LIBPMEM_H 1
#include <sys/types.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmem_map_file pmem_map_fileW
#define pmem_check_version pmem_check_versionW
#define pmem_errormsg pmem_errormsgW
#else
#define pmem_map_file pmem_map_fileU
#define pmem_check_version pmem_check_versionU
#define pmem_errormsg pmem_errormsgU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* This limit is set arbitrary to incorporate a pool header and required
* alignment plus supply.
*/
#define PMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
/*
* flags supported by pmem_map_file()
*/
#define PMEM_FILE_CREATE (1 << 0)
#define PMEM_FILE_EXCL (1 << 1)
#define PMEM_FILE_SPARSE (1 << 2)
#define PMEM_FILE_TMPFILE (1 << 3)
#ifndef _WIN32
void *pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp);
#else
void *pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp);
void *pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp);
#endif
int pmem_unmap(void *addr, size_t len);
int pmem_is_pmem(const void *addr, size_t len);
void pmem_persist(const void *addr, size_t len);
int pmem_msync(const void *addr, size_t len);
int pmem_has_auto_flush(void);
void pmem_flush(const void *addr, size_t len);
void pmem_deep_flush(const void *addr, size_t len);
int pmem_deep_drain(const void *addr, size_t len);
int pmem_deep_persist(const void *addr, size_t len);
void pmem_drain(void);
int pmem_has_hw_drain(void);
void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memset_persist(void *pmemdest, int c, size_t len);
void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memset_nodrain(void *pmemdest, int c, size_t len);
#define PMEM_F_MEM_NODRAIN (1U << 0)
#define PMEM_F_MEM_NONTEMPORAL (1U << 1)
#define PMEM_F_MEM_TEMPORAL (1U << 2)
#define PMEM_F_MEM_WC (1U << 3)
#define PMEM_F_MEM_WB (1U << 4)
#define PMEM_F_MEM_NOFLUSH (1U << 5)
#define PMEM_F_MEM_VALID_FLAGS (PMEM_F_MEM_NODRAIN | \
PMEM_F_MEM_NONTEMPORAL | \
PMEM_F_MEM_TEMPORAL | \
PMEM_F_MEM_WC | \
PMEM_F_MEM_WB | \
PMEM_F_MEM_NOFLUSH)
void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags);
/*
* PMEM_MAJOR_VERSION and PMEM_MINOR_VERSION provide the current version of the
* libpmem API as provided by this header file. Applications can verify that
* the version available at run-time is compatible with the version used at
* compile-time by passing these defines to pmem_check_version().
*/
#define PMEM_MAJOR_VERSION 1
#define PMEM_MINOR_VERSION 1
#ifndef _WIN32
const char *pmem_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmem_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmem_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
#ifndef _WIN32
const char *pmem_errormsg(void);
#else
const char *pmem_errormsgU(void);
const wchar_t *pmem_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmem.h */
| 3,829 | 28.015152 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmem2.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* libpmem2.h -- definitions of libpmem2 entry points (EXPERIMENTAL)
*
* This library provides support for programming with persistent memory (pmem).
*
* libpmem2 provides support for using raw pmem directly.
*
* See libpmem2(7) for details.
*/
#ifndef LIBPMEM2_H
#define LIBPMEM2_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmem2_source_device_id pmem2_source_device_idW
#define pmem2_errormsg pmem2_errormsgW
#define pmem2_perror pmem2_perrorW
#else
#define pmem2_source_device_id pmem2_source_device_idU
#define pmem2_errormsg pmem2_errormsgU
#define pmem2_perror pmem2_perrorU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define PMEM2_E_UNKNOWN (-100000)
#define PMEM2_E_NOSUPP (-100001)
#define PMEM2_E_FILE_HANDLE_NOT_SET (-100003)
#define PMEM2_E_INVALID_FILE_HANDLE (-100004)
#define PMEM2_E_INVALID_FILE_TYPE (-100005)
#define PMEM2_E_MAP_RANGE (-100006)
#define PMEM2_E_MAPPING_EXISTS (-100007)
#define PMEM2_E_GRANULARITY_NOT_SET (-100008)
#define PMEM2_E_GRANULARITY_NOT_SUPPORTED (-100009)
#define PMEM2_E_OFFSET_OUT_OF_RANGE (-100010)
#define PMEM2_E_OFFSET_UNALIGNED (-100011)
#define PMEM2_E_INVALID_ALIGNMENT_FORMAT (-100012)
#define PMEM2_E_INVALID_ALIGNMENT_VALUE (-100013)
#define PMEM2_E_INVALID_SIZE_FORMAT (-100014)
#define PMEM2_E_LENGTH_UNALIGNED (-100015)
#define PMEM2_E_MAPPING_NOT_FOUND (-100016)
#define PMEM2_E_BUFFER_TOO_SMALL (-100017)
#define PMEM2_E_SOURCE_EMPTY (-100018)
#define PMEM2_E_INVALID_SHARING_VALUE (-100019)
#define PMEM2_E_SRC_DEVDAX_PRIVATE (-100020)
#define PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE (-100021)
#define PMEM2_E_ADDRESS_UNALIGNED (-100022)
#define PMEM2_E_ADDRESS_NULL (-100023)
#define PMEM2_E_DEEP_FLUSH_RANGE (-100024)
#define PMEM2_E_INVALID_REGION_FORMAT (-100025)
#define PMEM2_E_DAX_REGION_NOT_FOUND (-100026)
#define PMEM2_E_INVALID_DEV_FORMAT (-100027)
#define PMEM2_E_CANNOT_READ_BOUNDS (-100028)
#define PMEM2_E_NO_BAD_BLOCK_FOUND (-100029)
#define PMEM2_E_LENGTH_OUT_OF_RANGE (-100030)
#define PMEM2_E_INVALID_PROT_FLAG (-100031)
#define PMEM2_E_NO_ACCESS (-100032)
/* source setup */
struct pmem2_source;
int pmem2_source_from_fd(struct pmem2_source **src, int fd);
int pmem2_source_from_anon(struct pmem2_source **src, size_t size);
#ifdef _WIN32
int pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle);
#endif
int pmem2_source_size(const struct pmem2_source *src, size_t *size);
int pmem2_source_alignment(const struct pmem2_source *src,
size_t *alignment);
int pmem2_source_delete(struct pmem2_source **src);
/* vm reservation setup */
struct pmem2_vm_reservation;
int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
size_t size, void *address);
int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv);
/* config setup */
struct pmem2_config;
int pmem2_config_new(struct pmem2_config **cfg);
int pmem2_config_delete(struct pmem2_config **cfg);
/*
 * pmem2_granularity -- granularity at which stores reach the persistent
 * domain (requested via pmem2_config_set_required_store_granularity(),
 * reported via pmem2_map_get_store_granularity())
 */
enum pmem2_granularity {
	PMEM2_GRANULARITY_BYTE,		/* byte granularity */
	PMEM2_GRANULARITY_CACHE_LINE,	/* cache-line granularity */
	PMEM2_GRANULARITY_PAGE,		/* page granularity */
};
int pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
enum pmem2_granularity g);
int pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset);
int pmem2_config_set_length(struct pmem2_config *cfg, size_t length);
/* mapping sharing semantics, set with pmem2_config_set_sharing() */
enum pmem2_sharing_type {
	PMEM2_SHARED,	/* shared mapping -- writes reach the underlying file */
	PMEM2_PRIVATE,	/* private mapping -- writes not carried back to the file */
};
int pmem2_config_set_sharing(struct pmem2_config *cfg,
enum pmem2_sharing_type type);
#define PMEM2_PROT_EXEC (1U << 29)
#define PMEM2_PROT_READ (1U << 30)
#define PMEM2_PROT_WRITE (1U << 31)
#define PMEM2_PROT_NONE 0
int pmem2_config_set_protection(struct pmem2_config *cfg,
unsigned prot);
/* how the address passed to pmem2_config_set_address() must be honored */
enum pmem2_address_request_type {
	PMEM2_ADDRESS_FIXED_REPLACE = 1,	/* map at addr, replacing any existing mapping */
	PMEM2_ADDRESS_FIXED_NOREPLACE = 2,	/* map at addr, fail if already mapped */
};
int pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
enum pmem2_address_request_type request_type);
int pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
struct pmem2_vm_reservation *rsv, size_t offset);
void pmem2_config_clear_address(struct pmem2_config *cfg);
/* mapping */
struct pmem2_map;
int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr);
int pmem2_unmap(struct pmem2_map **map_ptr);
void *pmem2_map_get_address(struct pmem2_map *map);
size_t pmem2_map_get_size(struct pmem2_map *map);
enum pmem2_granularity pmem2_map_get_store_granularity(struct pmem2_map *map);
/* flushing */
typedef void (*pmem2_persist_fn)(const void *ptr, size_t size);
typedef void (*pmem2_flush_fn)(const void *ptr, size_t size);
typedef void (*pmem2_drain_fn)(void);
pmem2_persist_fn pmem2_get_persist_fn(struct pmem2_map *map);
pmem2_flush_fn pmem2_get_flush_fn(struct pmem2_map *map);
pmem2_drain_fn pmem2_get_drain_fn(struct pmem2_map *map);
#define PMEM2_F_MEM_NODRAIN (1U << 0)
#define PMEM2_F_MEM_NONTEMPORAL (1U << 1)
#define PMEM2_F_MEM_TEMPORAL (1U << 2)
#define PMEM2_F_MEM_WC (1U << 3)
#define PMEM2_F_MEM_WB (1U << 4)
#define PMEM2_F_MEM_NOFLUSH (1U << 5)
#define PMEM2_F_MEM_VALID_FLAGS (PMEM2_F_MEM_NODRAIN | \
PMEM2_F_MEM_NONTEMPORAL | \
PMEM2_F_MEM_TEMPORAL | \
PMEM2_F_MEM_WC | \
PMEM2_F_MEM_WB | \
PMEM2_F_MEM_NOFLUSH)
typedef void *(*pmem2_memmove_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*pmem2_memcpy_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*pmem2_memset_fn)(void *pmemdest, int c, size_t len,
unsigned flags);
pmem2_memmove_fn pmem2_get_memmove_fn(struct pmem2_map *map);
pmem2_memcpy_fn pmem2_get_memcpy_fn(struct pmem2_map *map);
pmem2_memset_fn pmem2_get_memset_fn(struct pmem2_map *map);
/* RAS */
int pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size);
#ifndef _WIN32
int pmem2_source_device_id(const struct pmem2_source *src,
char *id, size_t *len);
#else
int pmem2_source_device_idW(const struct pmem2_source *src,
wchar_t *id, size_t *len);
int pmem2_source_device_idU(const struct pmem2_source *src,
char *id, size_t *len);
#endif
int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc);
struct pmem2_badblock_context;
struct pmem2_badblock {
size_t offset;
size_t length;
};
int pmem2_badblock_context_new(const struct pmem2_source *src,
struct pmem2_badblock_context **bbctx);
int pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
void pmem2_badblock_context_delete(
struct pmem2_badblock_context **bbctx);
int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb);
/* error handling */
#ifndef _WIN32
const char *pmem2_errormsg(void);
#else
const char *pmem2_errormsgU(void);
const wchar_t *pmem2_errormsgW(void);
#endif
int pmem2_err_to_errno(int);
#ifndef _WIN32
void pmem2_perror(const char *format,
...) __attribute__((__format__(__printf__, 1, 2)));
#else
void pmem2_perrorU(const char *format, ...);
void pmem2_perrorW(const wchar_t *format, ...);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmem2.h */
| 7,202 | 25.677778 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/ctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* libpmemobj/ctl.h -- definitions of pmemobj_ctl related entry points
*/
#ifndef LIBPMEMOBJ_CTL_H
#define LIBPMEMOBJ_CTL_H 1
#include <stddef.h>
#include <sys/types.h>
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Allocation class interface
*
* When requesting an object from the allocator, the first step is to determine
* which allocation class best approximates the size of the object.
* Once found, the appropriate free list, called bucket, for that
* class is selected in a fashion that minimizes contention between threads.
* Depending on the requested size and the allocation class, it might happen
* that the object size (including required metadata) would be bigger than the
* allocation class size - called unit size. In those situations, the object is
* constructed from two or more units (up to 64).
*
* If the requested number of units cannot be retrieved from the selected
* bucket, the thread reaches out to the global, shared, heap which manages
* memory in 256 kilobyte chunks and gives it out in a best-fit fashion. This
* operation must be performed under an exclusive lock.
* Once the thread is in the possession of a chunk, the lock is dropped, and the
* memory is split into units that repopulate the bucket.
*
* These are the CTL entry points that control allocation classes:
* - heap.alloc_class.[class_id].desc
* Creates/retrieves allocation class information
*
* It's VERY important to remember that the allocation classes are a RUNTIME
* property of the allocator - they are NOT stored persistently in the pool.
* It's recommended to always create custom allocation classes immediately after
* creating or opening the pool, before any use.
* If there are existing objects created using a class that is no longer stored
* in the runtime state of the allocator, they can be normally freed, but
* allocating equivalent objects will be done using the allocation class that
* is currently defined for that size.
*
* Please see the libpmemobj man page for more information about entry points.
*/
/*
* Persistent allocation header
*/
enum pobj_header_type {
/*
* 64-byte header used up until the version 1.3 of the library,
* functionally equivalent to the compact header.
* It's not recommended to create any new classes with this header.
*/
POBJ_HEADER_LEGACY,
/*
* 16-byte header used by the default allocation classes. All library
* metadata is by default allocated using this header.
* Supports type numbers and variably sized allocations.
*/
POBJ_HEADER_COMPACT,
/*
* 0-byte header with metadata stored exclusively in a bitmap. This
* ensures that objects are allocated in memory contiguously and
* without attached headers.
* This can be used to create very small allocation classes, but it
* does not support type numbers.
* Additionally, allocations with this header can only span a single
* unit.
* Objects allocated with this header do show up when iterating through
* the heap using pmemobj_first/pmemobj_next functions, but have a
* type_num equal 0.
*/
POBJ_HEADER_NONE,
MAX_POBJ_HEADER_TYPES
};
/*
* Description of allocation classes
*/
struct pobj_alloc_class_desc {
/*
* The number of bytes in a single unit of allocation. A single
* allocation can span up to 64 units (or 1 in the case of no header).
* If one creates an allocation class with a certain unit size and
* forces it to handle bigger sizes, more than one unit
* will be used.
* For example, an allocation class with a compact header and 128 bytes
* unit size, for a request of 200 bytes will create a memory block
* containing 256 bytes that spans two units. The usable size of that
* allocation will be 240 bytes: 2 * 128 - 16 (header).
*/
size_t unit_size;
/*
* Desired alignment of objects from the allocation class.
* If non zero, must be a power of two and an even divisor of unit size.
*
* All allocation classes have default alignment
* of 64. User data alignment is affected by the size of a header. For
* compact one this means that the alignment is 48 bytes.
*
*/
size_t alignment;
/*
* The minimum number of units that must be present in a
* single, contiguous, memory block.
* Those blocks (internally called runs), are fetched on demand from the
* heap. Accessing that global state is a serialization point for the
* allocator and thus it is imperative for performance and scalability
* that a reasonable amount of memory is fetched in a single call.
* Threads generally do not share memory blocks from which they
* allocate, but blocks do go back to the global heap if they are no
* longer actively used for allocation.
*/
unsigned units_per_block;
/*
* The header of allocations that originate from this allocation class.
*/
enum pobj_header_type header_type;
/*
* The identifier of this allocation class.
*/
unsigned class_id;
};
enum pobj_stats_enabled {
POBJ_STATS_ENABLED_TRANSIENT,
POBJ_STATS_ENABLED_BOTH,
POBJ_STATS_ENABLED_PERSISTENT,
POBJ_STATS_DISABLED,
};
#ifndef _WIN32
/* EXPERIMENTAL */
int pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg);
#else
int pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg);
int pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg);
int pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg);
#ifndef PMDK_UTF8_API
#define pmemobj_ctl_get pmemobj_ctl_getW
#define pmemobj_ctl_set pmemobj_ctl_setW
#define pmemobj_ctl_exec pmemobj_ctl_execW
#else
#define pmemobj_ctl_get pmemobj_ctl_getU
#define pmemobj_ctl_set pmemobj_ctl_setU
#define pmemobj_ctl_exec pmemobj_ctl_execU
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/ctl.h */
| 6,198 | 34.221591 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/lists_atomic.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmemobj/lists_atomic.h -- definitions of libpmemobj atomic lists macros
*/
#ifndef LIBPMEMOBJ_LISTS_ATOMIC_H
#define LIBPMEMOBJ_LISTS_ATOMIC_H 1
#include <libpmemobj/lists_atomic_base.h>
#include <libpmemobj/thread.h>
#include <libpmemobj/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Non-transactional persistent atomic circular doubly-linked list
*/
#define POBJ_LIST_ENTRY(type)\
struct {\
TOID(type) pe_next;\
TOID(type) pe_prev;\
}
#define POBJ_LIST_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
PMEMmutex lock;\
}
#define POBJ_LIST_FIRST(head) ((head)->pe_first)
#define POBJ_LIST_LAST(head, field) (\
TOID_IS_NULL((head)->pe_first) ?\
(head)->pe_first :\
D_RO((head)->pe_first)->field.pe_prev)
#define POBJ_LIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_LIST_NEXT(elm, field) (D_RO(elm)->field.pe_next)
#define POBJ_LIST_PREV(elm, field) (D_RO(elm)->field.pe_prev)
#define POBJ_LIST_DEST_HEAD 1
#define POBJ_LIST_DEST_TAIL 0
#define POBJ_LIST_DEST_BEFORE 1
#define POBJ_LIST_DEST_AFTER 0
#define POBJ_LIST_FOREACH(var, head, field)\
for (_pobj_debug_notice("POBJ_LIST_FOREACH", __FILE__, __LINE__),\
(var) = POBJ_LIST_FIRST((head));\
TOID_IS_NULL((var)) == 0;\
TOID_EQUALS(POBJ_LIST_NEXT((var), field),\
POBJ_LIST_FIRST((head))) ?\
TOID_ASSIGN((var), OID_NULL) :\
((var) = POBJ_LIST_NEXT((var), field)))
#define POBJ_LIST_FOREACH_REVERSE(var, head, field)\
for (_pobj_debug_notice("POBJ_LIST_FOREACH_REVERSE", __FILE__, __LINE__),\
(var) = POBJ_LIST_LAST((head), field);\
TOID_IS_NULL((var)) == 0;\
TOID_EQUALS(POBJ_LIST_PREV((var), field),\
POBJ_LIST_LAST((head), field)) ?\
TOID_ASSIGN((var), OID_NULL) :\
((var) = POBJ_LIST_PREV((var), field)))
#define POBJ_LIST_INSERT_HEAD(pop, head, elm, field)\
pmemobj_list_insert((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), OID_NULL,\
POBJ_LIST_DEST_HEAD, (elm).oid)
#define POBJ_LIST_INSERT_TAIL(pop, head, elm, field)\
pmemobj_list_insert((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), OID_NULL,\
POBJ_LIST_DEST_TAIL, (elm).oid)
#define POBJ_LIST_INSERT_AFTER(pop, head, listelm, elm, field)\
pmemobj_list_insert((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), (listelm).oid,\
0 /* after */, (elm).oid)
#define POBJ_LIST_INSERT_BEFORE(pop, head, listelm, elm, field)\
pmemobj_list_insert((pop), \
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), (listelm).oid,\
1 /* before */, (elm).oid)
#define POBJ_LIST_INSERT_NEW_HEAD(pop, head, field, size, constr, arg)\
pmemobj_list_insert_new((pop),\
TOID_OFFSETOF((head)->pe_first, field),\
(head), OID_NULL, POBJ_LIST_DEST_HEAD, (size),\
TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))
#define POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, size, constr, arg)\
pmemobj_list_insert_new((pop),\
TOID_OFFSETOF((head)->pe_first, field),\
(head), OID_NULL, POBJ_LIST_DEST_TAIL, (size),\
TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))
#define POBJ_LIST_INSERT_NEW_AFTER(pop, head, listelm, field, size,\
constr, arg)\
pmemobj_list_insert_new((pop),\
TOID_OFFSETOF((head)->pe_first, field),\
(head), (listelm).oid, 0 /* after */, (size),\
TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))
#define POBJ_LIST_INSERT_NEW_BEFORE(pop, head, listelm, field, size,\
constr, arg)\
pmemobj_list_insert_new((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), (listelm).oid, 1 /* before */, (size),\
TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))
#define POBJ_LIST_REMOVE(pop, head, elm, field)\
pmemobj_list_remove((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), (elm).oid, 0 /* no free */)
#define POBJ_LIST_REMOVE_FREE(pop, head, elm, field)\
pmemobj_list_remove((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head), (elm).oid, 1 /* free */)
#define POBJ_LIST_MOVE_ELEMENT_HEAD(pop, head, head_new, elm, field, field_new)\
pmemobj_list_move((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
(head_new), OID_NULL, POBJ_LIST_DEST_HEAD, (elm).oid)
#define POBJ_LIST_MOVE_ELEMENT_TAIL(pop, head, head_new, elm, field, field_new)\
pmemobj_list_move((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
(head_new), OID_NULL, POBJ_LIST_DEST_TAIL, (elm).oid)
#define POBJ_LIST_MOVE_ELEMENT_AFTER(pop,\
head, head_new, listelm, elm, field, field_new)\
pmemobj_list_move((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
(head_new),\
(listelm).oid,\
0 /* after */, (elm).oid)
#define POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,\
head, head_new, listelm, elm, field, field_new)\
pmemobj_list_move((pop),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
(head),\
TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
(head_new),\
(listelm).oid,\
1 /* before */, (elm).oid)
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/lists_atomic.h */
| 5,121 | 30.042424 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/iterator.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj/iterator.h -- definitions of libpmemobj iterator macros
*/
#ifndef LIBPMEMOBJ_ITERATOR_H
#define LIBPMEMOBJ_ITERATOR_H 1
#include <libpmemobj/iterator_base.h>
#include <libpmemobj/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * POBJ_FIRST_TYPE_NUM -- returns the first object in the pool whose type
 * number equals 'type_num', or OID_NULL when the pool contains no such
 * object.
 */
static inline PMEMoid
POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num)
{
	PMEMoid it;

	/* walk the heap from the first object, stopping at a type match */
	for (it = pmemobj_first(pop); !OID_IS_NULL(it);
			it = pmemobj_next(it)) {
		if (pmemobj_type_num(it) == type_num)
			break;
	}

	/* either a matching object or OID_NULL when iteration ran out */
	return it;
}
/*
 * POBJ_NEXT_TYPE_NUM -- returns the next object after 'o' that has the same
 * type number as 'o', or OID_NULL if no such object follows in the pool.
 *
 * Note: the trailing '\' line-continuation that previously followed the
 * pmemobj_next() call was a leftover from a former macro implementation and
 * has been removed; it served no purpose in a regular function body and
 * could silently splice in a following line.
 */
static inline PMEMoid
POBJ_NEXT_TYPE_NUM(PMEMoid o)
{
	PMEMoid _pobj_ret = o;

	do {
		/* advance at least once, then skip objects of other types */
		_pobj_ret = pmemobj_next(_pobj_ret);
	} while (!OID_IS_NULL(_pobj_ret) &&
			pmemobj_type_num(_pobj_ret) != pmemobj_type_num(o));
	return _pobj_ret;
}
#define POBJ_FIRST(pop, t) ((TOID(t))POBJ_FIRST_TYPE_NUM(pop, TOID_TYPE_NUM(t)))
#define POBJ_NEXT(o) ((__typeof__(o))POBJ_NEXT_TYPE_NUM((o).oid))
/*
* Iterates through every existing allocated object.
*/
#define POBJ_FOREACH(pop, varoid)\
for (_pobj_debug_notice("POBJ_FOREACH", __FILE__, __LINE__),\
varoid = pmemobj_first(pop);\
(varoid).off != 0; varoid = pmemobj_next(varoid))
/*
* Safe variant of POBJ_FOREACH in which pmemobj_free on varoid is allowed
*/
#define POBJ_FOREACH_SAFE(pop, varoid, nvaroid)\
for (_pobj_debug_notice("POBJ_FOREACH_SAFE", __FILE__, __LINE__),\
varoid = pmemobj_first(pop);\
(varoid).off != 0 && (nvaroid = pmemobj_next(varoid), 1);\
varoid = nvaroid)
/*
* Iterates through every object of the specified type.
*/
#define POBJ_FOREACH_TYPE(pop, var)\
POBJ_FOREACH(pop, (var).oid)\
if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var))
/*
* Safe variant of POBJ_FOREACH_TYPE in which pmemobj_free on var
* is allowed.
*/
#define POBJ_FOREACH_SAFE_TYPE(pop, var, nvar)\
POBJ_FOREACH_SAFE(pop, (var).oid, (nvar).oid)\
if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var))
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/iterator.h */
| 2,041 | 23.60241 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/lists_atomic_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmemobj/lists_atomic_base.h -- definitions of libpmemobj atomic lists
*/
#ifndef LIBPMEMOBJ_LISTS_ATOMIC_BASE_H
#define LIBPMEMOBJ_LISTS_ATOMIC_BASE_H 1
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Non-transactional persistent atomic circular doubly-linked list
*/
int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid dest, int before, PMEMoid oid);
PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid dest, int before, size_t size, uint64_t type_num,
pmemobj_constr constructor, void *arg);
int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid oid, int free);
int pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset,
void *head_old, size_t pe_new_offset, void *head_new,
PMEMoid dest, int before, PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/lists_atomic_base.h */
| 1,022 | 24.575 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/tx_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* libpmemobj/tx_base.h -- definitions of libpmemobj transactional entry points
*/
#ifndef LIBPMEMOBJ_TX_BASE_H
#define LIBPMEMOBJ_TX_BASE_H 1
#include <setjmp.h>
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Transactions
*
* Stages are changed only by the pmemobj_tx_* functions, each transition
* to the TX_STAGE_ONABORT is followed by a longjmp to the jmp_buf provided in
* the pmemobj_tx_begin function.
*/
enum pobj_tx_stage {
TX_STAGE_NONE, /* no transaction in this thread */
TX_STAGE_WORK, /* transaction in progress */
TX_STAGE_ONCOMMIT, /* successfully committed */
TX_STAGE_ONABORT, /* tx_begin failed or transaction aborted */
TX_STAGE_FINALLY, /* always called */
MAX_TX_STAGE
};
/*
* Always returns the current transaction stage for a thread.
*/
enum pobj_tx_stage pmemobj_tx_stage(void);
enum pobj_tx_param {
TX_PARAM_NONE,
TX_PARAM_MUTEX, /* PMEMmutex */
TX_PARAM_RWLOCK, /* PMEMrwlock */
TX_PARAM_CB, /* pmemobj_tx_callback cb, void *arg */
};
enum pobj_log_type {
TX_LOG_TYPE_SNAPSHOT,
TX_LOG_TYPE_INTENT,
};
enum pobj_tx_failure_behavior {
POBJ_TX_FAILURE_ABORT,
POBJ_TX_FAILURE_RETURN,
};
#if !defined(pmdk_use_attr_deprec_with_msg) && defined(__COVERITY__)
#define pmdk_use_attr_deprec_with_msg 0
#endif
#if !defined(pmdk_use_attr_deprec_with_msg) && defined(__clang__)
#if __has_extension(attribute_deprecated_with_message)
#define pmdk_use_attr_deprec_with_msg 1
#else
#define pmdk_use_attr_deprec_with_msg 0
#endif
#endif
#if !defined(pmdk_use_attr_deprec_with_msg) && \
defined(__GNUC__) && !defined(__INTEL_COMPILER)
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 601 /* 6.1 */
#define pmdk_use_attr_deprec_with_msg 1
#else
#define pmdk_use_attr_deprec_with_msg 0
#endif
#endif
#if !defined(pmdk_use_attr_deprec_with_msg)
#define pmdk_use_attr_deprec_with_msg 0
#endif
#if pmdk_use_attr_deprec_with_msg
#define tx_lock_deprecated __attribute__((deprecated(\
"enum pobj_tx_lock is deprecated, use enum pobj_tx_param")))
#else
#define tx_lock_deprecated
#endif
/* deprecated, do not use */
enum tx_lock_deprecated pobj_tx_lock {
TX_LOCK_NONE tx_lock_deprecated = TX_PARAM_NONE,
TX_LOCK_MUTEX tx_lock_deprecated = TX_PARAM_MUTEX,
TX_LOCK_RWLOCK tx_lock_deprecated = TX_PARAM_RWLOCK,
};
typedef void (*pmemobj_tx_callback)(PMEMobjpool *pop, enum pobj_tx_stage stage,
void *);
#define POBJ_TX_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\
POBJ_XALLOC_NO_FLUSH |\
POBJ_XALLOC_ARENA_MASK |\
POBJ_XALLOC_CLASS_MASK |\
POBJ_XALLOC_NO_ABORT)
#define POBJ_XADD_NO_FLUSH POBJ_FLAG_NO_FLUSH
#define POBJ_XADD_NO_SNAPSHOT POBJ_FLAG_NO_SNAPSHOT
#define POBJ_XADD_ASSUME_INITIALIZED POBJ_FLAG_ASSUME_INITIALIZED
#define POBJ_XADD_NO_ABORT POBJ_FLAG_TX_NO_ABORT
#define POBJ_XADD_VALID_FLAGS (POBJ_XADD_NO_FLUSH |\
POBJ_XADD_NO_SNAPSHOT |\
POBJ_XADD_ASSUME_INITIALIZED |\
POBJ_XADD_NO_ABORT)
#define POBJ_XLOCK_NO_ABORT POBJ_FLAG_TX_NO_ABORT
#define POBJ_XLOCK_VALID_FLAGS (POBJ_XLOCK_NO_ABORT)
#define POBJ_XFREE_NO_ABORT POBJ_FLAG_TX_NO_ABORT
#define POBJ_XFREE_VALID_FLAGS (POBJ_XFREE_NO_ABORT)
#define POBJ_XPUBLISH_NO_ABORT POBJ_FLAG_TX_NO_ABORT
#define POBJ_XPUBLISH_VALID_FLAGS (POBJ_XPUBLISH_NO_ABORT)
#define POBJ_XLOG_APPEND_BUFFER_NO_ABORT POBJ_FLAG_TX_NO_ABORT
#define POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS (POBJ_XLOG_APPEND_BUFFER_NO_ABORT)
/*
* Starts a new transaction in the current thread.
* If called within an open transaction, starts a nested transaction.
*
* If successful, transaction stage changes to TX_STAGE_WORK and function
* returns zero. Otherwise, stage changes to TX_STAGE_ONABORT and an error
* number is returned.
*/
int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...);
/*
* Adds lock of given type to current transaction.
* 'Flags' is a bitmask of the following values:
* - POBJ_XLOCK_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*/
int pmemobj_tx_xlock(enum pobj_tx_param type, void *lockp, uint64_t flags);
/*
* Adds lock of given type to current transaction.
*/
int pmemobj_tx_lock(enum pobj_tx_param type, void *lockp);
/*
* Aborts current transaction
*
* Causes transition to TX_STAGE_ONABORT.
*
* This function must be called during TX_STAGE_WORK.
*/
void pmemobj_tx_abort(int errnum);
/*
* Commits current transaction
*
* This function must be called during TX_STAGE_WORK.
*/
void pmemobj_tx_commit(void);
/*
* Cleanups current transaction. Must always be called after pmemobj_tx_begin,
* even if starting the transaction failed.
*
* If called during TX_STAGE_NONE, has no effect.
*
* Always causes transition to TX_STAGE_NONE.
*
* If transaction was successful, returns 0. Otherwise returns error code set
* by pmemobj_tx_abort.
*
* This function must *not* be called during TX_STAGE_WORK.
*/
int pmemobj_tx_end(void);
/*
* Performs the actions associated with current stage of the transaction,
* and makes the transition to the next stage. Current stage must always
* be obtained by calling pmemobj_tx_stage.
*
* This function must be called in transaction.
*/
void pmemobj_tx_process(void);
/*
* Returns last transaction error code.
*/
int pmemobj_tx_errno(void);
/*
* Takes a "snapshot" of the memory block of given size and located at given
* offset 'off' in the object 'oid' and saves it in the undo log.
* The application is then free to directly modify the object in that memory
* range. In case of failure or abort, all the changes within this range will
* be rolled-back automatically.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size);
/*
* Takes a "snapshot" of the given memory region and saves it in the undo log.
* The application is then free to directly modify the object in that memory
* range. In case of failure or abort, all the changes within this range will
* be rolled-back automatically. The supplied block of memory has to be within
* the given pool.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_add_range_direct(const void *ptr, size_t size);
/*
* Behaves exactly the same as pmemobj_tx_add_range when 'flags' equals 0.
* 'Flags' is a bitmask of the following values:
* - POBJ_XADD_NO_FLUSH - skips flush on commit
* - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted
* - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized
* - POBJ_XADD_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*/
int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size,
uint64_t flags);
/*
* Behaves exactly the same as pmemobj_tx_add_range_direct when 'flags' equals
* 0. 'Flags' is a bitmask of the following values:
* - POBJ_XADD_NO_FLUSH - skips flush on commit
* - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted
* - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized
* - POBJ_XADD_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*/
int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags);
/*
* Transactionally allocates a new object.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num);
/*
* Transactionally allocates a new object.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
* 'Flags' is a bitmask of the following values:
* - POBJ_XALLOC_ZERO - zero the allocated object
* - POBJ_XALLOC_NO_FLUSH - skip flush on commit
* - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags);
/*
* Transactionally allocates new zeroed object.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num);
/*
* Transactionally resizes an existing object.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num);
/*
* Transactionally resizes an existing object, if extended new space is zeroed.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num);
/*
* Transactionally allocates a new object with duplicate of the string s.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num);
/*
* Transactionally allocates a new object with duplicate of the string s.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
* 'Flags' is a bitmask of the following values:
* - POBJ_XALLOC_ZERO - zero the allocated object
* - POBJ_XALLOC_NO_FLUSH - skip flush on commit
* - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_xstrdup(const char *s, uint64_t type_num, uint64_t flags);
/*
* Transactionally allocates a new object with duplicate of the wide character
* string s.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num);
/*
* Transactionally allocates a new object with duplicate of the wide character
* string s.
*
* If successful, returns PMEMoid.
* Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
* 'Flags' is a bitmask of the following values:
* - POBJ_XALLOC_ZERO - zero the allocated object
* - POBJ_XALLOC_NO_FLUSH - skip flush on commit
* - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*
* This function must be called during TX_STAGE_WORK.
*/
PMEMoid pmemobj_tx_xwcsdup(const wchar_t *s, uint64_t type_num, uint64_t flags);
/*
* Transactionally frees an existing object.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_free(PMEMoid oid);
/*
* Transactionally frees an existing object.
*
* If successful, returns zero.
* Otherwise, the stage changes to TX_STAGE_ONABORT and the error number is
* returned.
* 'Flags' is a bitmask of the following values:
* - POBJ_XFREE_NO_ABORT - if the function does not end successfully,
* do not abort the transaction and return the error number.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_xfree(PMEMoid oid, uint64_t flags);
/*
* Append user allocated buffer to the ulog.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_log_append_buffer(enum pobj_log_type type,
void *addr, size_t size);
/*
* Append user allocated buffer to the ulog.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
* 'Flags' is a bitmask of the following values:
* - POBJ_XLOG_APPEND_BUFFER_NO_ABORT - if the function does not end
* successfully, do not abort the transaction and return the error number.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_xlog_append_buffer(enum pobj_log_type type,
void *addr, size_t size, uint64_t flags);
/*
* Enables or disables automatic ulog allocations.
*
* If successful, returns zero.
* Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
*
* This function must be called during TX_STAGE_WORK.
*/
int pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off);
/*
* Calculates and returns size for user buffers for snapshots.
*/
size_t pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes);
/*
* Calculates and returns size for user buffers for intents.
*/
size_t pmemobj_tx_log_intents_max_size(size_t nintents);
/*
* Sets volatile pointer to the user data for the current transaction.
*/
void pmemobj_tx_set_user_data(void *data);
/*
* Gets volatile pointer to the user data associated with the current
* transaction.
*/
void *pmemobj_tx_get_user_data(void);
/*
* Sets the failure behavior of transactional functions.
*
* This function must be called during TX_STAGE_WORK.
*/
void pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior);
/*
* Returns failure behavior for the current transaction.
*
* This function must be called during TX_STAGE_WORK.
*/
enum pobj_tx_failure_behavior pmemobj_tx_get_failure_behavior(void);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/tx_base.h */
| 14,087 | 30.237251 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/pool_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* libpmemobj/pool_base.h -- definitions of libpmemobj pool entry points
*/
#ifndef LIBPMEMOBJ_POOL_BASE_H
#define LIBPMEMOBJ_POOL_BASE_H 1
#include <stddef.h>
#include <sys/types.h>
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
//NEW
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(const char* pathname);
//END NEW
#define PMEMOBJ_MIN_POOL ((size_t)(1024 * 1024 * 256)) /* 256 MiB */
/*
 * This limit is set arbitrarily to incorporate a pool header and required
 * alignment plus supply.
 */
#define PMEMOBJ_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
/*
* Pool management.
*/
#ifdef _WIN32
#ifndef PMDK_UTF8_API
#define pmemobj_open pmemobj_openW
#define pmemobj_create pmemobj_createW
#define pmemobj_check pmemobj_checkW
#else
#define pmemobj_open pmemobj_openU
#define pmemobj_create pmemobj_createU
#define pmemobj_check pmemobj_checkU
#endif
#endif
#ifndef _WIN32
PMEMobjpool *pmemobj_open(const char *path, const char *layout);
#else
PMEMobjpool *pmemobj_openU(const char *path, const char *layout);
PMEMobjpool *pmemobj_openW(const wchar_t *path, const wchar_t *layout);
#endif
#ifndef _WIN32
PMEMobjpool *pmemobj_create(const char *path, const char *layout,
size_t poolsize, mode_t mode);
#else
PMEMobjpool *pmemobj_createU(const char *path, const char *layout,
size_t poolsize, mode_t mode);
PMEMobjpool *pmemobj_createW(const wchar_t *path, const wchar_t *layout,
size_t poolsize, mode_t mode);
#endif
#ifndef _WIN32
int pmemobj_check(const char *path, const char *layout);
#else
int pmemobj_checkU(const char *path, const char *layout);
int pmemobj_checkW(const wchar_t *path, const wchar_t *layout);
#endif
void pmemobj_close(PMEMobjpool *pop);
/*
* If called for the first time on a newly created pool, the root object
* of given size is allocated. Otherwise, it returns the existing root object.
* In such case, the size must be not less than the actual root object size
* stored in the pool. If it's larger, the root object is automatically
* resized.
*
* This function is thread-safe.
*/
PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size);
/*
* Same as above, but calls the constructor function when the object is first
* created and on all subsequent reallocations.
*/
PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size,
pmemobj_constr constructor, void *arg);
/*
* Returns the size in bytes of the root object. Always equal to the requested
* size.
*/
size_t pmemobj_root_size(PMEMobjpool *pop);
/*
* Sets volatile pointer to the user data for specified pool.
*/
void pmemobj_set_user_data(PMEMobjpool *pop, void *data);
/*
* Gets volatile pointer to the user data associated with the specified pool.
*/
void *pmemobj_get_user_data(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/pool_base.h */
| 3,095 | 24.377049 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/action_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* libpmemobj/action_base.h -- definitions of libpmemobj action interface
*/
#ifndef LIBPMEMOBJ_ACTION_BASE_H
#define LIBPMEMOBJ_ACTION_BASE_H 1
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
enum pobj_action_type {
/* a heap action (e.g., alloc) */
POBJ_ACTION_TYPE_HEAP,
/* a single memory operation (e.g., value set) */
POBJ_ACTION_TYPE_MEM,
POBJ_MAX_ACTION_TYPE
};
struct pobj_action_heap {
/* offset to the element being freed/allocated */
uint64_t offset;
/* usable size of the element being allocated */
uint64_t usable_size;
};
struct pobj_action {
/*
* These fields are internal for the implementation and are not
* guaranteed to be stable across different versions of the API.
* Use with caution.
*
* This structure should NEVER be stored on persistent memory!
*/
enum pobj_action_type type;
uint32_t data[3];
union {
struct pobj_action_heap heap;
uint64_t data2[14];
};
};
#define POBJ_ACTION_XRESERVE_VALID_FLAGS\
(POBJ_XALLOC_CLASS_MASK |\
POBJ_XALLOC_ARENA_MASK |\
POBJ_XALLOC_ZERO)
PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num);
PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num, uint64_t flags);
void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act);
int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv,
size_t actvcnt);
int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt);
int pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt,
uint64_t flags);
void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/action_base.h */
| 1,935 | 24.813333 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/types.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* libpmemobj/types.h -- definitions of libpmemobj type-safe macros
*/
#ifndef LIBPMEMOBJ_TYPES_H
#define LIBPMEMOBJ_TYPES_H 1
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TOID_NULL(t) ((TOID(t))OID_NULL)
#define PMEMOBJ_MAX_LAYOUT ((size_t)1024)
/*
* Type safety macros
*/
#if !(defined _MSC_VER || defined __clang__)
#define TOID_ASSIGN(o, value)(\
{\
(o).oid = value;\
(o); /* to avoid "error: statement with no effect" */\
})
#else /* _MSC_VER or __clang__ */
#define TOID_ASSIGN(o, value) ((o).oid = value, (o))
#endif
#if (defined _MSC_VER && _MSC_VER < 1912)
/*
* XXX - workaround for offsetof issue in VS 15.3,
* it has been fixed since Visual Studio 2017 Version 15.5
* (_MSC_VER == 1912)
*/
#ifdef PMEMOBJ_OFFSETOF_WA
#ifdef _CRT_USE_BUILTIN_OFFSETOF
#undef offsetof
#define offsetof(s, m) ((size_t)&reinterpret_cast < char const volatile& > \
((((s *)0)->m)))
#endif
#else
#ifdef _CRT_USE_BUILTIN_OFFSETOF
#error "Invalid definition of offsetof() macro - see: \
https://developercommunity.visualstudio.com/content/problem/96174/\
offsetof-macro-is-broken-for-nested-objects.html \
Please upgrade your VS, fix offsetof as described under the link or define \
PMEMOBJ_OFFSETOF_WA to enable workaround in libpmemobj.h"
#endif
#endif
#endif /* _MSC_VER */
#define TOID_EQUALS(lhs, rhs)\
((lhs).oid.off == (rhs).oid.off &&\
(lhs).oid.pool_uuid_lo == (rhs).oid.pool_uuid_lo)
/* type number of root object */
#define POBJ_ROOT_TYPE_NUM 0
#define _toid_struct
#define _toid_union
#define _toid_enum
#define _POBJ_LAYOUT_REF(name) (sizeof(_pobj_layout_##name##_ref))
/*
* Typed OID
*/
#define TOID(t)\
union _toid_##t##_toid
#ifdef __cplusplus
#define _TOID_CONSTR(t)\
_toid_##t##_toid()\
{ }\
_toid_##t##_toid(PMEMoid _oid) : oid(_oid)\
{ }
#else
#define _TOID_CONSTR(t)
#endif
/*
* Declaration of typed OID
*/
#define _TOID_DECLARE(t, i)\
typedef uint8_t _toid_##t##_toid_type_num[(i) + 1];\
TOID(t)\
{\
_TOID_CONSTR(t)\
PMEMoid oid;\
t *_type;\
_toid_##t##_toid_type_num *_type_num;\
}
/*
* Declaration of typed OID of an object
*/
#define TOID_DECLARE(t, i) _TOID_DECLARE(t, i)
/*
* Declaration of typed OID of a root object
*/
#define TOID_DECLARE_ROOT(t) _TOID_DECLARE(t, POBJ_ROOT_TYPE_NUM)
/*
* Type number of specified type
*/
#define TOID_TYPE_NUM(t) (sizeof(_toid_##t##_toid_type_num) - 1)
/*
* Type number of object read from typed OID
*/
#define TOID_TYPE_NUM_OF(o) (sizeof(*(o)._type_num) - 1)
/*
* NULL check
*/
#define TOID_IS_NULL(o) ((o).oid.off == 0)
/*
* Validates whether type number stored in typed OID is the same
* as type number stored in object's metadata
*/
#define TOID_VALID(o) (TOID_TYPE_NUM_OF(o) == pmemobj_type_num((o).oid))
/*
* Checks whether the object is of a given type
*/
#define OID_INSTANCEOF(o, t) (TOID_TYPE_NUM(t) == pmemobj_type_num(o))
/*
* Begin of layout declaration
*/
#define POBJ_LAYOUT_BEGIN(name)\
typedef uint8_t _pobj_layout_##name##_ref[__COUNTER__ + 1]
/*
* End of layout declaration
*/
#define POBJ_LAYOUT_END(name)\
typedef char _pobj_layout_##name##_cnt[__COUNTER__ + 1 -\
_POBJ_LAYOUT_REF(name)];
/*
* Number of types declared inside layout without the root object
*/
#define POBJ_LAYOUT_TYPES_NUM(name) (sizeof(_pobj_layout_##name##_cnt) - 1)
/*
* Declaration of typed OID inside layout declaration
*/
#define POBJ_LAYOUT_TOID(name, t)\
TOID_DECLARE(t, (__COUNTER__ + 1 - _POBJ_LAYOUT_REF(name)));
/*
* Declaration of typed OID of root inside layout declaration
*/
#define POBJ_LAYOUT_ROOT(name, t)\
TOID_DECLARE_ROOT(t);
/*
* Name of declared layout
*/
#define POBJ_LAYOUT_NAME(name) #name
#define TOID_TYPEOF(o) __typeof__(*(o)._type)
#define TOID_OFFSETOF(o, field) offsetof(TOID_TYPEOF(o), field)
/*
* XXX - DIRECT_RW and DIRECT_RO are not available when compiled using VC++
* as C code (/TC). Use /TP option.
*/
#ifndef _MSC_VER
#define DIRECT_RW(o) (\
{__typeof__(o) _o; _o._type = NULL; (void)_o;\
(__typeof__(*(o)._type) *)pmemobj_direct((o).oid); })
#define DIRECT_RO(o) ((const __typeof__(*(o)._type) *)pmemobj_direct((o).oid))
#elif defined(__cplusplus)
/*
* XXX - On Windows, these macros do not behave exactly the same as on Linux.
*/
#define DIRECT_RW(o) \
(reinterpret_cast < __typeof__((o)._type) > (pmemobj_direct((o).oid)))
#define DIRECT_RO(o) \
(reinterpret_cast < const __typeof__((o)._type) > \
(pmemobj_direct((o).oid)))
#endif /* (defined(_MSC_VER) || defined(__cplusplus)) */
#define D_RW DIRECT_RW
#define D_RO DIRECT_RO
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/types.h */
| 4,701 | 21.825243 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj/base.h -- definitions of base libpmemobj entry points
*/
#ifndef LIBPMEMOBJ_BASE_H
#define LIBPMEMOBJ_BASE_H 1
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#include <stddef.h>
#include <stdint.h>
#ifdef _WIN32
#include <pmemcompat.h>
#ifndef PMDK_UTF8_API
#define pmemobj_check_version pmemobj_check_versionW
#define pmemobj_errormsg pmemobj_errormsgW
#else
#define pmemobj_check_version pmemobj_check_versionU
#define pmemobj_errormsg pmemobj_errormsgU
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* opaque type internal to libpmemobj
*/
typedef struct pmemobjpool PMEMobjpool;
#define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0)
/*
* allocation functions flags
*/
#define POBJ_FLAG_ZERO (((uint64_t)1) << 0)
#define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1)
#define POBJ_FLAG_NO_SNAPSHOT (((uint64_t)1) << 2)
#define POBJ_FLAG_ASSUME_INITIALIZED (((uint64_t)1) << 3)
#define POBJ_FLAG_TX_NO_ABORT (((uint64_t)1) << 4)
#define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48)
#define POBJ_ARENA_ID(id) (((uint64_t)(id)) << 32)
#define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48)
#define POBJ_XALLOC_ARENA_MASK ((((uint64_t)1 << 16) - 1) << 32)
#define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO
#define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH
#define POBJ_XALLOC_NO_ABORT POBJ_FLAG_TX_NO_ABORT
/*
* pmemobj_mem* flags
*/
#define PMEMOBJ_F_MEM_NODRAIN (1U << 0)
#define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1)
#define PMEMOBJ_F_MEM_TEMPORAL (1U << 2)
#define PMEMOBJ_F_MEM_WC (1U << 3)
#define PMEMOBJ_F_MEM_WB (1U << 4)
#define PMEMOBJ_F_MEM_NOFLUSH (1U << 5)
/*
* pmemobj_mem*, pmemobj_xflush & pmemobj_xpersist flags
*/
#define PMEMOBJ_F_RELAXED (1U << 31)
/*
* Persistent memory object
*/
/*
 * Object handle -- persistent, position-independent reference to an object:
 * the low 64 bits of the owning pool's uuid plus the object's byte offset
 * within that pool. OID_NULL (both fields zero) denotes "no object".
 */
typedef struct pmemoid {
	uint64_t pool_uuid_lo;	/* lower 64 bits of the owning pool's uuid */
	uint64_t off;		/* byte offset of the object within the pool */
} PMEMoid;
static const PMEMoid OID_NULL = { 0, 0 };
#define OID_IS_NULL(o) ((o).off == 0)
#define OID_EQUALS(lhs, rhs)\
((lhs).off == (rhs).off &&\
(lhs).pool_uuid_lo == (rhs).pool_uuid_lo)
PMEMobjpool *pmemobj_pool_by_ptr(const void *addr);
PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid);
#ifndef _WIN32
extern int _pobj_cache_invalidate;
extern __thread struct _pobj_pcache {
PMEMobjpool *pop;
uint64_t uuid_lo;
int invalidate;
} _pobj_cached_pool;
/*
 * pmemobj_direct_inline -- translate a persistent object id into its direct
 * (virtual-memory) pointer.
 *
 * A per-thread cache (_pobj_cached_pool) memoizes the uuid -> pool mapping;
 * the cache is refreshed when the global invalidation counter has moved or
 * when the oid belongs to a different pool than the cached one.
 *
 * Returns NULL for a null oid or when no open pool matches the uuid.
 */
static inline void *
pmemobj_direct_inline(PMEMoid oid)
{
	struct _pobj_pcache *pcache = &_pobj_cached_pool;
	int stale;

	if (oid.pool_uuid_lo == 0 || oid.off == 0)
		return NULL;

	/* cache miss: either globally invalidated or a different pool */
	stale = (_pobj_cache_invalidate != pcache->invalidate) ||
	    (pcache->uuid_lo != oid.pool_uuid_lo);

	if (stale) {
		pcache->invalidate = _pobj_cache_invalidate;

		pcache->pop = pmemobj_pool_by_oid(oid);
		if (pcache->pop == NULL) {
			pcache->uuid_lo = 0;
			return NULL;
		}

		pcache->uuid_lo = oid.pool_uuid_lo;
	}

	return (void *)((uintptr_t)pcache->pop + oid.off);
}
#endif /* _WIN32 */
/*
* Returns the direct pointer of an object.
*/
#if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\
defined(PMEMOBJ_DIRECT_NON_INLINE)
void *pmemobj_direct(PMEMoid oid);
#else
#define pmemobj_direct pmemobj_direct_inline
#endif
struct pmemvlt {
uint64_t runid;
};
#define PMEMvlt(T)\
struct {\
struct pmemvlt vlt;\
T value;\
}
/*
* Returns lazily initialized volatile variable. (EXPERIMENTAL)
*/
void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
void *ptr, size_t size,
int (*constr)(void *ptr, void *arg), void *arg);
/*
* Returns the OID of the object pointed to by addr.
*/
PMEMoid pmemobj_oid(const void *addr);
/*
* Returns the number of usable bytes in the object. May be greater than
* the requested size of the object because of internal alignment.
*
* Can be used with objects allocated by any of the available methods.
*/
size_t pmemobj_alloc_usable_size(PMEMoid oid);
/*
* Returns the type number of the object.
*/
uint64_t pmemobj_type_num(PMEMoid oid);
/*
* Pmemobj specific low-level memory manipulation functions.
*
* These functions are meant to be used with pmemobj pools, because they provide
* additional functionality specific to this type of pool. These may include
* for example replication support. They also take advantage of the knowledge
* of the type of memory in the pool (pmem/non-pmem) to assure persistence.
*/
/*
* Pmemobj version of memcpy. Data copied is made persistent.
*/
void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src,
size_t len);
/*
* Pmemobj version of memset. Data range set is made persistent.
*/
void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len);
/*
* Pmemobj version of memcpy. Data copied is made persistent (unless opted-out
* using flags).
*/
void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len,
unsigned flags);
/*
* Pmemobj version of memmove. Data copied is made persistent (unless opted-out
* using flags).
*/
void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len,
unsigned flags);
/*
* Pmemobj version of memset. Data range set is made persistent (unless
* opted-out using flags).
*/
void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len,
unsigned flags);
/*
* Pmemobj version of pmem_persist.
*/
void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len);
/*
* Pmemobj version of pmem_persist with additional flags argument.
*/
int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len,
unsigned flags);
/*
* Pmemobj version of pmem_flush.
*/
void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len);
/*
* Pmemobj version of pmem_flush with additional flags argument.
*/
int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len,
unsigned flags);
/*
* Pmemobj version of pmem_drain.
*/
void pmemobj_drain(PMEMobjpool *pop);
/*
* Version checking.
*/
/*
* PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version
* of the libpmemobj API as provided by this header file. Applications can
* verify that the version available at run-time is compatible with the version
* used at compile-time by passing these defines to pmemobj_check_version().
*/
#define PMEMOBJ_MAJOR_VERSION 2
#define PMEMOBJ_MINOR_VERSION 4
#ifndef _WIN32
const char *pmemobj_check_version(unsigned major_required,
unsigned minor_required);
#else
const char *pmemobj_check_versionU(unsigned major_required,
unsigned minor_required);
const wchar_t *pmemobj_check_versionW(unsigned major_required,
unsigned minor_required);
#endif
/*
* Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the
* default for that function. The replacement functions must not make calls
* back into libpmemobj.
*/
void pmemobj_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg);
/*
* (debug helper function) logs notice message if used inside a transaction
*/
void _pobj_debug_notice(const char *func_name, const char *file, int line);
#ifndef _WIN32
const char *pmemobj_errormsg(void);
#else
const char *pmemobj_errormsgU(void);
const wchar_t *pmemobj_errormsgW(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/base.h */
| 7,415 | 23.72 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj/tx.h -- definitions of libpmemobj transactional macros
*/
#ifndef LIBPMEMOBJ_TX_H
#define LIBPMEMOBJ_TX_H 1
#include <errno.h>
#include <string.h>
#include <libpmemobj/tx_base.h>
#include <libpmemobj/types.h>
extern uint64_t waitCycles;
extern uint64_t resetCycles;
//extern int current_tx1 = 1 ;
#ifdef __cplusplus
extern "C" {
#endif
#ifdef POBJ_TX_CRASH_ON_NO_ONABORT
#define TX_ONABORT_CHECK do {\
if (_stage == TX_STAGE_ONABORT)\
abort();\
} while (0)
#else
#define TX_ONABORT_CHECK do {} while (0)
#endif
#define _POBJ_TX_BEGIN(pop, ...)\
{\
jmp_buf _tx_env;\
enum pobj_tx_stage _stage;\
int _pobj_errno;\
if (setjmp(_tx_env)) {\
errno = pmemobj_tx_errno();\
} else {\
_pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\
TX_PARAM_NONE);\
if (_pobj_errno)\
errno = _pobj_errno;\
}\
while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\
switch (_stage) {\
case TX_STAGE_WORK:
#define TX_BEGIN_PARAM(pop, ...)\
_POBJ_TX_BEGIN(pop, ##__VA_ARGS__)
#define TX_BEGIN_LOCK TX_BEGIN_PARAM
/* Just to let compiler warn when incompatible function pointer is used */
/*
 * _pobj_validate_cb_sig -- identity pass-through whose only purpose is to
 * make the compiler type-check the callback handed to TX_BEGIN_CB; an
 * incompatible function pointer triggers a diagnostic here.
 */
static inline pmemobj_tx_callback
_pobj_validate_cb_sig(pmemobj_tx_callback cb)
{
	pmemobj_tx_callback checked = cb;

	return checked;
}
#define TX_BEGIN_CB(pop, cb, arg, ...) _POBJ_TX_BEGIN(pop, TX_PARAM_CB,\
_pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__)
#define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE)
#define TX_ONABORT\
pmemobj_tx_process();\
break;\
case TX_STAGE_ONABORT:
#define TX_ONCOMMIT\
pmemobj_tx_process();\
break;\
case TX_STAGE_ONCOMMIT:
#define TX_FINALLY\
pmemobj_tx_process();\
break;\
case TX_STAGE_FINALLY:
#define TX_END\
pmemobj_tx_process();\
break;\
default:\
TX_ONABORT_CHECK;\
pmemobj_tx_process();\
break;\
}\
}\
_pobj_errno = pmemobj_tx_end();\
if (_pobj_errno)\
errno = _pobj_errno;\
}
#define TX_ADD(o)\
pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type))
#define TX_ADD_FIELD(o, field)\
TX_ADD_DIRECT(&(D_RO(o)->field))
#define TX_ADD_DIRECT(p)\
pmemobj_tx_add_range_direct(p, sizeof(*(p)))
#define TX_ADD_FIELD_DIRECT(p, field)\
pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field))
#define TX_XADD(o, flags)\
pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags)
#define TX_XADD_FIELD(o, field, flags)\
TX_XADD_DIRECT(&(D_RO(o)->field), flags)
#define TX_XADD_DIRECT(p, flags)\
pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags)
#define TX_XADD_FIELD_DIRECT(p, field, flags)\
pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags)
#define TX_NEW(t)\
((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t)))
#define TX_ALLOC(t, size)\
((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t)))
#define TX_ZNEW(t)\
((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t)))
#define TX_ZALLOC(t, size)\
((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t)))
#define TX_XALLOC(t, size, flags)\
((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags))
/* XXX - not available when compiled with VC++ as C code (/TC) */
#if !defined(_MSC_VER) || defined(__cplusplus)
#define TX_REALLOC(o, size)\
((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o)))
#define TX_ZREALLOC(o, size)\
((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o)))
#endif /* !defined(_MSC_VER) || defined(__cplusplus) */
#define TX_STRDUP(s, type_num)\
pmemobj_tx_strdup(s, type_num)
#define TX_XSTRDUP(s, type_num, flags)\
pmemobj_tx_xstrdup(s, type_num, flags)
#define TX_WCSDUP(s, type_num)\
pmemobj_tx_wcsdup(s, type_num)
#define TX_XWCSDUP(s, type_num, flags)\
pmemobj_tx_xwcsdup(s, type_num, flags)
#define TX_FREE(o)\
pmemobj_tx_free((o).oid)
#define TX_XFREE(o, flags)\
pmemobj_tx_xfree((o).oid, flags)
#define TX_SET(o, field, value) (\
TX_ADD_FIELD(o, field),\
D_RW(o)->field = (value))
#define TX_SET_DIRECT(p, field, value) (\
TX_ADD_FIELD_DIRECT(p, field),\
(p)->field = (value))
/*
 * TX_MEMCPY -- transactional memcpy: snapshots the destination range into
 * the transaction's undo log, then performs the copy. Returns dest, like
 * memcpy(3).
 */
static inline void *
TX_MEMCPY(void *dest, const void *src, size_t num)
{
	/* the snapshot must be taken before the destination is modified */
	pmemobj_tx_add_range_direct(dest, num);

	void *ret = memcpy(dest, src, num);
	return ret;
}
/*
 * TX_MEMSET -- transactional memset: snapshots the destination range into
 * the transaction's undo log, then fills it. Returns dest, like memset(3).
 */
static inline void *
TX_MEMSET(void *dest, int c, size_t num)
{
	/* the snapshot must be taken before the destination is modified */
	pmemobj_tx_add_range_direct(dest, num);

	void *ret = memset(dest, c, num);
	return ret;
}
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/tx.h */
| 4,386 | 21.848958 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/atomic_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points
*/
#ifndef LIBPMEMOBJ_ATOMIC_BASE_H
#define LIBPMEMOBJ_ATOMIC_BASE_H 1
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Non-transactional atomic allocations
*
* Those functions can be used outside transactions. The allocations are always
* aligned to the cache-line boundary.
*/
#define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\
POBJ_XALLOC_CLASS_MASK)
/*
* Allocates a new object from the pool and calls a constructor function before
* returning. It is guaranteed that allocated object is either properly
* initialized, or if it's interrupted before the constructor completes, the
* memory reserved for the object is automatically reclaimed.
*/
int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, pmemobj_constr constructor, void *arg);
/*
* Allocates with flags a new object from the pool.
*/
int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, uint64_t flags,
pmemobj_constr constructor, void *arg);
/*
* Allocates a new zeroed object from the pool.
*/
int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
/*
* Resizes an existing object.
*/
int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
/*
* Resizes an existing object, if extended new space is zeroed.
*/
int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
/*
* Allocates a new object with duplicate of the string s.
*/
int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s,
uint64_t type_num);
/*
* Allocates a new object with duplicate of the wide character string s.
*/
int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s,
uint64_t type_num);
/*
* Frees an existing object.
*/
void pmemobj_free(PMEMoid *oidp);
struct pobj_defrag_result {
size_t total; /* number of processed objects */
size_t relocated; /* number of relocated objects */
};
/*
* Performs defragmentation on the provided array of objects.
*/
int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt,
struct pobj_defrag_result *result);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/atomic_base.h */
| 2,386 | 24.393617 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/thread.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points
*/
#ifndef LIBPMEMOBJ_THREAD_H
#define LIBPMEMOBJ_THREAD_H 1
#include <time.h>
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Locking.
*/
#define _POBJ_CL_SIZE 64 /* cache line size */
typedef union {
long long align;
char padding[_POBJ_CL_SIZE];
} PMEMmutex;
typedef union {
long long align;
char padding[_POBJ_CL_SIZE];
} PMEMrwlock;
typedef union {
long long align;
char padding[_POBJ_CL_SIZE];
} PMEMcond;
void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout);
int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp);
void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop,
PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout);
int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop,
PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout);
int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout);
int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
PMEMmutex *__restrict mutexp);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/thread.h */
| 2,150 | 28.875 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/action.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* libpmemobj/action.h -- definitions of libpmemobj action interface
*/
#ifndef LIBPMEMOBJ_ACTION_H
#define LIBPMEMOBJ_ACTION_H 1
#include <libpmemobj/action_base.h>
#ifdef __cplusplus
extern "C" {
#endif
#define POBJ_RESERVE_NEW(pop, t, act)\
((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t)))
#define POBJ_RESERVE_ALLOC(pop, t, size, act)\
((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t)))
#define POBJ_XRESERVE_NEW(pop, t, act, flags)\
((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags))
#define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\
((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags))
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/action_base.h */
| 829 | 23.411765 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/atomic.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmemobj/atomic.h -- definitions of libpmemobj atomic macros
*/
#ifndef LIBPMEMOBJ_ATOMIC_H
#define LIBPMEMOBJ_ATOMIC_H 1
#include <libpmemobj/atomic_base.h>
#include <libpmemobj/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define POBJ_NEW(pop, o, t, constr, arg)\
pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\
(constr), (arg))
#define POBJ_ALLOC(pop, o, t, size, constr, arg)\
pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\
(constr), (arg))
#define POBJ_ZNEW(pop, o, t)\
pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t))
#define POBJ_ZALLOC(pop, o, t, size)\
pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t))
#define POBJ_REALLOC(pop, o, t, size)\
pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t))
#define POBJ_ZREALLOC(pop, o, t, size)\
pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t))
#define POBJ_FREE(o)\
pmemobj_free((PMEMoid *)(o))
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/atomic.h */
| 1,115 | 23.26087 | 66 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/include/libpmemobj/iterator_base.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points
*/
#ifndef LIBPMEMOBJ_ITERATOR_BASE_H
#define LIBPMEMOBJ_ITERATOR_BASE_H 1
#include <libpmemobj/base.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* The following functions allow access to the entire collection of objects.
*
* Use with conjunction with non-transactional allocations. Pmemobj pool acts
* as a generic container (list) of objects that are not assigned to any
* user-defined data structures.
*/
/*
* Returns the first object of the specified type number.
*/
PMEMoid pmemobj_first(PMEMobjpool *pop);
/*
* Returns the next object of the same type.
*/
PMEMoid pmemobj_next(PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif /* libpmemobj/iterator_base.h */
| 855 | 20.4 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/TPCC_CP/tpcc_db.h | /*
Author: Vaibhav Gogte <[email protected]>
Aasheesh Kolli <[email protected]>
This file declares the tpcc database and the accesor transactions.
*/
#include "table_entries.h"
#include <atomic>
#include "simple_queue.h"
#include <pthread.h>
#include <cstdlib>
#include "../include/txopt.h"
typedef simple_queue queue_t;
/*
 * Undo/checkpoint log holding pre-modification copies of the rows a
 * new-order transaction touches, plus validity flags so recovery can tell
 * which backups hold live data.
 */
struct backUpLog{
	struct district_entry district_back;		/* district row backup */
	//fill_new_order_entry
	struct new_order_entry new_order_entry_back;	/* new-order row backup */
	//update_order_entry
	struct order_entry order_entry_back;		/* order row backup */
	//update_stock_entry
	struct stock_entry stock_entry_back[15];	/* stock row backups, one per order line */
	int fill_new_order_entry_indx = 0;
	int update_order_entry_indx = 0;
	/* NOTE(review): sized 16 while stock_entry_back is 15 -- confirm intended */
	int update_stock_entry_indx[16];
	/* per-section validity flags -- presumably nonzero marks a live backup; confirm in recovery code */
	uint64_t district_back_valid;
	uint64_t fill_new_order_entry_back_valid;
	uint64_t update_order_entry_back_valid;
	uint64_t update_stock_entry_num_valid;
	//global log valid
	uint64_t log_valid;
};
/*
 * In-memory TPCC database: owns all TPCC tables, per-thread locking state,
 * and a backUpLog instance (presumably used to checkpoint new_order_tx --
 * confirm in the implementation file).
 */
class TPCC_DB {

  private:
    // Tables with size dependent on num warehouses
    short num_warehouses;
    short random_3000[3000];
    warehouse_entry* warehouse;
    district_entry* district;
    customer_entry* customer;
    stock_entry* stock;

    // Tables with slight variation in sizes (due to inserts/deletes etc.)
    history_entry* history;
    order_entry* order;
    new_order_entry* new_order;
    order_line_entry* order_line;

    // Fixed size table
    item_entry* item;

    unsigned long* rndm_seeds; // per-thread random seeds -- see get_random()

    queue_t* perTxLocks; // Array of queues of locks held by active Tx
    pthread_mutex_t* locks; // Array of locks held by the TxEngn. RDSs acquire locks through the TxEngn
    unsigned g_seed; // seed for fastrand()

  public:
    struct backUpLog * backUpInst; // checkpoint/undo log (see backUpLog)

    TPCC_DB();
    ~TPCC_DB();

    // Table allocation and initial population.
    void initialize(int _num_warehouses, int numThreads);
    void populate_tables();

    // Row constructors used while populating the initial database.
    void fill_item_entry(int _i_id);
    void fill_warehouse_entry(int _w_id);
    void fill_stock_entry(int _s_w_id, int s_i_id);
    void fill_district_entry(int _d_w_id, int _d_id);
    void fill_customer_entry(int _c_w_id, int _c_d_id, int _c_id);
    void fill_history_entry(int _h_c_w_id, int _h_c_d_id, int _h_c_id);
    void fill_order_entry(int _o_w_id, int _o_d_id, int _o_id);
    void fill_order_line_entry(int _ol_w_id, int _ol_d_id, int _ol_o_id, int _o_ol_cnt, long long _o_entry_d);
    void fill_new_order_entry(int _no_w_id, int _no_d_id, int _no_o_id, int threadId);

    // Random field generators for table population.
    void random_a_string(int min, int max, char* string_ptr);
    void random_n_string(int min, int max, char* string_ptr);
    void random_a_original_string(int min, int max, int probability, char* string_ptr);
    void random_zip(char* string_ptr);
    void fill_time(long long &time_slot);
    int rand_local(int min, int max);

    // TPCC new-order transaction entry point.
    void new_order_tx(int threadId, int w_id, int d_id, int c_id);

    // Field-by-field row copies (used when backing up / restoring rows).
    void copy_district_info(district_entry &dest, district_entry &source);
    void copy_customer_info(customer_entry &dest, customer_entry &source);
    void copy_new_order_info(new_order_entry &dest, new_order_entry &source);
    void copy_order_info(order_entry &dest, order_entry &source);
    void copy_stock_info(stock_entry &dest, stock_entry &source);
    void copy_order_line_info(order_line_entry &dest, order_line_entry &source);

    void update_order_entry(int _w_id, short _d_id, int _o_id, int _c_id, int _ol_cnt, int threadId);
    void update_stock_entry(int threadId, int _w_id, int _i_id, int _d_id, float &amount, int itr);

    // Per-thread pseudo-random helpers.
    unsigned long get_random(int thread_id, int min, int max);
    unsigned long get_random(int thread_id);

    void printStackPointer(int* sp, int thread_id);

    // Transaction lock-set acquisition/release (locks array above).
    void acquire_locks(int thread_id, queue_t &reqLocks);
    void release_locks(int thread_id);

    unsigned fastrand();
};
| 3,755 | 30.041322 | 110 | h |
null | NearPMSW-main/nearpm/checkpointing/include/txopt.h | // The starting address of the selected counter_atomic writes
#ifndef TXOPT_H
#define TXOPT_H
#define COUNTER_ATOMIC_VADDR (4096UL*1024*1024)
#define NUM_COUNTER_ATOMIC_PAGE 262144
// The starting address of the flush cache instruction
#define CACHE_FLUSH_VADDR (4096UL*1024*1024+4*NUM_COUNTER_ATOMIC_PAGE*1024)
// The starting address of the flush metadata cache instruction
#define METADATA_CACHE_FLUSH_VADDR (4096UL*1024*1024+(4*NUM_COUNTER_ATOMIC_PAGE+4)*1024)
#define STATUS_OUTPUT_VADDR (METADATA_CACHE_FLUSH_VADDR + 1024UL)
#define INIT_METADATA_CACHE_VADDR (STATUS_OUTPUT_VADDR + 1024UL)
#define TXOPT_VADDR (INIT_METADATA_CACHE_VADDR+1024UL)
#define CACHE_LINE_SIZE 64UL
#include <vector>
#include <deque>
#include <cstdlib>
#include <cstdint>
#include <atomic>
#include <stdio.h>
#include <cassert>
enum opt_flag {
FLAG_OPT,
FLAG_OPT_VAL,
FLAG_OPT_ADDR,
FLAG_OPT_DATA,
FLAG_OPT_DATA_VAL,
/* register no execute */
FLAG_OPT_REG,
FLAG_OPT_VAL_REG,
FLAG_OPT_ADDR_REG,
FLAG_OPT_DATA_REG,
FLAG_OPT_DATA_VAL_REG,
/* execute registered OPT */
FLAG_OPT_START
};
// opt_t -- handle identifying an OPT object; passed (by address) as the
// opt_obj field of an opt_packet_t.
struct opt_t {
	//int pid;
	int obj_id;	// object identifier for the OPT request
};
// Fields in the OPT packet
// Used by both SW and HW
// The packet is stored to the TXOPT_VADDR window (see the OPT* functions
// below); the hardware side interprets it.
struct opt_packet_t {
	void* opt_obj;	// handle of the OPT object (see struct opt_t)
	void* pmemaddr;	// destination persistent-memory address
	//void* data_ptr;
	//int seg_id;
	//int data_val;
	unsigned size;	// payload size in bytes
	opt_flag type;	// which OPT variant this packet encodes
};
// OPT with both data and addr ready
volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size);
//#define OPT(opt_obj, pmemaddr, data, size) \
// *((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA};
// OPT with both data (int) and addr ready
volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val);
// OPT with only data ready
volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size);
// OPT with only addr ready
volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size);
// OPT with only data (int) ready
volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val);
// Begin OPT operation
volatile void OPT_START(void* opt_obj);
// store barrier
volatile void s_fence();
// flush both metadata cache and data cache
volatile void flush_caches(void* addr, unsigned size);
// flush data cache only
volatile void cache_flush(void* addr, unsigned size);
// flush metadata cache only
volatile void metadata_cache_flush(void* addr, unsigned size);
// malloc that is cache-line aligned
void *aligned_malloc(int size);
// CounterAtomic -- integer value backed by the counter-atomic memory window
// (COUNTER_ATOMIC_VADDR); declaration only, definitions live elsewhere.
// NOTE(review): operator+/operator- appear to mutate and return *this like
// the ++/-- operators rather than producing a new value -- confirm against
// the implementation before relying on value semantics.
class CounterAtomic {
public:
	// allocate _size bytes from the counter-atomic region
	static void* counter_atomic_malloc(unsigned _size);
	// size is num of bytes
	static volatile void statOutput();
	static volatile void initCounterCache();
	uint64_t getValue();
	uint64_t getPtr();
	CounterAtomic();
	CounterAtomic(uint64_t _val);
	CounterAtomic(bool _val);
	CounterAtomic& operator=(uint64_t _val);
	CounterAtomic& operator+(uint64_t _val);
	CounterAtomic& operator++();
	CounterAtomic& operator--();
	CounterAtomic& operator-(uint64_t _val);
	bool operator==(uint64_t _val);
	bool operator!=(uint64_t _val);
private:
	void init();
	// bump-pointer allocators over the special-purpose address windows
	static uint64_t getNextAtomicAddr(unsigned _size);
	static uint64_t getNextCacheFlushAddr(unsigned _size);
	//static uint64_t getNextPersistBarrierAddr(unsigned _size);
	static uint64_t getNextCounterCacheFlushAddr(unsigned _size);
	static uint64_t currAtomicAddr;
	static uint64_t currCacheFlushAddr;
	//static uint64_t currPersistentBarrierAddr;
	static uint64_t currCounterCacheFlushAddr;
	/*
	static bool hasAllocateCacheFlush;
	static bool hasAllocateCounterCacheFlush;
	static bool hasAllocatePersistBarrier;
	*/
	//uint64_t val;
	// address of the backing storage for the value (0 until init)
	uint64_t val_addr = 0;
};
#endif
| 3,665 | 26.155556 | 90 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_config.h -- internal definitions for rpmemd config
*/
#include <stdint.h>
#include <stdbool.h>
#ifndef RPMEMD_DEFAULT_LOG_FILE
#define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log")
#endif
#ifndef RPMEMD_GLOBAL_CONFIG_FILE
#define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\
".conf")
#endif
#define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf")
#define RPMEM_DEFAULT_MAX_LANES 1024
#define RPMEM_DEFAULT_NTHREADS 0
#define HOME_ENV "HOME"
#define HOME_STR_PLACEHOLDER ("$" HOME_ENV)
/*
 * rpmemd_config -- rpmemd configuration, filled in by rpmemd_config_read()
 */
struct rpmemd_config {
	char *log_file;		/* log file path (used unless use_syslog) */
	char *poolset_dir;	/* root directory of the pool set database */
	const char *rm_poolset;	/* pool set to remove -- remove mode if set */
	bool force;		/* force flag for pool removal */
	bool pool_set;		/* remove the pool set file itself as well */
	bool persist_apm;	/* select the APM persistency method */
	bool persist_general;	/* GPSPM allowed (reported in debug output) */
	bool use_syslog;	/* log to syslog instead of the log file */
	uint64_t max_lanes;	/* maximum number of lanes */
	enum rpmemd_log_level log_level;	/* logging verbosity */
	size_t nthreads;	/* number of fip processing threads */
};
int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]);
void rpmemd_config_free(struct rpmemd_config *config);
| 1,012 | 21.021739 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd.c -- rpmemd main source file
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "librpmem.h"
#include "rpmemd.h"
#include "rpmemd_log.h"
#include "rpmemd_config.h"
#include "rpmem_common.h"
#include "rpmemd_fip.h"
#include "rpmemd_obc.h"
#include "rpmemd_db.h"
#include "rpmemd_util.h"
#include "pool_hdr.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "uuid.h"
#include "set.h"
/*
 * rpmemd -- rpmem handle
 *
 * Aggregates all per-connection state of the daemon: the out-of-band
 * channel, the pool set database, the currently opened pool and the
 * in-band (fabric) connection thread.
 */
struct rpmemd {
	struct rpmemd_obc *obc;	/* out-of-band connection handle */
	struct rpmemd_db *db;	/* pool set database handle */
	struct rpmemd_db_pool *pool; /* pool handle */
	char *pool_desc;	/* pool descriptor */
	struct rpmemd_fip *fip;	/* fabric provider handle */
	struct rpmemd_config config;	/* configuration */
	enum rpmem_persist_method persist_method; /* APM or GPSPM */
	int closing;		/* set when closing connection */
	int created;		/* pool created */
	os_thread_t fip_thread;	/* background in-band connection thread */
	int fip_running;	/* set while fip_thread is running */
};
#ifdef DEBUG
/*
 * bool2str -- map a boolean flag to a human-readable "yes"/"no" string
 */
static inline const char *
bool2str(int v)
{
	if (v)
		return "yes";
	return "no";
}
#endif
/*
 * _str -- substitute the literal "(null)" for a NULL string pointer,
 * so the result is always safe to hand to a printf-style formatter
 */
static inline const char *
_str(const char *str)
{
	return str ? str : "(null)";
}
/*
 * uuid2str -- convert uuid to string
 *
 * Returns a pointer to a static buffer: the result is only valid until
 * the next call and the function is not thread-safe (fine here, as the
 * callers log attributes sequentially). Returns "(error)" when the
 * conversion fails.
 */
static const char *
uuid2str(const uuid_t uuid)
{
	static char uuid_str[64] = {0, };
	int ret = util_uuid_to_string(uuid, uuid_str);
	if (ret != 0) {
		return "(error)";
	}
	return uuid_str;
}
/*
 * rpmemd_get_pm -- select the persistency method requested by the daemon
 * configuration: APM when persist_apm is set, GPSPM otherwise
 */
static enum rpmem_persist_method
rpmemd_get_pm(struct rpmemd_config *config)
{
	return config->persist_apm ? RPMEM_PM_APM : RPMEM_PM_GPSPM;
}
/*
 * rpmemd_db_get_status -- translate an errno value from a pool set db
 * operation into the corresponding RPMEM_ERR_* status code sent back
 * to the client (anything unrecognized becomes RPMEM_ERR_FATAL)
 */
static int
rpmemd_db_get_status(int err)
{
	int status;

	switch (err) {
	case EEXIST:
		status = RPMEM_ERR_EXISTS;
		break;
	case EACCES:
		status = RPMEM_ERR_NOACCESS;
		break;
	case ENOENT:
		status = RPMEM_ERR_NOEXIST;
		break;
	case EWOULDBLOCK:
		status = RPMEM_ERR_BUSY;
		break;
	case EBADF:
		status = RPMEM_ERR_BADNAME;
		break;
	case EINVAL:
		status = RPMEM_ERR_POOL_CFG;
		break;
	default:
		status = RPMEM_ERR_FATAL;
		break;
	}

	return status;
}
/*
 * rpmemd_check_pool -- verify pool parameters
 *
 * Checks that the opened pool is at least RPMEM_MIN_POOL bytes and large
 * enough for the size the client requested. Returns 0 on success; on
 * failure returns -1 and stores the client-facing error code in *status.
 */
static int
rpmemd_check_pool(struct rpmemd *rpmemd, const struct rpmem_req_attr *req,
	int *status)
{
	if (rpmemd->pool->pool_size < RPMEM_MIN_POOL) {
		RPMEMD_LOG(ERR, "invalid pool size -- must be >= %zu",
				RPMEM_MIN_POOL);
		*status = RPMEM_ERR_POOL_CFG;
		return -1;
	}
	if (rpmemd->pool->pool_size < req->pool_size) {
		RPMEMD_LOG(ERR, "requested size is too big");
		*status = RPMEM_ERR_BADSIZE;
		return -1;
	}
	return 0;
}
/*
 * rpmemd_deep_persist -- perform deep persist operation
 *
 * Callback registered with the fabric provider (see rpmemd_common_fip_init);
 * ctx carries the struct rpmemd * supplied at registration. Delegates to
 * util_replica_deep_persist on replica 0 of the pool set.
 */
static int
rpmemd_deep_persist(const void *addr, size_t size, void *ctx)
{
	struct rpmemd *rpmemd = (struct rpmemd *)ctx;
	return util_replica_deep_persist(addr, size, rpmemd->pool->set, 0);
}
/*
 * rpmemd_common_fip_init -- initialize fabric provider
 *
 * Builds the fabric attributes from the client request and the daemon
 * state, applies the persistency-method policy (which may downgrade
 * APM to GPSPM for non-pmem pools -- see rpmemd_apply_pm_policy) and
 * creates the fip instance bound to the ssh connection address.
 * On failure returns -1 and stores the client-facing error in *status.
 */
static int
rpmemd_common_fip_init(struct rpmemd *rpmemd, const struct rpmem_req_attr *req,
	struct rpmem_resp_attr *resp, int *status)
{
	/* register the whole pool with header in RDMA */
	void *addr = (void *)((uintptr_t)rpmemd->pool->pool_addr);
	struct rpmemd_fip_attr fip_attr = {
		.addr		= addr,
		.size		= req->pool_size,
		.nlanes		= req->nlanes,
		.nthreads	= rpmemd->config.nthreads,
		.provider	= req->provider,
		.persist_method = rpmemd->persist_method,
		.deep_persist	= rpmemd_deep_persist,
		.ctx		= rpmemd,
		.buff_size	= req->buff_size,
	};
	const int is_pmem = rpmemd_db_pool_is_pmem(rpmemd->pool);
	if (rpmemd_apply_pm_policy(&fip_attr.persist_method,
			&fip_attr.persist,
			&fip_attr.memcpy_persist,
			is_pmem)) {
		*status = RPMEM_ERR_FATAL;
		goto err_fip_init;
	}
	const char *node = rpmem_get_ssh_conn_addr();
	enum rpmem_err err;
	rpmemd->fip = rpmemd_fip_init(node, NULL, &fip_attr, resp, &err);
	if (!rpmemd->fip) {
		*status = (int)err;
		goto err_fip_init;
	}
	return 0;
err_fip_init:
	return -1;
}
/*
 * rpmemd_print_req_attr -- print request attributes
 *
 * Logs (at NOTICE level) the attributes of an incoming create/open
 * request, one field per line.
 */
static void
rpmemd_print_req_attr(const struct rpmem_req_attr *req)
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool descriptor: '%s'",
			_str(req->pool_desc));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool size: %lu", req->pool_size);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", req->nlanes);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "provider: %s",
			rpmem_provider_to_str(req->provider));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "buff_size: %lu", req->buff_size);
}
/*
 * rpmemd_print_pool_attr -- print pool attributes
 *
 * Logs (at INFO level) all pool attributes, or "NULL" when no
 * attributes were supplied. uuid2str returns a static buffer, which is
 * safe here because each log call completes before the next conversion.
 */
static void
rpmemd_print_pool_attr(const struct rpmem_pool_attr *attr)
{
	if (attr == NULL) {
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "NULL");
	} else {
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "signature: '%s'",
				_str(attr->signature));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "major: %u", attr->major);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "compat_features: 0x%x",
				attr->compat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "incompat_features: 0x%x",
				attr->incompat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "ro_compat_features: 0x%x",
				attr->ro_compat_features);
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "poolset_uuid: %s",
				uuid2str(attr->poolset_uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "uuid: %s",
				uuid2str(attr->uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "next_uuid: %s",
				uuid2str(attr->next_uuid));
		RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "prev_uuid: %s",
				uuid2str(attr->prev_uuid));
	}
}
/*
 * rpmemd_print_resp_attr -- print response attributes
 *
 * Logs (at NOTICE level) the attributes about to be sent back to the
 * client in a create/open response.
 */
static void
rpmemd_print_resp_attr(const struct rpmem_resp_attr *attr)
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "port: %u", attr->port);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "rkey: 0x%lx", attr->rkey);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "raddr: 0x%lx", attr->raddr);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", attr->nlanes);
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(attr->persist_method));
}
/*
 * rpmemd_fip_thread -- background thread for establishing in-band connection
 *
 * Waits (with RPMEM_ACCEPT_TIMEOUT) for the client's fabric connection,
 * then starts request processing. The thread's return value is the error
 * code cast to a pointer; rpmemd_fip_stop_thread converts it back.
 */
static void *
rpmemd_fip_thread(void *arg)
{
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	RPMEMD_LOG(INFO, "waiting for in-band connection");
	ret = rpmemd_fip_accept(rpmemd->fip, RPMEM_ACCEPT_TIMEOUT);
	if (ret)
		goto err_accept;
	RPMEMD_LOG(NOTICE, "in-band connection established");
	ret = rpmemd_fip_process_start(rpmemd->fip);
	if (ret)
		goto err_process_start;
	return NULL;
err_process_start:
	rpmemd_fip_close(rpmemd->fip);
err_accept:
	return (void *)(uintptr_t)ret;
}
/*
 * rpmemd_fip_start_thread -- start background thread for establishing
 * in-band connection
 *
 * On success marks the thread as running (fip_running) so that
 * rpmemd_req_cleanup / rpmemd_fip_stop know there is a thread to join.
 * Returns 0 on success, -1 on failure (errno set by os_thread_create).
 */
static int
rpmemd_fip_start_thread(struct rpmemd *rpmemd)
{
	errno = os_thread_create(&rpmemd->fip_thread, NULL,
			rpmemd_fip_thread, rpmemd);
	if (errno) {
		RPMEMD_LOG(ERR, "!creating in-band thread");
		goto err_os_thread_create;
	}
	rpmemd->fip_running = 1;
	return 0;
err_os_thread_create:
	return -1;
}
/*
 * rpmemd_fip_stop_thread -- stop background thread for in-band connection
 *
 * Joins the thread started by rpmemd_fip_start_thread and returns its
 * exit status (0 on clean exit). Must only be called while fip_running.
 */
static int
rpmemd_fip_stop_thread(struct rpmemd *rpmemd)
{
	RPMEMD_ASSERT(rpmemd->fip_running);
	void *tret;
	errno = os_thread_join(&rpmemd->fip_thread, &tret);
	if (errno)
		RPMEMD_LOG(ERR, "!waiting for in-band thread");
	/* the thread returned its error code cast to a pointer */
	int ret = (int)(uintptr_t)tret;
	if (ret)
		RPMEMD_LOG(ERR, "in-band thread failed -- '%d'", ret);
	return ret;
}
/*
 * rpmemd_fip-stop -- stop in-band thread and stop processing thread
 *
 * Joins the in-band thread first; only when it exited cleanly is the
 * fip processing stopped (otherwise the connection never got that far).
 * Clears fip_running and returns the in-band thread's status.
 */
static int
rpmemd_fip_stop(struct rpmemd *rpmemd)
{
	int ret;
	int fip_ret = rpmemd_fip_stop_thread(rpmemd);
	if (fip_ret) {
		RPMEMD_LOG(ERR, "!in-band thread failed");
	}
	if (!fip_ret) {
		ret = rpmemd_fip_process_stop(rpmemd->fip);
		if (ret) {
			RPMEMD_LOG(ERR, "!stopping fip process failed");
		}
	}
	rpmemd->fip_running = 0;
	return fip_ret;
}
/*
 * rpmemd_close_pool -- close pool and remove it if required
 *
 * Always closes the pool and frees the cached pool descriptor; when
 * remove is non-zero also deletes the pool from the database (used to
 * roll back a failed create). Returns the removal status (0 when no
 * removal was requested or it succeeded).
 */
static int
rpmemd_close_pool(struct rpmemd *rpmemd, int remove)
{
	int ret = 0;
	RPMEMD_LOG(NOTICE, "closing pool");
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
	RPMEMD_LOG(INFO, "pool closed");
	if (remove) {
		RPMEMD_LOG(NOTICE, "removing '%s'", rpmemd->pool_desc);
		ret = rpmemd_db_pool_remove(rpmemd->db,
				rpmemd->pool_desc, 0, 0);
		if (ret) {
			RPMEMD_LOG(ERR, "!removing pool '%s' failed",
					rpmemd->pool_desc);
		} else {
			RPMEMD_LOG(INFO, "removed '%s'", rpmemd->pool_desc);
		}
	}
	free(rpmemd->pool_desc);
	return ret;
}
/*
 * rpmemd_req_cleanup -- cleanup in-band connection and all resources allocated
 * during open/create requests
 *
 * No-op unless the in-band thread was started. A pool that was created
 * (not opened) in this session is removed when the in-band shutdown
 * failed, so a broken create does not leave a pool behind.
 */
static void
rpmemd_req_cleanup(struct rpmemd *rpmemd)
{
	if (!rpmemd->fip_running)
		return;
	int ret;
	ret = rpmemd_fip_stop(rpmemd);
	if (!ret) {
		rpmemd_fip_close(rpmemd->fip);
		rpmemd_fip_fini(rpmemd->fip);
	}
	int remove = rpmemd->created && ret;
	rpmemd_close_pool(rpmemd, remove);
}
/*
 * rpmemd_req_create -- handle create request
 *
 * Creates the requested pool, validates it, sets up the fabric provider
 * and sends the create response; on success the in-band thread is
 * started. On failure the goto ladder unwinds in reverse order of
 * acquisition, removes the just-created pool, sends an error response
 * (unless the response itself failed -- err_send) and marks the
 * connection as closing.
 */
static int
rpmemd_req_create(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req,
	const struct rpmem_pool_attr *pool_attr)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "create request:");
	rpmemd_print_req_attr(req);
	RPMEMD_LOG(NOTICE, "pool attributes:");
	rpmemd_print_pool_attr(pool_attr);
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	int status = 0;
	int err_send = 1;	/* send an error response on failure */
	struct rpmem_resp_attr resp;
	memset(&resp, 0, sizeof(resp));
	if (rpmemd->pool) {
		/* only one pool per connection */
		RPMEMD_LOG(ERR, "pool already opened");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_pool_opened;
	}
	rpmemd->pool_desc = strdup(req->pool_desc);
	if (!rpmemd->pool_desc) {
		RPMEMD_LOG(ERR, "!allocating pool descriptor");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_strdup;
	}
	rpmemd->pool = rpmemd_db_pool_create(rpmemd->db,
			req->pool_desc, 0, pool_attr);
	if (!rpmemd->pool) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_pool_create;
	}
	/* remember we created the pool so cleanup can remove it */
	rpmemd->created = 1;
	ret = rpmemd_check_pool(rpmemd, req, &status);
	if (ret)
		goto err_pool_check;
	ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status);
	if (ret)
		goto err_fip_init;
	RPMEMD_LOG(NOTICE, "create request response: (status = %u)", status);
	if (!status)
		rpmemd_print_resp_attr(&resp);
	ret = rpmemd_obc_create_resp(obc, status, &resp);
	if (ret)
		goto err_create_resp;
	ret = rpmemd_fip_start_thread(rpmemd);
	if (ret)
		goto err_fip_start;
	return 0;
err_fip_start:
err_create_resp:
	err_send = 0;	/* response already attempted -- do not resend */
	rpmemd_fip_fini(rpmemd->fip);
err_fip_init:
err_pool_check:
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
	rpmemd_db_pool_remove(rpmemd->db, req->pool_desc, 0, 0);
err_pool_create:
	free(rpmemd->pool_desc);
err_strdup:
err_pool_opened:
	if (err_send)
		ret = rpmemd_obc_create_resp(obc, status, &resp);
	rpmemd->closing = 1;
	return ret;
}
/*
 * rpmemd_req_open -- handle open request
 *
 * Opens an existing pool, validates it, sets up the fabric provider and
 * sends the open response (including the pool attributes read from the
 * pool); on success the in-band thread is started. On failure the goto
 * ladder unwinds in reverse order, sends an error response (unless the
 * response itself failed -- err_send) and marks the connection closing.
 * Unlike create, the pool is never removed on failure.
 */
static int
rpmemd_req_open(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_req_attr *req)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "open request:");
	rpmemd_print_req_attr(req);
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	int ret;
	int status = 0;
	int err_send = 1;	/* send an error response on failure */
	struct rpmem_resp_attr resp;
	memset(&resp, 0, sizeof(resp));
	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));
	if (rpmemd->pool) {
		/* only one pool per connection */
		RPMEMD_LOG(ERR, "pool already opened");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_pool_opened;
	}
	rpmemd->pool_desc = strdup(req->pool_desc);
	if (!rpmemd->pool_desc) {
		RPMEMD_LOG(ERR, "!allocating pool descriptor");
		ret = -1;
		status = RPMEM_ERR_FATAL;
		goto err_strdup;
	}
	rpmemd->pool = rpmemd_db_pool_open(rpmemd->db,
			req->pool_desc, 0, &pool_attr);
	if (!rpmemd->pool) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_pool_open;
	}
	RPMEMD_LOG(NOTICE, "pool attributes:");
	rpmemd_print_pool_attr(&pool_attr);
	ret = rpmemd_check_pool(rpmemd, req, &status);
	if (ret)
		goto err_pool_check;
	ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status);
	if (ret)
		goto err_fip_init;
	RPMEMD_LOG(NOTICE, "open request response: (status = %u)", status);
	if (!status)
		rpmemd_print_resp_attr(&resp);
	ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr);
	if (ret)
		goto err_open_resp;
	ret = rpmemd_fip_start_thread(rpmemd);
	if (ret)
		goto err_fip_start;
	return 0;
err_fip_start:
err_open_resp:
	err_send = 0;	/* response already attempted -- do not resend */
	rpmemd_fip_fini(rpmemd->fip);
err_fip_init:
err_pool_check:
	rpmemd_db_pool_close(rpmemd->db, rpmemd->pool);
err_pool_open:
	free(rpmemd->pool_desc);
err_strdup:
err_pool_opened:
	if (err_send)
		ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr);
	rpmemd->closing = 1;
	return ret;
}
/*
 * rpmemd_req_close -- handle close request
 *
 * Stops the in-band connection, closes the pool and sends the close
 * response. A pool that was created in this session is removed when
 * shutdown failed or the client requested removal via
 * RPMEM_CLOSE_FLAGS_REMOVE. Always marks the connection as closing so
 * the main loop terminates.
 */
static int
rpmemd_req_close(struct rpmemd_obc *obc, void *arg, int flags)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "close request");
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	rpmemd->closing = 1;
	int ret;
	int status = 0;
	if (!rpmemd->pool) {
		RPMEMD_LOG(ERR, "pool not opened");
		status = RPMEM_ERR_FATAL;
		return rpmemd_obc_close_resp(obc, status);
	}
	ret = rpmemd_fip_stop(rpmemd);
	if (ret) {
		status = RPMEM_ERR_FATAL;
	} else {
		rpmemd_fip_close(rpmemd->fip);
		rpmemd_fip_fini(rpmemd->fip);
	}
	int remove = rpmemd->created &&
		(status || (flags & RPMEM_CLOSE_FLAGS_REMOVE));
	if (rpmemd_close_pool(rpmemd, remove))
		RPMEMD_LOG(ERR, "closing pool failed");
	RPMEMD_LOG(NOTICE, "close request response (status = %u)", status);
	ret = rpmemd_obc_close_resp(obc, status);
	return ret;
}
/*
 * rpmemd_req_set_attr -- handle set attributes request
 *
 * Writes the new pool attributes to the opened pool and responds with
 * the resulting status. A pool must already be opened (asserted). The
 * err_send flag prevents a second response attempt if sending the
 * response itself failed.
 */
static int
rpmemd_req_set_attr(struct rpmemd_obc *obc, void *arg,
	const struct rpmem_pool_attr *pool_attr)
{
	RPMEMD_ASSERT(arg != NULL);
	RPMEMD_LOG(NOTICE, "set attributes request");
	struct rpmemd *rpmemd = (struct rpmemd *)arg;
	RPMEMD_ASSERT(rpmemd->pool != NULL);
	int ret;
	int status = 0;
	int err_send = 1;
	ret = rpmemd_db_pool_set_attr(rpmemd->pool, pool_attr);
	if (ret) {
		ret = -1;
		status = rpmemd_db_get_status(errno);
		goto err_set_attr;
	}
	RPMEMD_LOG(NOTICE, "new pool attributes:");
	rpmemd_print_pool_attr(pool_attr);
	ret = rpmemd_obc_set_attr_resp(obc, status);
	if (ret)
		goto err_set_attr_resp;
	return ret;
err_set_attr_resp:
	err_send = 0;
err_set_attr:
	if (err_send)
		ret = rpmemd_obc_set_attr_resp(obc, status);
	return ret;
}
/*
 * rpmemd_req -- dispatch table with the handlers for all out-of-band
 * requests, passed to rpmemd_obc_process() from main()
 */
static struct rpmemd_obc_requests rpmemd_req = {
	.create = rpmemd_req_create,
	.open = rpmemd_req_open,
	.close = rpmemd_req_close,
	.set_attr = rpmemd_req_set_attr,
};
/*
 * rpmemd_print_info -- print basic info and configuration
 *
 * Logged once at startup; the ssh connection and user come from the
 * environment, the rest from the parsed configuration. The RPMEMD_DBG
 * entries are compiled in only for DEBUG builds.
 */
static void
rpmemd_print_info(struct rpmemd *rpmemd)
{
	RPMEMD_LOG(NOTICE, "ssh connection: %s",
			_str(os_getenv("SSH_CONNECTION")));
	RPMEMD_LOG(NOTICE, "user: %s", _str(os_getenv("USER")));
	RPMEMD_LOG(NOTICE, "configuration");
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool set directory: '%s'",
			_str(rpmemd->config.poolset_dir));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(rpmemd->persist_method));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "number of threads: %lu",
			rpmemd->config.nthreads);
	RPMEMD_DBG(RPMEMD_LOG_INDENT "persist APM: %s",
			bool2str(rpmemd->config.persist_apm));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "persist GPSPM: %s",
			bool2str(rpmemd->config.persist_general));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "use syslog: %s",
			bool2str(rpmemd->config.use_syslog));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "log file: %s",
			_str(rpmemd->config.log_file));
	RPMEMD_DBG(RPMEMD_LOG_INDENT "log level: %s",
			rpmemd_log_level_to_str(rpmemd->config.log_level));
}
/*
 * main -- rpmemd entry point
 *
 * The daemon talks to the client over stdin/stdout (the ssh channel):
 * it initializes logging twice (defaults first, then with the parsed
 * configuration), optionally runs in one-shot pool-removal mode, and
 * otherwise reports status 0 to the client and processes out-of-band
 * requests until the connection is closing. The error ladder at the
 * bottom releases resources in reverse order and, unless in removal
 * mode, reports the errno-derived status back to the client.
 */
int
main(int argc, char *argv[])
{
	util_init();
	int send_status = 1;
	int ret = 1;
	struct rpmemd *rpmemd = calloc(1, sizeof(*rpmemd));
	if (!rpmemd) {
		RPMEMD_LOG(ERR, "!calloc");
		goto err_rpmemd;
	}
	/* out-of-band channel rides on the ssh stdin/stdout */
	rpmemd->obc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO);
	if (!rpmemd->obc) {
		RPMEMD_LOG(ERR, "out-of-band connection initialization");
		goto err_obc;
	}
	/* temporary default logging until the config is known */
	if (rpmemd_log_init(DAEMON_NAME, NULL, 0)) {
		RPMEMD_LOG(ERR, "logging subsystem initialization failed");
		goto err_log_init;
	}
	if (rpmemd_config_read(&rpmemd->config, argc, argv) != 0) {
		RPMEMD_LOG(ERR, "reading configuration failed");
		goto err_config;
	}
	/* re-initialize logging with the configured target and level */
	rpmemd_log_close();
	rpmemd_log_level = rpmemd->config.log_level;
	if (rpmemd_log_init(DAEMON_NAME, rpmemd->config.log_file,
			rpmemd->config.use_syslog)) {
		RPMEMD_LOG(ERR, "logging subsystem initialization"
			" failed (%s, %d)", rpmemd->config.log_file,
			rpmemd->config.use_syslog);
		goto err_log_init_config;
	}
	RPMEMD_LOG(INFO, "%s version %s", DAEMON_NAME, SRCVERSION);
	rpmemd->persist_method = rpmemd_get_pm(&rpmemd->config);
	rpmemd->db = rpmemd_db_init(rpmemd->config.poolset_dir, 0666);
	if (!rpmemd->db) {
		RPMEMD_LOG(ERR, "!pool set db initialization");
		goto err_db_init;
	}
	if (rpmemd->config.rm_poolset) {
		/* one-shot removal mode: no status is sent to the client */
		RPMEMD_LOG(INFO, "removing '%s'",
				rpmemd->config.rm_poolset);
		if (rpmemd_db_pool_remove(rpmemd->db,
				rpmemd->config.rm_poolset,
				rpmemd->config.force,
				rpmemd->config.pool_set)) {
			RPMEMD_LOG(ERR, "removing '%s' failed",
					rpmemd->config.rm_poolset);
			ret = errno;
		} else {
			RPMEMD_LOG(NOTICE, "removed '%s'",
					rpmemd->config.rm_poolset);
			ret = 0;
		}
		send_status = 0;
		goto out_rm;
	}
	/* tell the client initialization succeeded */
	ret = rpmemd_obc_status(rpmemd->obc, 0);
	if (ret) {
		RPMEMD_LOG(ERR, "writing status failed");
		goto err_status;
	}
	rpmemd_print_info(rpmemd);
	/* request loop -- handlers set rpmemd->closing to terminate */
	while (!ret) {
		ret = rpmemd_obc_process(rpmemd->obc, &rpmemd_req, rpmemd);
		if (ret) {
			RPMEMD_LOG(ERR, "out-of-band connection"
					" process failed");
			goto err;
		}
		if (rpmemd->closing)
			break;
	}
	rpmemd_db_fini(rpmemd->db);
	rpmemd_config_free(&rpmemd->config);
	rpmemd_log_close();
	rpmemd_obc_fini(rpmemd->obc);
	free(rpmemd);
	return 0;
err:
	rpmemd_req_cleanup(rpmemd);
err_status:
out_rm:
	rpmemd_db_fini(rpmemd->db);
err_db_init:
err_log_init_config:
	rpmemd_config_free(&rpmemd->config);
err_config:
	rpmemd_log_close();
err_log_init:
	if (send_status) {
		if (rpmemd_obc_status(rpmemd->obc, (uint32_t)errno))
			RPMEMD_LOG(ERR, "writing status failed");
	}
	rpmemd_obc_fini(rpmemd->obc);
err_obc:
	free(rpmemd);
err_rpmemd:
	return ret;
}
| 18,497 | 22.007463 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_log.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_log.h -- rpmemd logging functions declarations
*/
#include <string.h>
#include "util.h"
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* The tab character is not allowed in rpmemd log,
* because it is not well handled by syslog.
* Please use RPMEMD_LOG_INDENT instead.
*/
#define RPMEMD_LOG_INDENT " "
#ifdef DEBUG
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, NULL, 0, fmt, ## arg);\
} while (0)
#endif
#ifdef DEBUG
#define RPMEMD_DBG(fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(_RPD_LOG_DBG, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_DBG(fmt, arg...) do {} while (0)
#endif
#define RPMEMD_ERR(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
} while (0)
#define RPMEMD_FATAL(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
abort();\
} while (0)
#define RPMEMD_ASSERT(cond) do {\
if (!(cond)) {\
rpmemd_log(RPD_LOG_ERR, __FILE__, __LINE__,\
"assertion fault: %s", #cond);\
abort();\
}\
} while (0)
/* log levels, ordered from most to least severe */
enum rpmemd_log_level {
	RPD_LOG_ERR,
	RPD_LOG_WARN,
	RPD_LOG_NOTICE,
	RPD_LOG_INFO,
	_RPD_LOG_DBG, /* disallow to use this with LOG macro */
	MAX_RPD_LOG, /* number of log levels, not a valid level itself */
};
enum rpmemd_log_level rpmemd_log_level_from_str(const char *str);
const char *rpmemd_log_level_to_str(enum rpmemd_log_level level);
extern enum rpmemd_log_level rpmemd_log_level;
int rpmemd_log_init(const char *ident, const char *fname, int use_syslog);
void rpmemd_log_close(void);
int rpmemd_prefix(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void rpmemd_log(enum rpmemd_log_level level, const char *fname,
int lineno, const char *fmt, ...) FORMAT_PRINTF(4, 5);
| 1,991 | 25.210526 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_util.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* rpmemd_util.c -- rpmemd utility functions definitions
*/
#include <stdlib.h>
#include <unistd.h>
#include "libpmem.h"
#include "rpmem_common.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
/*
 * rpmemd_pmem_persist -- pmem_persist wrapper required to unify function
 * pointer type with pmem_msync
 *
 * pmem_persist returns void, so this wrapper always reports success (0).
 */
int
rpmemd_pmem_persist(const void *addr, size_t len)
{
	pmem_persist(addr, len);
	return 0;
}
/*
 * rpmemd_flush_fatal -- APM specific flush function which should never be
 * called because APM does not require flushes
 *
 * RPMEMD_FATAL aborts the process, so the return below is unreachable;
 * it only exists to satisfy -Wreturn-type on compilers that cannot see
 * through the macro.
 */
int
rpmemd_flush_fatal(const void *addr, size_t len)
{
	RPMEMD_FATAL("rpmemd_flush_fatal should never be called");
	return -1;	/* unreachable */
}
/*
 * rpmemd_persist_to_str -- return a printable name for one of the known
 * persist function pointers, or NULL for an unrecognized pointer
 */
static const char *
rpmemd_persist_to_str(int (*persist)(const void *addr, size_t len))
{
	if (persist == rpmemd_pmem_persist)
		return "pmem_persist";

	if (persist == pmem_msync)
		return "pmem_msync";

	if (persist == rpmemd_flush_fatal)
		return "none";

	return NULL;
}
/*
 * rpmem_print_pm_policy -- print persistency method policy
 *
 * Logs the selected persist method and the name of the flush function
 * (via rpmemd_persist_to_str) chosen by rpmemd_apply_pm_policy.
 */
static void
rpmem_print_pm_policy(enum rpmem_persist_method persist_method,
	int (*persist)(const void *addr, size_t len))
{
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s",
			rpmem_persist_method_to_str(persist_method));
	RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist flush: %s",
			rpmemd_persist_to_str(persist));
}
/*
 * rpmem_memcpy_msync -- memcpy and msync
 *
 * Non-pmem variant of a persistent memcpy: copies without flushing
 * (PMEM_F_MEM_NOFLUSH) and then msyncs the destination range.
 */
static void *
rpmem_memcpy_msync(void *pmemdest, const void *src, size_t len)
{
	void *ret = pmem_memcpy(pmemdest, src, len, PMEM_F_MEM_NOFLUSH);
	pmem_msync(pmemdest, len);
	return ret;
}
/*
 * rpmemd_apply_pm_policy -- choose the persistency method and the flush
 * function according to the pool type and the persistency method read from the
 * config
 *
 * APM is only usable on real pmem; for non-pmem pools it is silently
 * downgraded to GPSPM with msync-based flushing. The selected policy is
 * logged before returning. Returns 0 on success, -1 on an invalid
 * persist method.
 */
int
rpmemd_apply_pm_policy(enum rpmem_persist_method *persist_method,
	int (**persist)(const void *addr, size_t len),
	void *(**memcpy_persist)(void *pmemdest, const void *src, size_t len),
	const int is_pmem)
{
	switch (*persist_method) {
	case RPMEM_PM_APM:
		if (is_pmem) {
			*persist_method = RPMEM_PM_APM;
			/* APM never flushes -- this must not be called */
			*persist = rpmemd_flush_fatal;
		} else {
			*persist_method = RPMEM_PM_GPSPM;
			*persist = pmem_msync;
		}
		break;
	case RPMEM_PM_GPSPM:
		*persist_method = RPMEM_PM_GPSPM;
		*persist = is_pmem ? rpmemd_pmem_persist : pmem_msync;
		break;
	default:
		RPMEMD_FATAL("invalid persist method: %d", *persist_method);
		return -1;
	}
	/* this is for RPMEM_PERSIST_INLINE */
	if (is_pmem)
		*memcpy_persist = pmem_memcpy_persist;
	else
		*memcpy_persist = rpmem_memcpy_msync;
	RPMEMD_LOG(NOTICE, "persistency policy:");
	rpmem_print_pm_policy(*persist_method, *persist);
	return 0;
}
| 2,839 | 22.666667 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_db.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_db.h -- internal definitions for rpmemd database of pool set files
*/
struct rpmemd_db;
struct rpmem_pool_attr;
/*
 * struct rpmemd_db_pool -- remote pool context
 */
struct rpmemd_db_pool {
	void *pool_addr;	/* mapped address of the pool */
	size_t pool_size;	/* size of the mapped pool in bytes */
	struct pool_set *set;	/* underlying pool set */
};
struct rpmemd_db *rpmemd_db_init(const char *root_dir, mode_t mode);
struct rpmemd_db_pool *rpmemd_db_pool_create(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size,
const struct rpmem_pool_attr *rattr);
struct rpmemd_db_pool *rpmemd_db_pool_open(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr);
int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
int force, int pool_set);
int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
const struct rpmem_pool_attr *rattr);
void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp);
void rpmemd_db_fini(struct rpmemd_db *db);
int rpmemd_db_check_dir(struct rpmemd_db *db);
int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool);
| 1,132 | 32.323529 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_obc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmemd_obc.c -- rpmemd out-of-band connection definitions
*/
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <netdb.h>
#include "librpmem.h"
#include "rpmemd_log.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmemd_obc.h"
/*
 * rpmemd_obc -- out-of-band connection handle; in rpmemd these are the
 * stdin/stdout descriptors of the ssh channel
 */
struct rpmemd_obc {
	int fd_in;	/* descriptor requests are read from */
	int fd_out;	/* descriptor responses are written to */
};
/*
 * rpmemd_obc_check_proto_ver -- verify that the client speaks exactly the
 * protocol version this daemon implements; returns 0 on match, -1 otherwise
 */
static int
rpmemd_obc_check_proto_ver(unsigned major, unsigned minor)
{
	if (major == RPMEM_PROTO_MAJOR && minor == RPMEM_PROTO_MINOR)
		return 0;

	RPMEMD_LOG(ERR, "unsupported protocol version -- %u.%u",
		major, minor);
	return -1;
}
/*
 * rpmemd_obc_check_msg_hdr -- check message header
 *
 * Validates that the message type is one a server may receive and that
 * the declared size can at least hold the header itself. Returns 0 if
 * valid, -1 otherwise.
 */
static int
rpmemd_obc_check_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
	switch (hdrp->type) {
	case RPMEM_MSG_TYPE_OPEN:
	case RPMEM_MSG_TYPE_CREATE:
	case RPMEM_MSG_TYPE_CLOSE:
	case RPMEM_MSG_TYPE_SET_ATTR:
		/* all messages from obc to server are fine */
		break;
	default:
		RPMEMD_LOG(ERR, "invalid message type -- %u", hdrp->type);
		return -1;
	}
	if (hdrp->size < sizeof(struct rpmem_msg_hdr)) {
		RPMEMD_LOG(ERR, "invalid message size -- %lu", hdrp->size);
		return -1;
	}
	return 0;
}
/*
 * rpmemd_obc_check_pool_desc -- check pool descriptor
 *
 * Validates the variable-length pool descriptor attached to a message:
 * the total message size must match the fixed part (msg_size) plus the
 * descriptor, and the descriptor must be a non-empty null-terminated
 * string whose declared size matches its actual length. The
 * null-termination check precedes the strlen call, so strlen is safe.
 * Returns 0 if valid, -1 otherwise.
 */
static int
rpmemd_obc_check_pool_desc(struct rpmem_msg_hdr *hdrp, size_t msg_size,
	struct rpmem_msg_pool_desc *pool_desc)
{
	size_t body_size = msg_size + pool_desc->size;
	if (hdrp->size != body_size) {
		RPMEMD_LOG(ERR, "message and pool descriptor size mismatch "
			"-- is %lu should be %lu", hdrp->size, body_size);
		return -1;
	}
	if (pool_desc->size < 2) {
		RPMEMD_LOG(ERR, "invalid pool descriptor size -- %u "
			"(must be >= 2)", pool_desc->size);
		return -1;
	}
	if (pool_desc->desc[pool_desc->size - 1] != '\0') {
		RPMEMD_LOG(ERR, "invalid pool descriptor "
			"(must be null-terminated string)");
		return -1;
	}
	size_t len = strlen((char *)pool_desc->desc) + 1;
	if (pool_desc->size != len) {
		RPMEMD_LOG(ERR, "invalid pool descriptor size -- is %lu "
			"should be %u", len, pool_desc->size);
		return -1;
	}
	return 0;
}
/*
 * rpmemd_obc_check_provider -- verify the provider identifier is within
 * the valid range (non-zero and below MAX_RPMEM_PROV)
 */
static int
rpmemd_obc_check_provider(uint32_t provider)
{
	if (provider != 0 && provider < MAX_RPMEM_PROV)
		return 0;

	RPMEMD_LOG(ERR, "invalid provider -- %u", provider);
	return -1;
}
/*
 * rpmemd_obc_ntoh_check_msg_create -- convert and check create request message
 *
 * Converts the message from network to host byte order in place, then
 * validates protocol version, pool descriptor and provider. Returns 0
 * if the message is valid, -1 otherwise.
 */
static int
rpmemd_obc_ntoh_check_msg_create(struct rpmem_msg_hdr *hdrp)
{
	int ret;
	struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp;
	rpmem_ntoh_msg_create(msg);
	ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_provider(msg->c.provider);
	if (ret)
		return ret;
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_open -- convert and check open request message
 *
 * Mirrors rpmemd_obc_ntoh_check_msg_create: byte-order conversion in
 * place, then protocol version, pool descriptor and provider checks.
 */
static int
rpmemd_obc_ntoh_check_msg_open(struct rpmem_msg_hdr *hdrp)
{
	int ret;
	struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp;
	rpmem_ntoh_msg_open(msg);
	ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc);
	if (ret)
		return ret;
	ret = rpmemd_obc_check_provider(msg->c.provider);
	if (ret)
		return ret;
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_close -- convert and check close request message
 *
 * Only performs the byte-order conversion; a close message carries no
 * fields that need further validation.
 */
static int
rpmemd_obc_ntoh_check_msg_close(struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp;
	rpmem_ntoh_msg_close(msg);
	/* nothing to do */
	return 0;
}
/*
 * rpmemd_obc_ntoh_check_msg_set_attr -- convert and check set attributes
 * request message
 *
 * Only performs the byte-order conversion; a set-attr message carries
 * no fields that need further validation here.
 */
static int
rpmemd_obc_ntoh_check_msg_set_attr(struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp;
	rpmem_ntoh_msg_set_attr(msg);
	/* nothing to do */
	return 0;
}
/* per-message-type convert-and-validate handler */
typedef int (*rpmemd_obc_ntoh_check_msg_fn)(struct rpmem_msg_hdr *hdrp);

/*
 * rpmemd_obc_ntoh_check_msg -- dispatch table indexed by message type;
 * rpmemd_obc_check_msg_hdr guarantees the type is one of these entries
 */
static rpmemd_obc_ntoh_check_msg_fn rpmemd_obc_ntoh_check_msg[] = {
	[RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_ntoh_check_msg_create,
	[RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_ntoh_check_msg_open,
	[RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_ntoh_check_msg_close,
	[RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_ntoh_check_msg_set_attr,
};
/*
 * rpmemd_obc_process_create -- process create request
 *
 * Unpacks the request and pool attributes from the wire message and
 * invokes the registered create callback. All-zero pool attributes are
 * passed on as NULL.
 */
static int
rpmemd_obc_process_create(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp;
	struct rpmem_req_attr req = {
		.pool_size = msg->c.pool_size,
		.nlanes = (unsigned)msg->c.nlanes,
		.pool_desc = (char *)msg->pool_desc.desc,
		.provider = (enum rpmem_provider)msg->c.provider,
		.buff_size = msg->c.buff_size,
	};
	struct rpmem_pool_attr *rattr = NULL;
	struct rpmem_pool_attr rpmem_attr;
	unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr);
	if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr)))
		rattr = &rpmem_attr;
	return req_cb->create(obc, arg, &req, rattr);
}
/*
* rpmemd_obc_process_open -- process open request
*/
static int
rpmemd_obc_process_open(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp;

	/* translate the wire-format request into the internal form */
	struct rpmem_req_attr req = {
		.pool_size = msg->c.pool_size,
		.nlanes = (unsigned)msg->c.nlanes,
		.pool_desc = (const char *)msg->pool_desc.desc,
		.provider = (enum rpmem_provider)msg->c.provider,
		.buff_size = msg->c.buff_size,
	};

	return req_cb->open(obc, arg, &req);
}
/*
* rpmemd_obc_process_close -- process close request
*/
static int
rpmemd_obc_process_close(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp)
{
	struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp;

	/* forward the close flags straight to the registered callback */
	return req_cb->close(obc, arg, (int)msg->flags);
}
/*
* rpmemd_obc_process_set_attr -- process set attributes request
*/
static int
rpmemd_obc_process_set_attr(struct rpmemd_obc *obc,
struct rpmemd_obc_requests *req_cb, void *arg,
struct rpmem_msg_hdr *hdrp)
{
struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp;
struct rpmem_pool_attr *rattr = NULL;
struct rpmem_pool_attr rpmem_attr;
unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr);
if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr)))
rattr = &rpmem_attr;
return req_cb->set_attr(obc, arg, rattr);
}
/* per-message-type request processor invoking the user-supplied callback */
typedef int (*rpmemd_obc_process_fn)(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg,
	struct rpmem_msg_hdr *hdrp);
/* dispatch table indexed by the (already validated) message type */
static rpmemd_obc_process_fn rpmemd_obc_process_cb[] = {
	[RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_process_create,
	[RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_process_open,
	[RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_process_close,
	[RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_process_set_attr,
};
/*
* rpmemd_obc_recv -- wrapper for read and decode data function
*/
static inline int
rpmemd_obc_recv(struct rpmemd_obc *obc, void *buff, size_t len)
{
	/* blocking read of exactly len bytes from the in-band fd */
	return rpmem_xread(obc->fd_in, buff, len, 0);
}
/*
 * rpmemd_obc_send -- wrapper for encode and write data function
 */
static inline int
rpmemd_obc_send(struct rpmemd_obc *obc, const void *buff, size_t len)
{
	/* blocking write of exactly len bytes to the out-band fd */
	return rpmem_xwrite(obc->fd_out, buff, len, 0);
}
/*
* rpmemd_obc_msg_recv -- receive and check request message
*
* Return values:
* 0 - success
* < 0 - error
* 1 - obc disconnected
*/
static int
rpmemd_obc_msg_recv(struct rpmemd_obc *obc,
	struct rpmem_msg_hdr **hdrpp)
{
	struct rpmem_msg_hdr hdr;	/* header in host byte order */
	struct rpmem_msg_hdr nhdr;	/* header as received (network order) */
	struct rpmem_msg_hdr *hdrp;	/* full message buffer (malloc'd) */
	int ret;
	/* read the fixed-size header first */
	ret = rpmemd_obc_recv(obc, &nhdr, sizeof(nhdr));
	if (ret == 1) {
		RPMEMD_LOG(NOTICE, "out-of-band connection disconnected");
		return 1;
	}
	if (ret < 0) {
		RPMEMD_LOG(ERR, "!receiving message header failed");
		return ret;
	}
	/* convert a copy so the raw network-order bytes stay available */
	memcpy(&hdr, &nhdr, sizeof(hdr));
	rpmem_ntoh_msg_hdr(&hdr);
	/* validate type/size before hdr.size is trusted for allocation */
	ret = rpmemd_obc_check_msg_hdr(&hdr);
	if (ret) {
		RPMEMD_LOG(ERR, "parsing message header failed");
		return ret;
	}
	/* allocate the full message and receive the remaining body */
	hdrp = malloc(hdr.size);
	if (!hdrp) {
		RPMEMD_LOG(ERR, "!allocating message buffer failed");
		return -1;
	}
	memcpy(hdrp, &nhdr, sizeof(*hdrp));
	size_t body_size = hdr.size - sizeof(hdr);
	ret = rpmemd_obc_recv(obc, hdrp->body, body_size);
	if (ret) {
		/* NOTE(review): a disconnect here (ret == 1) is reported as */
		/* an error and mapped to -1 -- confirm that is intended */
		RPMEMD_LOG(ERR, "!receiving message body failed");
		goto err_recv_body;
	}
	/* byte-swap and validate the body via the per-type handler */
	ret = rpmemd_obc_ntoh_check_msg[hdr.type](hdrp);
	if (ret) {
		RPMEMD_LOG(ERR, "parsing message body failed");
		goto err_body;
	}
	/* ownership of the buffer passes to the caller */
	*hdrpp = hdrp;
	return 0;
err_body:
err_recv_body:
	free(hdrp);
	return -1;
}
/*
* rpmemd_obc_init -- initialize rpmemd
*/
struct rpmemd_obc *
rpmemd_obc_init(int fd_in, int fd_out)
{
	struct rpmemd_obc *obc = calloc(1, sizeof(*obc));
	if (!obc) {
		RPMEMD_LOG(ERR, "!allocating obc failed");
		return NULL;
	}

	/* the descriptors remain owned by the caller */
	obc->fd_in = fd_in;
	obc->fd_out = fd_out;

	return obc;
}
/*
* rpmemd_obc_fini -- destroy obc
*/
void
rpmemd_obc_fini(struct rpmemd_obc *obc)
{
	/* does not close fd_in/fd_out -- they are owned by the caller */
	free(obc);
}
/*
 * rpmemd_obc_status -- sends initial status to the client
 */
int
rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status)
{
	/* NOTE(review): status is sent as-is, with no byte-order */
	/* conversion -- confirm against the client-side reader */
	return rpmemd_obc_send(obc, &status, sizeof(status));
}
/*
* rpmemd_obc_process -- wait for and process a message from client
*
* Return values:
* 0 - success
* < 0 - error
* 1 - client disconnected
*/
int
rpmemd_obc_process(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg)
{
	/* a complete callback table is mandatory */
	RPMEMD_ASSERT(req_cb != NULL);
	RPMEMD_ASSERT(req_cb->create != NULL);
	RPMEMD_ASSERT(req_cb->open != NULL);
	RPMEMD_ASSERT(req_cb->close != NULL);
	RPMEMD_ASSERT(req_cb->set_attr != NULL);

	struct rpmem_msg_hdr *msg = NULL;
	int ret = rpmemd_obc_msg_recv(obc, &msg);
	if (ret)
		return ret;

	RPMEMD_ASSERT(msg != NULL);

	/* dispatch by message type (validated during receive), then free */
	ret = rpmemd_obc_process_cb[msg->type](obc, req_cb, arg, msg);
	free(msg);

	return ret;
}
/*
* rpmemd_obc_create_resp -- send create request response message
*/
int
rpmemd_obc_create_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res)
{
	/* designated initializer zeroes all remaining members */
	struct rpmem_msg_create_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_CREATE_RESP,
			.size = sizeof(struct rpmem_msg_create_resp),
			.status = (uint32_t)status,
		},
		.ibc = {
			.port = res->port,
			.rkey = res->rkey,
			.raddr = res->raddr,
			.persist_method = res->persist_method,
			.nlanes = res->nlanes,
		},
	};
	/* convert to network byte order before sending */
	rpmem_hton_msg_create_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_open_resp -- send open request response message
 */
int
rpmemd_obc_open_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_msg_open_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_OPEN_RESP,
			.size = sizeof(struct rpmem_msg_open_resp),
			.status = (uint32_t)status,
		},
		.ibc = {
			.port = res->port,
			.rkey = res->rkey,
			.raddr = res->raddr,
			.persist_method = res->persist_method,
			.nlanes = res->nlanes,
		},
	};
	/* the opened pool's attributes travel back with the response */
	pack_rpmem_pool_attr(pool_attr, &resp.pool_attr);
	rpmem_hton_msg_open_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_close_resp -- send close request response message
 */
int
rpmemd_obc_close_resp(struct rpmemd_obc *obc,
	int status)
{
	struct rpmem_msg_close_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_CLOSE_RESP,
			.size = sizeof(struct rpmem_msg_close_resp),
			.status = (uint32_t)status,
		},
	};
	rpmem_hton_msg_close_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
/*
 * rpmemd_obc_set_attr_resp -- send set attributes request response message
 */
int
rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status)
{
	struct rpmem_msg_set_attr_resp resp = {
		.hdr = {
			.type = RPMEM_MSG_TYPE_SET_ATTR_RESP,
			.size = sizeof(struct rpmem_msg_set_attr_resp),
			.status = (uint32_t)status,
		},
	};
	rpmem_hton_msg_set_attr_resp(&resp);
	return rpmemd_obc_send(obc, &resp, sizeof(resp));
}
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_config.c -- rpmemd config source file
*/
#include <pwd.h>
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <inttypes.h>
#include "rpmemd.h"
#include "rpmemd_log.h"
#include "rpmemd_config.h"
#include "os.h"
/* initial config line buffer size; doubled whenever a line outgrows it */
#define CONFIG_LINE_SIZE_INIT 50
/* marker for "character not found on this line" */
#define INVALID_CHAR_POS UINT64_MAX
/* positions of the characters that structure a single config line */
struct rpmemd_special_chars_pos {
	uint64_t equal_char;	/* first '=' -- key/value separator */
	uint64_t comment_char;	/* first '#' -- start of a comment */
	uint64_t EOL_char;	/* end-of-line position */
};
/* long-option identifiers; values double as bit indices in option masks */
enum rpmemd_option {
	RPD_OPT_LOG_FILE,
	RPD_OPT_POOLSET_DIR,
	RPD_OPT_PERSIST_APM,
	RPD_OPT_PERSIST_GENERAL,
	RPD_OPT_USE_SYSLOG,
	RPD_OPT_LOG_LEVEL,
	RPD_OPT_RM_POOLSET,
	RPD_OPT_MAX_VALUE,
	RPD_OPT_INVALID = UINT64_MAX,
};
/* short-option string for getopt_long(3) */
static const char *optstr = "c:hVr:fst:";
/*
 * options -- cl and config file options
 *
 * Long-option names are also the keys accepted in the config file
 * (see parse_config_key()).
 */
static const struct option options[] = {
	{"config", required_argument, NULL, 'c'},
	{"help", no_argument, NULL, 'h'},
	{"version", no_argument, NULL, 'V'},
	{"log-file", required_argument, NULL, RPD_OPT_LOG_FILE},
	{"poolset-dir", required_argument, NULL, RPD_OPT_POOLSET_DIR},
	{"persist-apm", no_argument, NULL, RPD_OPT_PERSIST_APM},
	{"persist-general", no_argument, NULL, RPD_OPT_PERSIST_GENERAL},
	{"use-syslog", no_argument, NULL, RPD_OPT_USE_SYSLOG},
	{"log-level", required_argument, NULL, RPD_OPT_LOG_LEVEL},
	{"remove", required_argument, NULL, 'r'},
	{"force", no_argument, NULL, 'f'},
	{"pool-set", no_argument, NULL, 's'},
	{"nthreads", required_argument, NULL, 't'},
	{NULL, 0, NULL, 0},
};
/* indentation used for log-level value descriptions in the help text */
#define VALUE_INDENT " "
static const char * const help_str =
"\n"
"Options:\n"
" -c, --config <path> configuration file location\n"
" -r, --remove <poolset> remove pool described by given poolset file\n"
" -f, --force ignore errors when removing a pool\n"
" -t, --nthreads <num> number of processing threads\n"
" -h, --help display help message and exit\n"
" -V, --version display target daemon version and exit\n"
" --log-file <path> log file location\n"
" --poolset-dir <path> pool set files directory\n"
" --persist-apm enable Appliance Persistency Method\n"
" --persist-general enable General Server Persistency Mechanism\n"
" --use-syslog use syslog(3) for logging messages\n"
" --log-level <level> set log level value\n"
VALUE_INDENT "err error conditions\n"
VALUE_INDENT "warn warning conditions\n"
VALUE_INDENT "notice normal, but significant, condition\n"
VALUE_INDENT "info informational message\n"
VALUE_INDENT "debug debug-level message\n"
"\n"
"For complete documentation see %s(1) manual page.";
/*
* print_version -- (internal) prints version message
*/
static void
print_version(void)
{
	/* logged at ERR so it is always emitted regardless of log level */
	RPMEMD_LOG(ERR, "%s version %s", DAEMON_NAME, SRCVERSION);
}
/*
 * print_usage -- (internal) prints usage message
 */
static void
print_usage(const char *name)
{
	RPMEMD_LOG(ERR, "usage: %s [--version] [--help] [<args>]",
		name);
}
/*
 * print_help -- (internal) prints help message
 */
static void
print_help(const char *name)
{
	/* usage line, version banner, then the full option listing */
	print_usage(name);
	print_version();
	RPMEMD_LOG(ERR, help_str, DAEMON_NAME);
}
/*
* parse_config_string -- (internal) parse string value
*/
static inline char *
parse_config_string(const char *value)
{
	/* an empty value is a configuration error, not an empty setting */
	if (value[0] == '\0') {
		errno = EINVAL;
		return NULL;
	}

	/* caller owns the returned copy */
	char *copy = strdup(value);
	if (copy == NULL)
		RPMEMD_FATAL("!strdup");

	return copy;
}
/*
* parse_config_bool -- (internal) parse yes / no flag
*/
static inline int
parse_config_bool(bool *config_value, const char *value)
{
	/* a bare flag with no "=value" means enabled */
	if (value == NULL || strcmp("yes", value) == 0) {
		*config_value = true;
		return 0;
	}

	if (strcmp("no", value) == 0) {
		*config_value = false;
		return 0;
	}

	/* anything other than yes/no is rejected */
	errno = EINVAL;
	return -1;
}
/*
* set_option -- (internal) set single config option
*/
static int
set_option(enum rpmemd_option option, const char *value,
	struct rpmemd_config *config)
{
	int ret = 0;
	switch (option) {
	case RPD_OPT_LOG_FILE:
		/* replacing the log file implicitly turns off syslog */
		free(config->log_file);
		config->log_file = parse_config_string(value);
		if (config->log_file == NULL)
			return -1;
		else
			config->use_syslog = false;
		break;
	case RPD_OPT_POOLSET_DIR:
		/* previous value (default or earlier option) is released */
		free(config->poolset_dir);
		config->poolset_dir = parse_config_string(value);
		if (config->poolset_dir == NULL)
			return -1;
		break;
	case RPD_OPT_PERSIST_APM:
		ret = parse_config_bool(&config->persist_apm, value);
		break;
	case RPD_OPT_PERSIST_GENERAL:
		ret = parse_config_bool(&config->persist_general, value);
		break;
	case RPD_OPT_USE_SYSLOG:
		ret = parse_config_bool(&config->use_syslog, value);
		break;
	case RPD_OPT_LOG_LEVEL:
		/* MAX_RPD_LOG is the "unrecognized level" sentinel */
		config->log_level = rpmemd_log_level_from_str(value);
		if (config->log_level == MAX_RPD_LOG) {
			errno = EINVAL;
			return -1;
		}
		break;
	default:
		errno = EINVAL;
		return -1;
	}
	return ret;
}
/*
* get_config_line -- (internal) read single line from file
*/
static int
get_config_line(FILE *file, char **line, uint64_t *line_max,
	uint8_t *line_max_increased, struct rpmemd_special_chars_pos *pos)
{
	uint8_t line_complete = 0;
	uint64_t line_length = 0;	/* chars accumulated so far */
	char *line_part = *line;	/* where the next fgets() writes */
	do {
		char *ret = fgets(line_part,
			(int)(*line_max - line_length), file);
		/* EOF: pos->EOL_char stays INVALID_CHAR_POS (caller's memset) */
		if (ret == NULL)
			return 0;
		/* scan the freshly read part; fgets() guarantees a '\0' */
		/* inside the region, so the break below fires in bounds */
		for (uint64_t i = 0; i < *line_max; ++i) {
			if (line_part[i] == '\n')
				line_complete = 1;
			else if (line_part[i] == '\0') {
				line_length += i;
				/* '\0' before the buffer end => whole line read */
				if (line_length + 1 < *line_max)
					line_complete = 1;
				break;
			} else if (line_part[i] == '#' &&
					pos->comment_char == UINT64_MAX)
				pos->comment_char = line_length + i;
			else if (line_part[i] == '=' &&
					pos->equal_char == UINT64_MAX)
				pos->equal_char = line_length + i;
		}
		if (line_complete == 0) {
			/* line longer than the buffer -- double and re-read; */
			/* RPMEMD_FATAL aborts, so no leak on realloc failure */
			*line = realloc(*line, sizeof(char) * (*line_max) * 2);
			if (*line == NULL) {
				RPMEMD_FATAL("!realloc");
			}
			/* continue over the previous terminating '\0' */
			line_part = *line + *line_max - 1;
			line_length = *line_max - 1;
			*line_max *= 2;
			*line_max_increased = 1;
		}
	} while (line_complete != 1);
	/* index of the line terminator within *line */
	pos->EOL_char = line_length;
	return 0;
}
/*
* trim_line_element -- (internal) remove white characters
*/
/*
 * trim_line_element -- (internal) remove white characters
 *
 * Strips leading and trailing whitespace from line[start..end], writes a
 * terminating '\0' and returns a pointer to the first non-space character,
 * or NULL when the region contains no non-space characters.
 */
static char *
trim_line_element(char *line, uint64_t start, uint64_t end)
{
	/*
	 * Cast to unsigned char keeps isspace() well-defined when plain
	 * char is signed and holds a negative value (CERT STR37-C).
	 */
	for (; start <= end; ++start) {
		if (!isspace((unsigned char)line[start]))
			break;
	}
	for (; end > start; --end) {
		if (!isspace((unsigned char)line[end - 1]))
			break;
	}
	/*
	 * '>=' (was '==') also covers the case where even line[end] is a
	 * space and the first loop overshoots to end + 1; previously that
	 * returned a pointer one past the trimmed region.
	 */
	if (start >= end)
		return NULL;
	line[end] = '\0';
	return &line[start];
}
/*
* parse_config_key -- (internal) lookup config key
*/
static enum rpmemd_option
parse_config_key(const char *key)
{
for (int i = 0; options[i].name != 0; ++i) {
if (strcmp(key, options[i].name) == 0)
return (enum rpmemd_option)options[i].val;
}
return RPD_OPT_INVALID;
}
/*
* parse_config_line -- (internal) parse single config line
*
* Return newly written option flag. Store possible errors in errno.
*/
/*
 * parse_config_line -- (internal) parse single config line
 *
 * Splits the line into key and value around the first '=' (ignoring
 * anything after a '#'), applies the option unless it is masked out by
 * 'disabled'. Return newly written option flag. Store possible errors
 * in errno.
 */
static int
parse_config_line(char *line, struct rpmemd_special_chars_pos *pos,
	struct rpmemd_config *config, uint64_t disabled)
{
	/* an '=' inside a comment does not count as a separator */
	if (pos->comment_char < pos->equal_char)
		pos->equal_char = INVALID_CHAR_POS;
	uint64_t end_of_content = pos->comment_char != INVALID_CHAR_POS ?
		pos->comment_char : pos->EOL_char;
	if (pos->equal_char == INVALID_CHAR_POS) {
		/* no key=value pair; only blank/comment lines are valid */
		char *leftover = trim_line_element(line, 0, end_of_content);
		if (leftover != NULL) {
			errno = EINVAL;
			return -1;
		} else {
			return 0;
		}
	}
	char *key_name = trim_line_element(line, 0, pos->equal_char);
	char *value = trim_line_element(line, pos->equal_char + 1,
		end_of_content);
	if (key_name == NULL || value == NULL) {
		errno = EINVAL;
		return -1;
	}
	enum rpmemd_option key = parse_config_key(key_name);
	if (key != RPD_OPT_INVALID) {
		/*
		 * UINT64_C(1) (was plain int 1) performs the shift at
		 * 64-bit width, consistent with parse_cl_args().
		 */
		if ((disabled & (UINT64_C(1) << key)) == 0)
			if (set_option(key, value, config) != 0)
				return -1;
	} else {
		errno = EINVAL;
		return -1;
	}
	return 0;
}
/*
* parse_config_file -- (internal) parse config file
*/
/*
 * parse_config_file -- (internal) parse config file
 *
 * Reads 'filename' line by line and applies each key=value pair to
 * 'config', skipping options masked out by 'disabled' (already set on
 * the command line). When 'required' is zero a missing file is not an
 * error. Returns 0 on success, -1 on failure.
 */
static int
parse_config_file(const char *filename, struct rpmemd_config *config,
	uint64_t disabled, int required)
{
	RPMEMD_ASSERT(filename != NULL);
	FILE *file = os_fopen(filename, "r");
	if (file == NULL) {
		if (required) {
			RPMEMD_LOG(ERR, "!%s", filename);
			goto error_fopen;
		} else {
			goto optional_config_missing;
		}
	}
	uint8_t line_max_increased = 0;
	uint64_t line_max = CONFIG_LINE_SIZE_INIT;
	uint64_t line_num = 1;
	char *line = (char *)malloc(sizeof(char) * line_max);
	if (line == NULL) {
		RPMEMD_LOG(ERR, "!malloc");
		goto error_malloc_line;
	}
	/* parsing mutates the buffer, so each line is parsed on a copy */
	char *line_copy = (char *)malloc(sizeof(char) * line_max);
	if (line_copy == NULL) {
		RPMEMD_LOG(ERR, "!malloc");
		goto error_malloc_line_copy;
	}
	struct rpmemd_special_chars_pos pos;
	do {
		/* 0xff fill sets every position to INVALID_CHAR_POS */
		memset(&pos, 0xff, sizeof(pos));
		if (get_config_line(file, &line, &line_max,
				&line_max_increased, &pos) != 0)
			goto error;
		/* keep the copy buffer as large as the (grown) line buffer */
		if (line_max_increased) {
			char *line_new = (char *)realloc(line_copy,
				sizeof(char) * line_max);
			if (line_new == NULL) {
				/* was "!malloc"; this is a realloc failure */
				RPMEMD_LOG(ERR, "!realloc");
				goto error;
			}
			line_copy = line_new;
			line_max_increased = 0;
		}
		if (pos.EOL_char != INVALID_CHAR_POS) {
			strcpy(line_copy, line);
			int ret = parse_config_line(line_copy, &pos, config,
					disabled);
			if (ret != 0) {
				/* strip the newline for a cleaner message */
				size_t len = strlen(line);
				if (len > 0 && line[len - 1] == '\n')
					line[len - 1] = '\0';
				RPMEMD_LOG(ERR, "Invalid config file line at "
					"%s:%lu\n%s",
					filename, line_num, line);
				goto error;
			}
		}
		++line_num;
	} while (pos.EOL_char != INVALID_CHAR_POS);
	free(line_copy);
	free(line);
	fclose(file);
optional_config_missing:
	return 0;
error:
	free(line_copy);
error_malloc_line_copy:
	free(line);
error_malloc_line:
	fclose(file);
error_fopen:
	return -1;
}
/*
* parse_cl_args -- (internal) parse command line arguments
*/
static void
parse_cl_args(int argc, char *argv[], struct rpmemd_config *config,
	const char **config_file, uint64_t *cl_options)
{
	RPMEMD_ASSERT(argv != NULL);
	RPMEMD_ASSERT(config != NULL);
	int opt;
	int option_index = 0;
	while ((opt = getopt_long(argc, argv, optstr, options,
			&option_index)) != -1) {
		switch (opt) {
		case 'c':
			/* config file path is consumed later by the caller */
			(*config_file) = optarg;
			break;
		case 'r':
			config->rm_poolset = optarg;
			break;
		case 'f':
			config->force = true;
			break;
		case 's':
			config->pool_set = true;
			break;
		case 't':
			/* strtoul with full-string and range validation */
			errno = 0;
			char *endptr;
			config->nthreads = strtoul(optarg, &endptr, 10);
			if (errno || *endptr != '\0') {
				RPMEMD_LOG(ERR,
					"invalid number of threads -- '%s'",
					optarg);
				exit(-1);
			}
			break;
		case 'h':
			print_help(argv[0]);
			exit(0);
		case 'V':
			print_version();
			exit(0);
			break;
		default:
			/* long-only options (RPD_OPT_*); remember each one */
			/* in *cl_options so config files cannot override it */
			if (set_option((enum rpmemd_option)opt, optarg, config)
					== 0) {
				*cl_options |= (UINT64_C(1) << opt);
			} else {
				print_usage(argv[0]);
				exit(-1);
			}
		}
	}
}
/*
* get_home_dir -- (internal) return user home directory
*
* Function will lookup user home directory in order:
* 1. HOME environment variable
* 2. Password file entry using real user ID
*/
static void
get_home_dir(char *str, size_t size)
{
	/* HOME takes precedence; fall back to the passwd entry */
	char *home = os_getenv(HOME_ENV);
	if (home) {
		int r = util_snprintf(str, size, "%s", home);
		if (r < 0)
			RPMEMD_FATAL("!snprintf");
	} else {
		uid_t uid = getuid();
		struct passwd *pw = getpwuid(uid);
		if (pw == NULL)
			RPMEMD_FATAL("!getpwuid");
		int r = util_snprintf(str, size, "%s", pw->pw_dir);
		if (r < 0)
			RPMEMD_FATAL("!snprintf");
	}
}
/*
 * concat_dir_and_file_name -- (internal) concatenate directory and file name
 * into single string path
 */
static void
concat_dir_and_file_name(char *path, size_t size, const char *dir,
	const char *file)
{
	/* truncation/encoding errors are fatal -- paths must be complete */
	int r = util_snprintf(path, size, "%s/%s", dir, file);
	if (r < 0)
		RPMEMD_FATAL("!snprintf");
}
/*
* str_replace_home -- (internal) replace $HOME string with user home directory
*
* If function does not find $HOME string it will return haystack untouched.
* Otherwise it will allocate new string with $HOME replaced with provided
* home_dir path. haystack will be released and newly created string returned.
*/
/*
 * str_replace_home -- (internal) replace $HOME string with user home directory
 *
 * If function does not find $HOME string it will return haystack untouched.
 * Otherwise it will allocate new string with $HOME replaced with provided
 * home_dir path. haystack will be released and newly created string returned.
 */
static char *
str_replace_home(char *haystack, const char *home_dir)
{
	const size_t placeholder_len = strlen(HOME_STR_PLACEHOLDER);
	const size_t home_len = strlen(home_dir);
	size_t haystack_len = strlen(haystack);
	char *pos = strstr(haystack, HOME_STR_PLACEHOLDER);
	if (!pos)
		return haystack;
	const char *after = pos + placeholder_len;
	/*
	 * Do not expand e.g. "$HOMEDIR" -- the placeholder must end the
	 * token. Cast to unsigned char keeps isalnum() well-defined when
	 * plain char is signed and *after is negative (CERT STR37-C).
	 */
	if (isalnum((unsigned char)*after))
		return haystack;
	/* new length: drop the placeholder, add home dir, keep the NUL */
	haystack_len += home_len - placeholder_len + 1;
	char *buf = malloc(sizeof(char) * haystack_len);
	if (!buf)
		RPMEMD_FATAL("!malloc");
	/* split haystack at the placeholder and join the three pieces */
	*pos = '\0';
	int r = util_snprintf(buf, haystack_len, "%s%s%s", haystack, home_dir,
			after);
	if (r < 0)
		RPMEMD_FATAL("!snprintf");
	free(haystack);
	return buf;
}
/*
* config_set_default -- (internal) load default config
*/
static void
config_set_default(struct rpmemd_config *config, const char *poolset_dir)
{
	/* both strings are heap copies so set_option() may free() them */
	config->log_file = strdup(RPMEMD_DEFAULT_LOG_FILE);
	if (!config->log_file)
		RPMEMD_FATAL("!strdup");
	config->poolset_dir = strdup(poolset_dir);
	if (!config->poolset_dir)
		RPMEMD_FATAL("!strdup");
	config->persist_apm = false;
	config->persist_general = true;
	config->use_syslog = true;
	config->max_lanes = RPMEM_DEFAULT_MAX_LANES;
	config->log_level = RPD_LOG_ERR;
	config->rm_poolset = NULL;
	config->force = false;
	config->nthreads = RPMEM_DEFAULT_NTHREADS;
}
/*
* rpmemd_config_read -- read config from cl and config files
*
* cl param overwrites configuration from any config file. Config file are read
* in order:
* 1. Global config file
* 2. User config file
* or
* cl provided config file
*/
int
rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[])
{
	const char *cl_config_file = NULL;
	char user_config_file[PATH_MAX];
	char home_dir[PATH_MAX];
	uint64_t cl_options = 0;	/* bitmask of options set on the cl */
	get_home_dir(home_dir, PATH_MAX);
	config_set_default(config, home_dir);
	parse_cl_args(argc, argv, config, &cl_config_file, &cl_options);
	if (cl_config_file) {
		/* an explicitly requested config file must exist */
		if (parse_config_file(cl_config_file, config, cl_options, 1)) {
			rpmemd_config_free(config);
			return 1;
		}
	} else {
		/* global file first, then the per-user file; both optional */
		if (parse_config_file(RPMEMD_GLOBAL_CONFIG_FILE, config,
				cl_options, 0)) {
			rpmemd_config_free(config);
			return 1;
		}
		concat_dir_and_file_name(user_config_file, PATH_MAX, home_dir,
			RPMEMD_USER_CONFIG_FILE);
		if (parse_config_file(user_config_file, config, cl_options,
				0)) {
			rpmemd_config_free(config);
			return 1;
		}
	}
	/* expand a possible $HOME placeholder in the poolset directory */
	config->poolset_dir = str_replace_home(config->poolset_dir, home_dir);
	return 0;
}
/*
* rpmemd_config_free -- rpmemd config release
*/
void
rpmemd_config_free(struct rpmemd_config *config)
{
	/* only the heap-owned members; rm_poolset points into argv */
	free(config->log_file);
	free(config->poolset_dir);
}
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_db.c -- rpmemd database of pool set files
*/
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/file.h>
#include <sys/mman.h>
#include "queue.h"
#include "set.h"
#include "os.h"
#include "out.h"
#include "file.h"
#include "sys_util.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
/*
* struct rpmemd_db -- pool set database structure
*/
struct rpmemd_db {
	os_mutex_t lock;	/* serializes all database operations */
	char *root_dir;		/* absolute root of pool set files */
	mode_t mode;		/* mode bits applied to created pools */
};
/*
 * declaration of the 'struct list_head' type
 */
PMDK_LIST_HEAD(list_head, rpmemd_db_entry);
/*
 * struct rpmemd_db_entry -- entry in the pool set list
 */
struct rpmemd_db_entry {
	PMDK_LIST_ENTRY(rpmemd_db_entry) next;
	char *pool_desc;	/* descriptor relative to the root dir */
	struct pool_set *set;	/* parsed pool set (owned by the entry) */
};
/*
* rpmemd_db_init -- initialize the rpmem database of pool set files
*/
struct rpmemd_db *
rpmemd_db_init(const char *root_dir, mode_t mode)
{
	/* only absolute roots are accepted -- descriptors are relative */
	if (root_dir[0] != '/') {
		RPMEMD_LOG(ERR, "root directory is not an absolute path"
			" -- '%s'", root_dir);
		errno = EINVAL;
		return NULL;
	}

	struct rpmemd_db *db = calloc(1, sizeof(*db));
	if (db == NULL) {
		RPMEMD_LOG(ERR, "!allocating the rpmem database structure");
		return NULL;
	}

	db->root_dir = strdup(root_dir);
	if (db->root_dir == NULL) {
		RPMEMD_LOG(ERR, "!allocating the root dir path");
		free(db);
		return NULL;
	}

	db->mode = mode;
	util_mutex_init(&db->lock);

	return db;
}
/*
* rpmemd_db_concat -- (internal) concatenate two paths
*/
static char *
rpmemd_db_concat(const char *path1, const char *path2)
{
	size_t len1 = strlen(path1);
	size_t len2 = strlen(path2);
	/* +1 for the joining '/' and +1 for the terminating NUL */
	size_t new_len = len1 + len2 + 2;

	if (path1[0] != '/') {
		RPMEMD_LOG(ERR, "the first path is not an absolute one -- '%s'",
			path1);
		errno = EINVAL;
		return NULL;
	}

	if (path2[0] == '/') {
		RPMEMD_LOG(ERR, "the second path is not a relative one -- '%s'",
			path2);
		/* set to EBADF to distinguish this case from other errors */
		errno = EBADF;
		return NULL;
	}

	char *joined = malloc(new_len);
	if (joined == NULL) {
		RPMEMD_LOG(ERR, "!allocating path buffer");
		return NULL;
	}

	if (util_snprintf(joined, new_len, "%s/%s", path1, path2) < 0) {
		RPMEMD_LOG(ERR, "!snprintf");
		free(joined);
		errno = EINVAL;
		return NULL;
	}

	return joined;
}
/*
* rpmemd_db_get_path -- (internal) get the full path of the pool set file
*/
static char *
rpmemd_db_get_path(struct rpmemd_db *db, const char *pool_desc)
{
	/* caller owns the returned string; NULL on error (errno set) */
	return rpmemd_db_concat(db->root_dir, pool_desc);
}
/*
* rpmemd_db_pool_madvise -- (internal) workaround device dax alignment issue
*/
static int
rpmemd_db_pool_madvise(struct pool_set *set)
{
	/*
	 * This is a workaround for an issue with using device dax with
	 * libibverbs. The problem is that we use ibv_fork_init(3) which
	 * makes all registered memory being madvised with MADV_DONTFORK
	 * flag. In libpmemobj the remote replication is performed without
	 * pool header (first 4k). In such case the address passed to
	 * madvise(2) is aligned to 4k, but device dax can require different
	 * alignment (default is 2MB). This workaround madvises the entire
	 * memory region before registering it by ibv_reg_mr(3).
	 *
	 * Returns 0 on success, -1 when madvise(2) fails.
	 */
	const struct pool_set_part *part = &set->replica[0]->part[0];
	if (part->is_dev_dax) {
		int ret = os_madvise(part->addr, part->filesize,
			MADV_DONTFORK);
		if (ret) {
			ERR("!madvise");
			return -1;
		}
	}
	return 0;
}
/*
* rpmemd_get_attr -- (internal) get pool attributes from remote pool attributes
*/
static void
rpmemd_get_attr(struct pool_attr *attr, const struct rpmem_pool_attr *rattr)
{
	LOG(3, "attr %p, rattr %p", attr, rattr);

	/* scalar fields */
	attr->major = rattr->major;
	attr->features.compat = rattr->compat_features;
	attr->features.incompat = rattr->incompat_features;
	attr->features.ro_compat = rattr->ro_compat_features;

	/* fixed-size byte arrays */
	memcpy(attr->signature, rattr->signature, POOL_HDR_SIG_LEN);
	memcpy(attr->poolset_uuid, rattr->poolset_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->first_part_uuid, rattr->uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->prev_repl_uuid, rattr->prev_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->next_repl_uuid, rattr->next_uuid, POOL_HDR_UUID_LEN);
	memcpy(attr->arch_flags, rattr->user_flags, POOL_HDR_ARCH_LEN);
}
/*
* rpmemd_db_pool_create -- create a new pool set
*/
struct rpmemd_db_pool *
rpmemd_db_pool_create(struct rpmemd_db *db, const char *pool_desc,
	size_t pool_size, const struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_lock(&db->lock);
	struct rpmemd_db_pool *prp = NULL;
	struct pool_set *set;
	char *path;
	int ret;
	/* NOTE(review): pool_size is not used here -- the set is created */
	/* with RPMEM_MIN_POOL; confirm that is intended */
	prp = malloc(sizeof(struct rpmemd_db_pool));
	if (!prp) {
		RPMEMD_LOG(ERR, "!allocating pool set db entry");
		goto err_unlock;
	}
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		goto err_free_prp;
	}
	/* NULL attributes are passed through when rattr is NULL */
	struct pool_attr attr;
	struct pool_attr *pattr = NULL;
	if (rattr != NULL) {
		rpmemd_get_attr(&attr, rattr);
		pattr = &attr;
	}
	ret = util_pool_create_uuids(&set, path, 0, RPMEM_MIN_POOL,
		RPMEM_MIN_PART, pattr, NULL, REPLICAS_DISABLED,
		POOL_REMOTE);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot create pool set -- '%s'", path);
		goto err_free_path;
	}
	/* chmod failure is logged but deliberately non-fatal */
	ret = util_poolset_chmod(set, db->mode);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot change pool set mode bits to 0%o",
			db->mode);
	}
	if (rpmemd_db_pool_madvise(set))
		goto err_poolset_close;
	/* mark as opened */
	prp->pool_addr = set->replica[0]->part[0].addr;
	prp->pool_size = set->poolsize;
	prp->set = set;
	free(path);
	util_mutex_unlock(&db->lock);
	return prp;
err_poolset_close:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_path:
	free(path);
err_free_prp:
	free(prp);
err_unlock:
	util_mutex_unlock(&db->lock);
	return NULL;
}
/*
* rpmemd_db_pool_open -- open a pool set
*/
struct rpmemd_db_pool *
rpmemd_db_pool_open(struct rpmemd_db *db, const char *pool_desc,
	size_t pool_size, struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(db != NULL);
	RPMEMD_ASSERT(rattr != NULL);
	util_mutex_lock(&db->lock);
	struct rpmemd_db_pool *prp = NULL;
	struct pool_set *set;
	char *path;
	int ret;
	/* NOTE(review): pool_size is not used here -- confirm intended */
	prp = malloc(sizeof(struct rpmemd_db_pool));
	if (!prp) {
		RPMEMD_LOG(ERR, "!allocating pool set db entry");
		goto err_unlock;
	}
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		goto err_free_prp;
	}
	/* rattr is an out-parameter -- filled from the opened pool */
	ret = util_pool_open_remote(&set, path, 0, RPMEM_MIN_PART, rattr);
	if (ret) {
		RPMEMD_LOG(ERR, "!cannot open pool set -- '%s'", path);
		goto err_free_path;
	}
	if (rpmemd_db_pool_madvise(set))
		goto err_poolset_close;
	/* mark as opened */
	prp->pool_addr = set->replica[0]->part[0].addr;
	prp->pool_size = set->poolsize;
	prp->set = set;
	free(path);
	util_mutex_unlock(&db->lock);
	return prp;
err_poolset_close:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_path:
	free(path);
err_free_prp:
	free(prp);
err_unlock:
	util_mutex_unlock(&db->lock);
	return NULL;
}
/*
* rpmemd_db_pool_close -- close a pool set
*/
void
rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp)
{
	RPMEMD_ASSERT(db != NULL);
	util_mutex_lock(&db->lock);
	/* part files are kept on disk; only the mappings are released */
	util_poolset_close(prp->set, DO_NOT_DELETE_PARTS);
	free(prp);
	util_mutex_unlock(&db->lock);
}
/*
 * rpmemd_db_pool_set_attr -- overwrite pool attributes
 */
int
rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
	const struct rpmem_pool_attr *rattr)
{
	RPMEMD_ASSERT(prp != NULL);
	RPMEMD_ASSERT(prp->set != NULL);
	/* remote pools are created with replication disabled */
	RPMEMD_ASSERT(prp->set->nreplicas == 1);
	return util_replica_set_attr(prp->set->replica[0], rattr);
}
struct rm_cb_args {
	int force;	/* when set, unlink errors are ignored */
	int ret;	/* first unlink error observed (when !force) */
};
/*
 * rm_poolset_cb -- (internal) callback for removing part files
 */
static int
rm_poolset_cb(struct part_file *pf, void *arg)
{
	struct rm_cb_args *args = (struct rm_cb_args *)arg;
	if (pf->is_remote) {
		RPMEMD_LOG(ERR, "removing remote replica not supported");
		return -1;
	}
	int ret = util_unlink_flock(pf->part->path);
	/* in force mode failures are silently skipped */
	if (!args->force && ret) {
		RPMEMD_LOG(ERR, "!unlink -- '%s'", pf->part->path);
		args->ret = ret;
	}
	/* always continue iterating over the remaining parts */
	return 0;
}
/*
* rpmemd_db_pool_remove -- remove a pool set
*/
int
rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
	int force, int pool_set)
{
	RPMEMD_ASSERT(db != NULL);
	RPMEMD_ASSERT(pool_desc != NULL);
	util_mutex_lock(&db->lock);
	struct rm_cb_args args;
	args.force = force;
	args.ret = 0;
	char *path;
	path = rpmemd_db_get_path(db, pool_desc);
	if (!path) {
		args.ret = -1;
		goto err_unlock;
	}
	/* unlink every local part file via rm_poolset_cb() */
	int ret = util_poolset_foreach_part(path, rm_poolset_cb, &args);
	if (!force && ret) {
		RPMEMD_LOG(ERR, "!removing '%s' failed", path);
		args.ret = ret;
		goto err_free_path;
	}
	/* optionally remove the pool set file itself */
	if (pool_set)
		os_unlink(path);
err_free_path:
	free(path);
err_unlock:
	util_mutex_unlock(&db->lock);
	return args.ret;
}
/*
* rpmemd_db_fini -- deinitialize the rpmem database of pool set files
*/
void
rpmemd_db_fini(struct rpmemd_db *db)
{
	RPMEMD_ASSERT(db != NULL);
	/* no pools may be in use at this point -- the lock is destroyed */
	util_mutex_destroy(&db->lock);
	free(db->root_dir);
	free(db);
}
/*
* rpmemd_db_check_dups_set -- (internal) check for duplicates in the database
*/
static inline int
rpmemd_db_check_dups_set(struct pool_set *set, const char *path)
{
	/* scan every part of every replica for a matching path */
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			if (strcmp(rep->part[p].path, path) == 0)
				return -1;
		}
	}
	return 0;
}
/*
* rpmemd_db_check_dups -- (internal) check for duplicates in the database
*/
static int
rpmemd_db_check_dups(struct list_head *head, struct rpmemd_db *db,
	const char *pool_desc, struct pool_set *set)
{
	struct rpmemd_db_entry *edb;
	/* compare every part of 'set' against every already-listed set */
	PMDK_LIST_FOREACH(edb, head, next) {
		for (unsigned r = 0; r < edb->set->nreplicas; r++) {
			struct pool_replica *rep = edb->set->replica[r];
			for (unsigned p = 0; p < rep->nparts; p++) {
				if (rpmemd_db_check_dups_set(set,
						rep->part[p].path)) {
					RPMEMD_LOG(ERR, "part file '%s' from "
						"pool set '%s' duplicated in "
						"pool set '%s'",
						rep->part[p].path,
						pool_desc,
						edb->pool_desc);
					errno = EEXIST;
					return -1;
				}
			}
		}
	}
	return 0;
}
/*
* rpmemd_db_add -- (internal) add an entry for a given set to the database
*/
static struct rpmemd_db_entry *
rpmemd_db_add(struct list_head *head, struct rpmemd_db *db,
	const char *pool_desc, struct pool_set *set)
{
	struct rpmemd_db_entry *edb = calloc(1, sizeof(*edb));
	if (edb == NULL) {
		RPMEMD_LOG(ERR, "!allocating database entry");
		return NULL;
	}

	edb->pool_desc = strdup(pool_desc);
	if (edb->pool_desc == NULL) {
		RPMEMD_LOG(ERR, "!allocating path for database entry");
		free(edb);
		return NULL;
	}

	/* the entry takes ownership of 'set' */
	edb->set = set;
	PMDK_LIST_INSERT_HEAD(head, edb, next);

	return edb;
}
/*
 * new_paths -- (internal) build two new strings: 'dir/name' into *path and
 * either 'old_desc/name' or plain 'name' (when old_desc is empty) into
 * *new_desc; on any failure both outputs are released and -1 is returned
 */
static int
new_paths(const char *dir, const char *name, const char *old_desc,
	char **path, char **new_desc)
{
	*path = rpmemd_db_concat(dir, name);
	if (*path == NULL)
		return -1;

	if (old_desc[0] == '\0') {
		*new_desc = strdup(name);
		if (*new_desc == NULL)
			RPMEMD_LOG(ERR, "!allocating new descriptor");
	} else {
		*new_desc = rpmemd_db_concat(old_desc, name);
	}

	if (*new_desc == NULL) {
		free(*path);
		return -1;
	}

	return 0;
}
/*
 * rpmemd_db_check_dir_r -- (internal) recursively check given directory
 *                          for duplicates
 *
 * Every regular entry is treated as a pool set file: it is parsed,
 * checked against all entries collected so far on 'head' and, when
 * unique, added to that list (ownership of 'set' moves to the list).
 * Subdirectories are descended into, extending 'pool_desc' with the
 * directory name.  Returns 0 on success, -1 on any error.
 *
 * NOTE(review): only d_type == DT_DIR is treated as a directory; on file
 * systems that report DT_UNKNOWN such entries would be parsed as pool
 * set files instead -- confirm the supported file systems.
 */
static int
rpmemd_db_check_dir_r(struct list_head *head, struct rpmemd_db *db,
	const char *dir, char *pool_desc)
{
	char *new_dir, *new_desc, *full_path;
	struct dirent *dentry;
	struct pool_set *set = NULL;
	DIR *dirp;
	int ret = 0;
	dirp = opendir(dir);
	if (dirp == NULL) {
		RPMEMD_LOG(ERR, "cannot open the directory -- %s", dir);
		return -1;
	}
	while ((dentry = readdir(dirp)) != NULL) {
		/* skip the current and parent directory entries */
		if (strcmp(dentry->d_name, ".") == 0 ||
		    strcmp(dentry->d_name, "..") == 0)
			continue;
		if (dentry->d_type == DT_DIR) {	/* directory */
			if (new_paths(dir, dentry->d_name, pool_desc,
					&new_dir, &new_desc))
				goto err_closedir;
			/* call recursively for a new directory */
			ret = rpmemd_db_check_dir_r(head, db, new_dir,
					new_desc);
			free(new_dir);
			free(new_desc);
			if (ret)
				goto err_closedir;
			continue;
		}
		if (new_paths(dir, dentry->d_name, pool_desc,
				&full_path, &new_desc)) {
			goto err_closedir;
		}
		if (util_poolset_read(&set, full_path)) {
			RPMEMD_LOG(ERR, "!error reading pool set file -- %s",
				full_path);
			goto err_free_paths;
		}
		if (rpmemd_db_check_dups(head, db, new_desc, set)) {
			RPMEMD_LOG(ERR, "!duplicate found in pool set file"
				" -- %s", full_path);
			goto err_free_set;
		}
		/* on success the list entry keeps its own copy of new_desc */
		if (rpmemd_db_add(head, db, new_desc, set) == NULL) {
			goto err_free_set;
		}
		free(new_desc);
		free(full_path);
	}
	closedir(dirp);
	return 0;
	/* error labels form a cleanup cascade: each falls through to the next */
err_free_set:
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_free_paths:
	free(new_desc);
	free(full_path);
err_closedir:
	closedir(dirp);
	return -1;
}
/*
 * rpmemd_db_check_dir -- check given directory for duplicates
 *
 * Scans the database root directory recursively; the temporary entry
 * list built during the scan is drained before returning.
 */
int
rpmemd_db_check_dir(struct rpmemd_db *db)
{
	struct rpmemd_db_entry *entry;
	struct list_head head;
	int ret;

	RPMEMD_ASSERT(db != NULL);

	util_mutex_lock(&db->lock);

	PMDK_LIST_INIT(&head);
	ret = rpmemd_db_check_dir_r(&head, db, db->root_dir, "");

	/* release every entry collected by the recursive scan */
	while ((entry = PMDK_LIST_FIRST(&head)) != NULL) {
		PMDK_LIST_REMOVE(entry, next);
		util_poolset_close(entry->set, DO_NOT_DELETE_PARTS);
		free(entry->pool_desc);
		free(entry);
	}

	util_mutex_unlock(&db->lock);

	return ret;
}
/*
* rpmemd_db_pool_is_pmem -- true if pool is in PMEM
*/
int
rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool)
{
return REP(pool->set, 0)->is_pmem;
}
| 13,747 | 20.616352 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_fip.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_fip.h -- rpmemd libfabric provider module header file
*/
#include <stddef.h>
struct rpmemd_fip;
/* parameters describing the memory region and transport configuration */
struct rpmemd_fip_attr {
	void *addr;		/* base address of the served memory region */
	size_t size;		/* size of the region in bytes */
	unsigned nlanes;	/* number of lanes */
	size_t nthreads;	/* number of processing threads */
	size_t buff_size;	/* size of a single buffer */
	enum rpmem_provider provider;	/* requested libfabric provider */
	enum rpmem_persist_method persist_method; /* requested persist method */
	/* persist callback for the range [addr, addr + len) */
	int (*persist)(const void *addr, size_t len);
	/* copy-and-persist callback; returns pmemdest-style pointer */
	void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
	/* deep persist callback; 'ctx' below is presumably its last argument */
	int (*deep_persist)(const void *addr, size_t len, void *ctx);
	void *ctx;		/* opaque context for deep_persist */
};
/*
 * Create a libfabric endpoint bound to node:service with the given
 * attributes; fills 'resp' and, on failure, 'err'.
 */
struct rpmemd_fip *rpmemd_fip_init(const char *node,
	const char *service,
	struct rpmemd_fip_attr *attr,
	struct rpmem_resp_attr *resp,
	enum rpmem_err *err);
/* release all resources owned by the fip handle */
void rpmemd_fip_fini(struct rpmemd_fip *fip);
/* wait (up to 'timeout', presumably milliseconds -- confirm) for a client */
int rpmemd_fip_accept(struct rpmemd_fip *fip, int timeout);
/* start/stop the request-processing machinery */
int rpmemd_fip_process_start(struct rpmemd_fip *fip);
int rpmemd_fip_process_stop(struct rpmemd_fip *fip);
/* wait for the remote side to close the connection */
int rpmemd_fip_wait_close(struct rpmemd_fip *fip, int timeout);
/* close the connection */
int rpmemd_fip_close(struct rpmemd_fip *fip);
| 1,066 | 27.078947 | 70 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/rpmemd/rpmemd_obc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmemd_obc.h -- rpmemd out-of-band connection declarations
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
struct rpmemd_obc;
/*
 * Callback table dispatched by rpmemd_obc_process() -- one handler per
 * out-of-band request type; 'arg' is the caller's opaque context.
 */
struct rpmemd_obc_requests {
	/* handle a create-pool request */
	int (*create)(struct rpmemd_obc *obc, void *arg,
		const struct rpmem_req_attr *req,
		const struct rpmem_pool_attr *pool_attr);
	/* handle an open-pool request */
	int (*open)(struct rpmemd_obc *obc, void *arg,
		const struct rpmem_req_attr *req);
	/* handle a close request */
	int (*close)(struct rpmemd_obc *obc, void *arg, int flags);
	/* handle a set-pool-attributes request */
	int (*set_attr)(struct rpmemd_obc *obc, void *arg,
		const struct rpmem_pool_attr *pool_attr);
};
/* create an out-of-band connection handle over the given descriptor pair */
struct rpmemd_obc *rpmemd_obc_init(int fd_in, int fd_out);
/* destroy the handle */
void rpmemd_obc_fini(struct rpmemd_obc *obc);
/* send an initial status code to the client */
int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status);
/* receive requests and dispatch them through 'req_cb' with 'arg' */
int rpmemd_obc_process(struct rpmemd_obc *obc,
	struct rpmemd_obc_requests *req_cb, void *arg);
/* responses to the corresponding requests above */
int rpmemd_obc_create_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res);
int rpmemd_obc_open_resp(struct rpmemd_obc *obc,
	int status, const struct rpmem_resp_attr *res,
	const struct rpmem_pool_attr *pool_attr);
int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status);
int rpmemd_obc_close_resp(struct rpmemd_obc *obc,
	int status);
| 1,296 | 31.425 | 65 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/tools/pmempool/create.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* create.c -- pmempool create command source file
*/
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <errno.h>
#include <libgen.h>
#include <err.h>
#include "common.h"
#include "file.h"
#include "create.h"
#include "os.h"
#include "set.h"
#include "output.h"
#include "libpmemblk.h"
#include "libpmemlog.h"
#include "libpmempool.h"
#define DEFAULT_MODE 0664
/*
 * pmempool_create -- context and args for create command
 */
struct pmempool_create {
	int verbose;		/* -v: verbosity, passed to out_set_vlevel() */
	char *fname;		/* path of the pool file to create */
	int fexists;		/* nonzero if 'fname' already exists */
	char *inherit_fname;	/* -i: pool file to inherit parameters from */
	int max_size;		/* -M: use all available file system space */
	char *str_type;		/* positional pool type string (blk|log|obj) */
	struct pmem_pool_params params;		/* effective parameters */
	struct pmem_pool_params inherit_params;	/* parameters from -i file */
	char *str_size;		/* raw -s argument */
	char *str_mode;		/* raw -m argument */
	char *str_bsize;	/* raw <bsize> positional argument */
	uint64_t csize;		/* not referenced in this file */
	int write_btt_layout;	/* -w: force writing the BTT layout (blk) */
	int force;		/* -f: remove an existing pool first */
	char *layout;		/* -l: pmemobj layout name */
	struct options *opts;	/* parsed option descriptors */
	int clearbadblocks;	/* -b: clear bad blocks before creation */
};
/*
 * pmempool_create_default -- default args for create command
 *
 * Fields not listed are zero-initialized; DEFAULT_MODE supplies the pool
 * file permissions unless -m/--mode overrides them.
 */
static const struct pmempool_create pmempool_create_default = {
	.verbose = 0,
	.fname = NULL,
	.fexists = 0,
	.inherit_fname = NULL,
	.max_size = 0,
	.str_type = NULL,
	.str_bsize = NULL,
	.csize = 0,
	.write_btt_layout = 0,
	.force = 0,
	.layout = NULL,
	.clearbadblocks = 0,
	.params = {
		.type = PMEM_POOL_TYPE_UNKNOWN,	/* must be resolved later */
		.size = 0,
		.mode = DEFAULT_MODE,
	}
};
/*
 * help_str -- string for help message
 *
 * Printed by pmempool_create_help(); the trailing %s is substituted with
 * the application name.
 */
static const char * const help_str =
"Create pmem pool of specified size, type and name\n"
"\n"
"Common options:\n"
" -s, --size <size> size of pool\n"
" -M, --max-size use maximum available space on file system\n"
" -m, --mode <octal> set permissions to <octal> (the default is 0664)\n"
" -i, --inherit <file> take required parameters from specified pool file\n"
" -b, --clear-bad-blocks clear bad blocks in existing files\n"
" -f, --force remove the pool first\n"
" -v, --verbose increase verbosity level\n"
" -h, --help display this help and exit\n"
"\n"
"Options for PMEMBLK:\n"
" -w, --write-layout force writing the BTT layout\n"
"\n"
"Options for PMEMOBJ:\n"
" -l, --layout <name> layout name stored in pool's header\n"
"\n"
"For complete documentation see %s-create(1) manual page.\n"
;
/*
 * long_options -- command line options
 *
 * The OPT_* mask OR-ed into each value records which pool types accept
 * the option; it is checked against the chosen type via
 * util_options_verify().
 */
static const struct option long_options[] = {
	{"size", required_argument, NULL, 's' | OPT_ALL},
	{"verbose", no_argument, NULL, 'v' | OPT_ALL},
	{"help", no_argument, NULL, 'h' | OPT_ALL},
	{"max-size", no_argument, NULL, 'M' | OPT_ALL},
	{"inherit", required_argument, NULL, 'i' | OPT_ALL},
	{"mode", required_argument, NULL, 'm' | OPT_ALL},
	{"write-layout", no_argument, NULL, 'w' | OPT_BLK},
	{"layout", required_argument, NULL, 'l' | OPT_OBJ},
	{"force", no_argument, NULL, 'f' | OPT_ALL},
	{"clear-bad-blocks", no_argument, NULL, 'b' | OPT_ALL},
	{NULL, 0, NULL, 0 },
};
/*
 * print_usage -- print application usage short description
 */
static void
print_usage(const char *appname)
{
	printf("Usage: %s create [<args>] <blk|log|obj> [<bsize>] <file>\n",
		appname);
}
/*
 * print_version -- print version string (SRCVERSION is set at build time)
 */
static void
print_version(const char *appname)
{
	printf("%s %s\n", appname, SRCVERSION);
}
/*
 * pmempool_create_help -- print help message for create command
 *
 * Prints usage, version and the full help text (help_str) to stdout.
 */
void
pmempool_create_help(const char *appname)
{
	print_usage(appname);
	print_version(appname);
	printf(help_str, appname);
}
/*
* pmempool_create_obj -- create pmem obj pool
*/
static int
pmempool_create_obj(struct pmempool_create *pcp)
{
PMEMobjpool *pop = pmemobj_create(pcp->fname, pcp->layout,
pcp->params.size, pcp->params.mode);
if (!pop) {
outv_err("'%s' -- %s\n", pcp->fname, pmemobj_errormsg());
return -1;
}
pmemobj_close(pop);
return 0;
}
/*
* pmempool_create_blk -- create pmem blk pool
*/
static int
pmempool_create_blk(struct pmempool_create *pcp)
{
ASSERTne(pcp->params.blk.bsize, 0);
int ret = 0;
PMEMblkpool *pbp = pmemblk_create(pcp->fname, pcp->params.blk.bsize,
pcp->params.size, pcp->params.mode);
if (!pbp) {
outv_err("'%s' -- %s\n", pcp->fname, pmemblk_errormsg());
return -1;
}
if (pcp->write_btt_layout) {
outv(1, "Writing BTT layout using block %d.\n",
pcp->write_btt_layout);
if (pmemblk_set_error(pbp, 0) || pmemblk_set_zero(pbp, 0)) {
outv_err("writing BTT layout to block 0 failed\n");
ret = -1;
}
}
pmemblk_close(pbp);
return ret;
}
/*
* pmempool_create_log -- create pmem log pool
*/
static int
pmempool_create_log(struct pmempool_create *pcp)
{
PMEMlogpool *plp = pmemlog_create(pcp->fname,
pcp->params.size, pcp->params.mode);
if (!plp) {
outv_err("'%s' -- %s\n", pcp->fname, pmemlog_errormsg());
return -1;
}
pmemlog_close(plp);
return 0;
}
/*
* pmempool_get_max_size -- return maximum allowed size of file
*/
#ifndef _WIN32
/*
 * pmempool_get_max_size -- (internal) compute the space available for a
 * new file in the file system that would contain 'fname'
 *
 * On success stores the available byte count in *sizep and returns 0;
 * returns -1 on allocation or statvfs(3) failure.
 */
static int
pmempool_get_max_size(const char *fname, uint64_t *sizep)
{
	struct statvfs buf;
	int ret = 0;

	/* dirname() may modify its argument, so work on a private copy */
	size_t len = strlen(fname) + 1;
	char *name = malloc(len);
	if (name == NULL) {
		return -1;
	}
	memcpy(name, fname, len);

	char *dir = dirname(name);
	if (statvfs(dir, &buf)) {
		ret = -1;
	} else {
		/*
		 * POSIX defines f_bavail (blocks available to an
		 * unprivileged process) in units of f_frsize -- the
		 * fragment size -- not f_bsize, so multiply by f_frsize.
		 */
		*sizep = (uint64_t)buf.f_frsize * buf.f_bavail;
	}

	free(name);
	return ret;
}
#else
/*
 * Windows variant: queries free space on the volume holding dirname(fname)
 * via GetDiskFreeSpaceExW.  On success stores the byte count in *sizep and
 * returns 0; returns -1 on allocation, conversion or API failure.
 */
static int
pmempool_get_max_size(const char *fname, uint64_t *sizep)
{
	int ret = 0;
	ULARGE_INTEGER freespace;
	/* dirname() may modify its argument, hence the copy */
	char *name = strdup(fname);
	if (name == NULL) {
		return -1;
	}
	char *dir = dirname(name);
	/* util_toUTF16 allocates; the result is freed below */
	wchar_t *str = util_toUTF16(dir);
	if (str == NULL) {
		free(name);
		return -1;
	}
	/* first out-param is bytes available to the caller */
	if (GetDiskFreeSpaceExW(str, &freespace, NULL, NULL) == 0)
		ret = -1;
	else
		*sizep = freespace.QuadPart;
	free(str);
	free(name);
	return ret;
}
#endif
/*
 * print_pool_params -- print some parameters of a pool
 *
 * Emitted only at verbosity level >= 1; the type-specific line is printed
 * for blk (block size) and obj (layout name) pools.
 */
static void
print_pool_params(struct pmem_pool_params *params)
{
	outv(1, "\ttype : %s\n", out_get_pool_type_str(params->type));
	outv(1, "\tsize : %s\n", out_get_size_str(params->size, 2));
	outv(1, "\tmode : 0%o\n", params->mode);

	if (params->type == PMEM_POOL_TYPE_BLK) {
		outv(1, "\tbsize : %s\n",
			out_get_size_str(params->blk.bsize, 0));
	} else if (params->type == PMEM_POOL_TYPE_OBJ) {
		outv(1, "\tlayout: '%s'\n", params->obj.layout);
	}
}
/*
 * inherit_pool_params -- inherit pool parameters from specified file
 *
 * Parses the pool pointed to by pcp->inherit_fname into
 * pcp->inherit_params and prints them at verbosity >= 1.  Returns 0 on
 * success, -1 when the file cannot be parsed or its type is unknown.
 */
static int
inherit_pool_params(struct pmempool_create *pcp)
{
	outv(1, "Parsing pool: '%s'\n", pcp->inherit_fname);
	/*
	 * If no type string passed, --inherit option must be passed
	 * so parse file and get required parameters.
	 */
	if (pmem_pool_parse_params(pcp->inherit_fname,
			&pcp->inherit_params, 1)) {
		/* errno set means a system error; otherwise a parse failure */
		if (errno)
			perror(pcp->inherit_fname);
		else
			outv_err("%s: cannot determine type of pool\n",
				pcp->inherit_fname);
		return -1;
	}
	if (PMEM_POOL_TYPE_UNKNOWN == pcp->inherit_params.type) {
		outv_err("'%s' -- unknown pool type\n",
			pcp->inherit_fname);
		return -1;
	}
	print_pool_params(&pcp->inherit_params);
	return 0;
}
/*
 * pmempool_create_parse_args -- parse command line args
 *
 * Fills 'pcp' from the option string "vhi:s:Mm:l:wfb" and the trailing
 * positional arguments (<type> [<bsize>] <file>).  Returns 0 on success,
 * -1 on any invalid option or missing positional argument; -h exits the
 * process after printing help.
 */
static int
pmempool_create_parse_args(struct pmempool_create *pcp, const char *appname,
	int argc, char *argv[], struct options *opts)
{
	int opt, ret;
	while ((opt = util_options_getopt(argc, argv, "vhi:s:Mm:l:wfb",
			opts)) != -1) {
		switch (opt) {
		case 'v':
			pcp->verbose = 1;
			break;
		case 'h':
			pmempool_create_help(appname);
			exit(EXIT_SUCCESS);
		case 's':
			/* keep the raw string too -- used to detect -s later */
			pcp->str_size = optarg;
			ret = util_parse_size(optarg,
				(size_t *)&pcp->params.size);
			if (ret || pcp->params.size == 0) {
				outv_err("invalid size value specified '%s'\n",
					optarg);
				return -1;
			}
			break;
		case 'M':
			pcp->max_size = 1;
			break;
		case 'm':
			pcp->str_mode = optarg;
			if (util_parse_mode(optarg, &pcp->params.mode)) {
				outv_err("invalid mode value specified '%s'\n",
					optarg);
				return -1;
			}
			break;
		case 'i':
			pcp->inherit_fname = optarg;
			break;
		case 'w':
			pcp->write_btt_layout = 1;
			break;
		case 'l':
			pcp->layout = optarg;
			break;
		case 'f':
			pcp->force = 1;
			break;
		case 'b':
			pcp->clearbadblocks = 1;
			break;
		default:
			print_usage(appname);
			return -1;
		}
	}
	/* check for <type>, <bsize> and <file> strings */
	if (optind + 2 < argc) {
		/* three positionals: type, block size and file name */
		pcp->str_type = argv[optind];
		pcp->str_bsize = argv[optind + 1];
		pcp->fname = argv[optind + 2];
	} else if (optind + 1 < argc) {
		/* two positionals: type and file name */
		pcp->str_type = argv[optind];
		pcp->fname = argv[optind + 1];
	} else if (optind < argc) {
		/* one positional: file name only (type must come from -i) */
		pcp->fname = argv[optind];
		pcp->str_type = NULL;
	} else {
		print_usage(appname);
		return -1;
	}
	return 0;
}
/*
 * allocate_max_size_available_file -- (internal) create a brand-new file
 * and greedily reserve as much space for it as the file system allows
 *
 * Starting from 'max_size' rounded down to a page boundary, repeatedly
 * fallocates a chunk at the current offset; on success the offset
 * advances, on ENOSPC the chunk size is halved (page-aligned) and the
 * attempt is retried, until the chunk shrinks to a single page.  Any
 * error other than ENOSPC aborts: the file is closed, unlinked and -1
 * is returned.  The file is created with O_EXCL, so it must not exist.
 */
static int
allocate_max_size_available_file(const char *name_of_file, mode_t mode,
	os_off_t max_size)
{
	int fd = os_open(name_of_file, O_CREAT | O_EXCL | O_RDWR, mode);
	if (fd == -1) {
		outv_err("!open '%s' failed", name_of_file);
		return -1;
	}
	os_off_t offset = 0;
	/* round the first chunk down to a whole number of pages */
	os_off_t length = max_size - (max_size % (os_off_t)Pagesize);
	int ret;
	do {
		ret = os_posix_fallocate(fd, offset, length);
		if (ret == 0)
			offset += length;
		else if (ret != ENOSPC) {
			/* hard failure -- clean up the partially made file */
			os_close(fd);
			if (os_unlink(name_of_file) == -1)
				outv_err("!unlink '%s' failed", name_of_file);
			errno = ret;
			outv_err("!space allocation for '%s' failed",
				name_of_file);
			return -1;
		}
		/* halve the chunk (also after success) and re-align it */
		length /= 2;
		length -= (length % (os_off_t)Pagesize);
	} while (length > (os_off_t)Pagesize);
	os_close(fd);
	return 0;
}
/*
 * pmempool_create_func -- main function for create command
 *
 * Drives the whole 'pmempool create' flow: argument parsing, parameter
 * inheritance (-i), pool-type resolution, size resolution (-s / -M /
 * minimum), optional removal of an existing pool (-f), bad-block
 * clearing (-b) and finally dispatch to the type-specific create
 * routine.  Returns 0 on success, nonzero on failure.
 */
int
pmempool_create_func(const char *appname, int argc, char *argv[])
{
	int ret = 0;
	struct pmempool_create pc = pmempool_create_default;
	pc.opts = util_options_alloc(long_options, sizeof(long_options) /
		sizeof(long_options[0]), NULL);
	/* parse command line arguments */
	ret = pmempool_create_parse_args(&pc, appname, argc, argv, pc.opts);
	if (ret)
		exit(EXIT_FAILURE);
	/* set verbosity level */
	out_set_vlevel(pc.verbose);
	umask(0);
	int exists = util_file_exists(pc.fname);
	if (exists < 0)
		return -1;
	pc.fexists = exists;
	int is_poolset = util_is_poolset_file(pc.fname) == 1;
	if (pc.inherit_fname) {
		if (inherit_pool_params(&pc)) {
			outv_err("parsing pool '%s' failed\n",
					pc.inherit_fname);
			return -1;
		}
	}
	/*
	 * Parse pool type and other parameters if --inherit option
	 * passed. It is possible to either pass --inherit option
	 * or pool type string in command line arguments. This is
	 * validated here.
	 */
	if (pc.str_type) {
		/* parse pool type string if passed in command line arguments */
		pc.params.type = pmem_pool_type_parse_str(pc.str_type);
		if (PMEM_POOL_TYPE_UNKNOWN == pc.params.type) {
			outv_err("'%s' -- unknown pool type\n", pc.str_type);
			return -1;
		}
		if (PMEM_POOL_TYPE_BLK == pc.params.type) {
			/* blk pools additionally require a block size */
			if (pc.str_bsize == NULL) {
				outv_err("blk pool requires <bsize> "
						"argument\n");
				return -1;
			}
			if (util_parse_size(pc.str_bsize,
					(size_t *)&pc.params.blk.bsize)) {
				outv_err("cannot parse '%s' as block size\n",
						pc.str_bsize);
				return -1;
			}
		}
		if (PMEM_POOL_TYPE_OBJ == pc.params.type && pc.layout != NULL) {
			size_t max_layout = PMEMOBJ_MAX_LAYOUT;
			if (strlen(pc.layout) >= max_layout) {
				outv_err(
	"Layout name is too long, maximum number of characters (including the terminating null byte) is %zu\n",
					max_layout);
				return -1;
			}
			/* layout length was checked, but terminate anyway */
			size_t len = sizeof(pc.params.obj.layout);
			strncpy(pc.params.obj.layout, pc.layout, len);
			pc.params.obj.layout[len - 1] = '\0';
		}
	} else if (pc.inherit_fname) {
		pc.params.type = pc.inherit_params.type;
	} else {
		/* neither pool type string nor --inherit options passed */
		print_usage(appname);
		return -1;
	}
	/* reject options that do not apply to the selected pool type */
	if (util_options_verify(pc.opts, pc.params.type))
		return -1;
	if (pc.params.type != PMEM_POOL_TYPE_BLK && pc.str_bsize != NULL) {
		outv_err("invalid option specified for %s pool type"
				" -- block size\n",
			out_get_pool_type_str(pc.params.type));
		return -1;
	}
	/* a poolset file carries its own sizes -- -s/-M make no sense */
	if (is_poolset) {
		if (pc.params.size) {
			outv_err("-s|--size cannot be used with "
					"poolset file\n");
			return -1;
		}
		if (pc.max_size) {
			outv_err("-M|--max-size cannot be used with "
					"poolset file\n");
			return -1;
		}
	}
	if (pc.params.size && pc.max_size) {
		outv_err("-M|--max-size option cannot be used with -s|--size"
				" option\n");
		return -1;
	}
	/* fill any parameter not given explicitly from the inherited pool */
	if (pc.inherit_fname) {
		if (!pc.str_size && !pc.max_size)
			pc.params.size = pc.inherit_params.size;
		if (!pc.str_mode)
			pc.params.mode = pc.inherit_params.mode;
		switch (pc.params.type) {
		case PMEM_POOL_TYPE_BLK:
			if (!pc.str_bsize)
				pc.params.blk.bsize =
					pc.inherit_params.blk.bsize;
			break;
		case PMEM_POOL_TYPE_OBJ:
			if (!pc.layout) {
				memcpy(pc.params.obj.layout,
					pc.inherit_params.obj.layout,
					sizeof(pc.params.obj.layout));
			} else {
				size_t len = sizeof(pc.params.obj.layout);
				strncpy(pc.params.obj.layout, pc.layout,
					len - 1);
				pc.params.obj.layout[len - 1] = '\0';
			}
			break;
		default:
			break;
		}
	}
	/*
	 * If neither --size nor --inherit options passed, check
	 * for --max-size option - if not passed use minimum pool size.
	 */
	uint64_t min_size = pmem_pool_get_min_size(pc.params.type);
	if (pc.params.size == 0) {
		if (pc.max_size) {
			outv(1, "Maximum size option passed "
				"- getting available space of file system.\n");
			ret = pmempool_get_max_size(pc.fname,
					&pc.params.size);
			if (ret) {
				outv_err("cannot get available space of fs\n");
				return -1;
			}
			if (pc.params.size == 0) {
				outv_err("No space left on device\n");
				return -1;
			}
			outv(1, "Available space is %s\n",
				out_get_size_str(pc.params.size, 2));
			if (allocate_max_size_available_file(pc.fname,
					pc.params.mode,
					(os_off_t)pc.params.size))
				return -1;
			/*
			 * We are going to create pool based
			 * on file size instead of the pc.params.size.
			 */
			pc.params.size = 0;
		} else {
			if (!pc.fexists) {
				outv(1, "No size option passed "
					"- picking minimum pool size.\n");
				pc.params.size = min_size;
			}
		}
	} else {
		if (pc.params.size < min_size) {
			outv_err("size must be >= %lu bytes\n", min_size);
			return -1;
		}
	}
	if (pc.force)
		pmempool_rm(pc.fname, PMEMPOOL_RM_FORCE);
	outv(1, "Creating pool: %s\n", pc.fname);
	print_pool_params(&pc.params);
	if (pc.clearbadblocks) {
		/* NOTE(review): this 'ret' shadows the outer one (benign) */
		int ret = util_pool_clear_badblocks(pc.fname,
				1 /* ignore non-existing */);
		if (ret) {
			outv_err("'%s' -- clearing bad blocks failed\n",
					pc.fname);
			return -1;
		}
	}
	/* dispatch to the type-specific create routine */
	switch (pc.params.type) {
	case PMEM_POOL_TYPE_BLK:
		ret = pmempool_create_blk(&pc);
		break;
	case PMEM_POOL_TYPE_LOG:
		ret = pmempool_create_log(&pc);
		break;
	case PMEM_POOL_TYPE_OBJ:
		ret = pmempool_create_obj(&pc);
		break;
	default:
		ret = -1;
		break;
	}
	if (ret) {
		outv_err("creating pool file failed\n");
		/* do not remove a file that existed before we started */
		if (!pc.fexists)
			util_unlink(pc.fname);
	}
	util_options_free(pc.opts);
	return ret;
}
| 14,987 | 21.403587 | 109 | c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.