repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
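/*
 * Note: RPMEM_LOG above pastes the "L" prefix onto its level argument, so a
 * call such as RPMEM_LOG(INFO, "opening %s", path) expands to
 * LOG(LINFO, "opening %s", path), matching the level enum defined earlier.
 */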
#define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL))
#define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED
#define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL))
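/*
 * Example (illustrative only): because the *_FLAGS_MASK macros select every
 * bit that is not a valid flag, validating user-supplied flags reduces to a
 * single bitwise test:
 *
 *	if (flags & RPMEM_FLUSH_FLAGS_MASK)
 *		return EINVAL;
 */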
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
void rpmem_util_get_env_wq_size(unsigned *wq_size);
#ifdef __cplusplus
}
#endif
#endif
| 1,137 | 22.708333 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_obc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
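/*
 * Typical call sequence (an illustrative sketch only -- error handling is
 * omitted and the info/req/res/pool_attr structures are assumed to be
 * filled in by the caller):
 *
 *	struct rpmem_obc *rpc = rpmem_obc_init();
 *	rpmem_obc_connect(rpc, &info);
 *	rpmem_obc_create(rpc, &req, &res, &pool_attr);
 *	...
 *	rpmem_obc_close(rpc, 0);
 *	rpmem_obc_disconnect(rpc);
 *	rpmem_obc_fini(rpc);
 */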
#ifdef __cplusplus
}
#endif
#endif
| 1,100 | 21.9375 | 65 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_obc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc.c -- rpmem out-of-band connection client source file
*/
#include <stdlib.h>
#include <netdb.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_obc.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_ssh.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
/*
* rpmem_obc -- rpmem out-of-band client connection handle
*/
struct rpmem_obc {
struct rpmem_ssh *ssh;
};
/*
* rpmem_obc_is_connected -- (internal) return non-zero value if client is
* connected
*/
static inline int
rpmem_obc_is_connected(struct rpmem_obc *rpc)
{
return rpc->ssh != NULL;
}
/*
* rpmem_obc_check_ibc_attr -- (internal) check in-band connection
* attributes
*/
static int
rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
if (ibc->port == 0 || ibc->port > UINT16_MAX) {
ERR("invalid port number received -- %u", ibc->port);
errno = EPROTO;
return -1;
}
if (ibc->persist_method != RPMEM_PM_GPSPM &&
ibc->persist_method != RPMEM_PM_APM) {
ERR("invalid persistency method received -- %u",
ibc->persist_method);
errno = EPROTO;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_port -- (internal) verify target node port number
*/
static int
rpmem_obc_check_port(const struct rpmem_target_info *info)
{
if (!(info->flags & RPMEM_HAS_SERVICE))
return 0;
if (*info->service == '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
errno = 0;
char *endptr;
long port = strtol(info->service, &endptr, 10);
if (errno || *endptr != '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
if (port < 1) {
ERR("port number must be positive -- '%s'", info->service);
goto err;
}
if (port > UINT16_MAX) {
ERR("port number too large -- '%s'", info->service);
goto err;
}
return 0;
err:
errno = EINVAL;
return -1;
}
/*
* rpmem_obc_close_conn -- (internal) close connection
*/
static void
rpmem_obc_close_conn(struct rpmem_obc *rpc)
{
rpmem_ssh_close(rpc->ssh);
(void) util_fetch_and_and64(&rpc->ssh, 0);
}
/*
* rpmem_obc_set_msg_hdr -- (internal) fill in the message header
*/
static void
rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp,
enum rpmem_msg_type type, size_t size)
{
hdrp->type = type;
hdrp->size = size;
}
/*
* rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field
*/
static void
rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc,
const char *desc, size_t size)
{
RPMEM_ASSERT(size <= UINT32_MAX);
RPMEM_ASSERT(size > 0);
pool_desc->size = (uint32_t)size;
memcpy(pool_desc->desc, desc, size);
pool_desc->desc[size - 1] = '\0';
}
/*
* rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request
* message
*/
static struct rpmem_msg_create *
rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_msg_create *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate create request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
if (pool_attr) {
pack_rpmem_pool_attr(pool_attr, &msg->pool_attr);
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg->pool_attr, 0, sizeof(msg->pool_attr));
}
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_req -- (internal) check request attributes
*/
static int
rpmem_obc_check_req(const struct rpmem_req_attr *req)
{
if (req->provider >= MAX_RPMEM_PROV) {
ERR("invalid provider specified -- %u", req->provider);
errno = EINVAL;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_hdr_resp -- (internal) check response message header
*/
static int
rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp,
enum rpmem_msg_type type, size_t size)
{
if (resp->type != type) {
ERR("invalid message type received -- %u", resp->type);
errno = EPROTO;
return -1;
}
if (resp->size != size) {
ERR("invalid message size received -- %lu", resp->size);
errno = EPROTO;
return -1;
}
if (resp->status >= MAX_RPMEM_ERR) {
ERR("invalid status received -- %u", resp->status);
errno = EPROTO;
return -1;
}
if (resp->status) {
enum rpmem_err status = (enum rpmem_err)resp->status;
ERR("%s", rpmem_util_proto_errstr(status));
errno = rpmem_util_proto_errno(status);
return -1;
}
return 0;
}
/*
* rpmem_obc_check_create_resp -- (internal) check create response message
*/
static int
rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP,
sizeof(struct rpmem_msg_create_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_get_res -- (internal) read response attributes
*/
static void
rpmem_obc_get_res(struct rpmem_resp_attr *res,
struct rpmem_msg_ibc_attr *ibc)
{
res->port = (unsigned short)ibc->port;
res->rkey = ibc->rkey;
res->raddr = ibc->raddr;
res->persist_method =
(enum rpmem_persist_method)ibc->persist_method;
res->nlanes = ibc->nlanes;
}
/*
* rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message
*/
static struct rpmem_msg_open *
rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
struct rpmem_msg_open *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate open request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_open_resp -- (internal) check open response message
*/
static int
rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP,
sizeof(struct rpmem_msg_open_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_check_close_resp -- (internal) check close response message
*/
static int
rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP,
sizeof(struct rpmem_msg_close_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_check_set_attr_resp -- (internal) check set attributes response
* message
*/
static int
rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP,
sizeof(struct rpmem_msg_set_attr_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_init -- initialize rpmem obc handle
*/
struct rpmem_obc *
rpmem_obc_init(void)
{
struct rpmem_obc *rpc = calloc(1, sizeof(*rpc));
if (!rpc) {
RPMEM_LOG(ERR, "!allocation of rpmem obc failed");
return NULL;
}
return rpc;
}
/*
* rpmem_obc_fini -- destroy rpmem obc handle
*
* This function must be called with the connection already closed -- after
* calling rpmem_obc_disconnect or after rpmem_obc_monitor has returned a
* value indicating that the connection was closed.
*/
void
rpmem_obc_fini(struct rpmem_obc *rpc)
{
free(rpc);
}
/*
* rpmem_obc_connect -- connect to target node
*
* Connects to the target node. The target must be given in the following
* format: <addr>[:<port>]. If the port number is not specified, the default
* ssh port is used. The <addr> part is translated into an IP address.
*
* Returns an error if the connection is already established.
*/
int
rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info)
{
if (rpmem_obc_is_connected(rpc)) {
errno = EALREADY;
goto err_notconnected;
}
if (rpmem_obc_check_port(info))
goto err_port;
rpc->ssh = rpmem_ssh_open(info);
if (!rpc->ssh)
goto err_ssh_open;
return 0;
err_ssh_open:
err_port:
err_notconnected:
return -1;
}
/*
* rpmem_obc_disconnect -- close the connection to target node
*
* Returns error if socket is not connected.
*/
int
rpmem_obc_disconnect(struct rpmem_obc *rpc)
{
if (rpmem_obc_is_connected(rpc)) {
rpmem_obc_close_conn(rpc);
return 0;
}
errno = ENOTCONN;
return -1;
}
/*
* rpmem_obc_monitor -- monitor connection with target node
*
* The nonblock variable indicates whether this function should return
* immediately (= 1) or may block (= 0).
*
* If the function detects that the socket was closed by the remote peer,
* it is closed on the local side as well, so there is no need to call the
* rpmem_obc_disconnect function. Please check the individual function
* descriptions to see which functions cannot be used once the connection
* has been closed.
*
* This function expects no data to be pending on the socket; if any data
* is pending, it returns an error and sets errno to EPROTO.
*
* Return values:
* 0 - not connected
* 1 - connected
* < 0 - error
*/
int
rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock)
{
if (!rpmem_obc_is_connected(rpc))
return 0;
return rpmem_ssh_monitor(rpc->ssh, nonblock);
}
/*
* rpmem_obc_create -- perform create request operation
*
* Returns error if connection has not been established yet.
*/
int
rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_create *msg =
rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending create request message");
rpmem_hton_msg_create(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending create request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "create request message sent");
RPMEM_LOG(INFO, "receiving create request response");
struct rpmem_msg_create_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving create request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "create request response received");
rpmem_ntoh_msg_create_resp(&resp);
if (rpmem_obc_check_create_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_open -- perform open request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_open *msg =
rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending open request message");
rpmem_hton_msg_open(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending open request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "open request message sent");
RPMEM_LOG(INFO, "receiving open request response");
struct rpmem_msg_open_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) {
ERR("!receiving open request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "open request response received");
rpmem_ntoh_msg_open_resp(&resp);
if (rpmem_obc_check_open_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
if (pool_attr)
unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_set_attr -- perform set attributes request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
struct rpmem_msg_set_attr msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg));
if (pool_attr) {
memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr));
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg.pool_attr, 0, sizeof(msg.pool_attr));
}
RPMEM_LOG(INFO, "sending set attributes request message");
rpmem_hton_msg_set_attr(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
ERR("!sending set attributes request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "set attributes request message sent");
RPMEM_LOG(INFO, "receiving set attributes request response");
struct rpmem_msg_set_attr_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving set attributes request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "set attributes request response received");
rpmem_ntoh_msg_set_attr_resp(&resp);
if (rpmem_obc_check_set_attr_resp(&resp))
goto err_msg_resp;
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
err_notconnected:
return -1;
}
/*
* rpmem_obc_close -- perform close request operation
*
* Returns error if connection is not already established.
*
* NOTE: this function does not close the connection, but sends close request
* message to remote node and receives a response. The connection must be
* closed using rpmem_obc_disconnect function.
*/
int
rpmem_obc_close(struct rpmem_obc *rpc, int flags)
{
if (!rpmem_obc_is_connected(rpc)) {
errno = ENOTCONN;
return -1;
}
struct rpmem_msg_close msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg));
msg.flags = (uint32_t)flags;
RPMEM_LOG(INFO, "sending close request message");
rpmem_hton_msg_close(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
RPMEM_LOG(ERR, "!sending close request failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request message sent");
RPMEM_LOG(INFO, "receiving close request response");
struct rpmem_msg_close_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
RPMEM_LOG(ERR, "!receiving close request response failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request response received");
rpmem_ntoh_msg_close_resp(&resp);
if (rpmem_obc_check_close_resp(&resp))
return -1;
return 0;
}
| 15,410 | 21.730088 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/blk.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemblk_fault_injection_enabled(void);
#else
static inline void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemblk_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,483 | 23.116505 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/libpmemblk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* libpmemblk.c -- pmem entry points for libpmemblk
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemblk.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "blk.h"
/*
* The environment variable from which the config is loaded directly. The
* string cannot contain any comments or extraneous whitespace characters.
*/
#define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE"
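/*
 * For example (a hypothetical configuration -- the exact set of available
 * ctl entry points depends on the build), the config may be supplied either
 * inline or via a file:
 *
 *	PMEMBLK_CONF="sds.at_create=0" ./app
 *	PMEMBLK_CONF_FILE=/etc/pmemblk.conf ./app
 */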
/*
* blk_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
blk_ctl_init_and_load(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL,
pbp, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
BLK_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL,
pbp, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
BLK_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pbp)
ctl_delete(pbp->ctl);
return -1;
}
/*
* libpmemblk_init -- (internal) load-time initialization for blk
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemblk_init(void)
{
ctl_global_register();
if (blk_ctl_init_and_load(NULL))
FATAL("error: %s", pmemblk_errormsg());
common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR,
PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
LOG(3, NULL);
}
/*
* libpmemblk_fini -- libpmemblk cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemblk_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmemblk_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMBLK_MAJOR_VERSION) {
ERR("libpmemblk major version mismatch (need %u, found %u)",
major_required, PMEMBLK_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMBLK_MINOR_VERSION) {
ERR("libpmemblk minor version mismatch (need %u, found %u)",
minor_required, PMEMBLK_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_check_version -- see if lib meets application version requirements
*/
const char *
pmemblk_check_version(unsigned major_required, unsigned minor_required)
{
return pmemblk_check_versionU(major_required, minor_required);
}
#else
/*
* pmemblk_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemblk_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemblk_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc.
*/
void
pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemblk_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemblk_errormsg -- return last error message
*/
const char *
pmemblk_errormsg(void)
{
return pmemblk_errormsgU();
}
#else
/*
* pmemblk_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemblk_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,318 | 20.487562 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/btt.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
#ifdef __cplusplus
}
#endif
#endif
| 1,908 | 30.816667 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/btt_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
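/*
 * The seq field distinguishes the current entry of a flog pair: values are
 * expected to cycle 01 -> 10 -> 11 -> 01 (00 meaning unused), and the entry
 * holding the more recent value in that cyclic order is treated as the
 * valid one (see btt_flog_get_valid() in btt.h).
 */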
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
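/*
 * Decoding a map entry (illustrative sketch): the low 30 bits hold the
 * internal (post-map) LBA and the top two bits hold the entry state, so for
 * a given entry e:
 *
 *	uint32_t lba = e & BTT_MAP_ENTRY_LBA_MASK;
 *	uint32_t state = e & ~BTT_MAP_ENTRY_LBA_MASK;
 *
 * where state is one of the BTT_MAP_ENTRY_* values above, and an all-zero
 * state means the block still has its initial identity mapping (cf.
 * map_entry_is_initial() in btt.h).
 */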
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
| 3,197 | 28.611111 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* blk.c -- block memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemblk.h"
#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Blk_create_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Blk_open_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
* lane_enter -- (internal) acquire a unique lane number
*/
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
unsigned mylane;
mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;
/* lane selected, grab the per-lane lock */
util_mutex_lock(&pbp->locks[mylane]);
*lane = mylane;
}
/*
* lane_exit -- (internal) drop lane lock
*/
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
util_mutex_unlock(&pbp->locks[mylane]);
}
/*
* nsread -- (internal) read data from the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)pbp->data + off, count);
return 0;
}
/*
* nswrite -- (internal) write data to the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
#ifdef DEBUG
/* grab debug write lock */
util_mutex_lock(&pbp->write_lock);
#endif
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
if (pbp->is_pmem)
pmem_memcpy_nodrain(dest, buf, count);
else
memcpy(dest, buf, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
#ifdef DEBUG
/* release debug write lock */
util_mutex_unlock(&pbp->write_lock);
#endif
if (pbp->is_pmem)
pmem_drain();
else
pmem_msync(dest, count);
return 0;
}
/*
* nsmap -- (internal) allow direct access to a range of a namespace
*
* The caller requests a range to be "mapped" but the return value
* may indicate a smaller amount (in which case the caller is expected
* to call back later for another mapping).
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);
ASSERT(((ssize_t)len) >= 0);
if (off + len >= pbp->datasize) {
ERR("offset + len (%zu) past end of data area (%zu)",
(size_t)off + len, pbp->datasize - 1);
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)pbp->data + off;
LOG(12, "returning addr %p", *addrp);
return (ssize_t)len;
}
/*
* nssync -- (internal) flush changes made to a namespace range
*
* This is used in conjunction with the addresses handed out by
* nsmap() above. There's no need to sync things written via
* nswrite() since those changes are flushed each time nswrite()
* is called.
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);
if (pbp->is_pmem)
pmem_persist(addr, len);
else
pmem_msync(addr, len);
}
/*
* nszero -- (internal) zero data in the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* zero the memory pool containing the BTT layout.
*/
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
pmem_memset_persist(dest, 0, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
return 0;
}
/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
.nsread = nsread,
.nswrite = nswrite,
.nszero = nszero,
.nsmap = nsmap,
.nssync = nssync,
.ns_is_zeroed = 0
};
/*
* blk_descr_create -- (internal) create block memory pool descriptor
*/
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);
/* create the required metadata */
pbp->bsize = htole32(bsize);
util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));
pbp->is_zeroed = zeroed;
util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}
/*
* blk_descr_check -- (internal) validate block memory pool descriptor
*/
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
LOG(3, "pbp %p bsize %zu", pbp, *bsize);
size_t hdr_bsize = le32toh(pbp->bsize);
if (*bsize && *bsize != hdr_bsize) {
ERR("wrong bsize (%zu), pool created with bsize %zu",
*bsize, hdr_bsize);
errno = EINVAL;
return -1;
}
*bsize = hdr_bsize;
LOG(3, "using block size from header: %zu", *bsize);
return 0;
}
/*
* blk_runtime_init -- (internal) initialize block memory pool runtime data
*/
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
LOG(3, "pbp %p bsize %zu rdonly %d",
pbp, bsize, rdonly);
/* remove volatile part of header */
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
sizeof(struct pool_hdr) -
sizeof(pbp->bsize) -
sizeof(pbp->is_zeroed));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
pbp->rdonly = rdonly;
pbp->data = (char *)pbp->addr +
roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
pbp->datasize = (size_t)
(((char *)pbp->addr + pbp->size) - (char *)pbp->data);
LOG(4, "data area %p data size %zu bsize %zu",
pbp->data, pbp->datasize, bsize);
long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1)
ncpus = 1;
ns_cb.ns_is_zeroed = pbp->is_zeroed;
/* things freed by "goto err" if not NULL */
struct btt *bttp = NULL;
os_mutex_t *locks = NULL;
bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
(unsigned)ncpus * 2, pbp, &ns_cb);
if (bttp == NULL)
goto err; /* btt_init set errno, called LOG */
pbp->bttp = bttp;
pbp->nlane = btt_nlane(pbp->bttp);
pbp->next_lane = 0;
if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
ERR("!Malloc for lane locks");
goto err;
}
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_init(&locks[i]);
pbp->locks = locks;
#ifdef DEBUG
/* initialize debug lock */
util_mutex_init(&pbp->write_lock);
#endif
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);
/* the data area should be kept read-only for debug version */
RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp)
btt_fini(bttp);
errno = oerrno;
return -1;
}
/*
* pmemblk_createU -- create a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
LOG(3, "path %s bsize %zu poolsize %zu mode %o",
path, bsize, poolsize, mode);
/* check if bsize is valid */
if (bsize == 0) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
if (bsize > UINT32_MAX) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
struct pool_set *set;
struct pool_attr adj_pool_attr = Blk_create_attr;
/* force set SDS feature */
if (SDS_at_create)
adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
else
adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;
if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
PMEMBLK_MIN_PART, &adj_pool_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
/* create pool descriptor */
blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, 0) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
* pmemblk_createW -- create a block memory pool
*/
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* blk_open_common -- (internal) open a block memory pool
*
* This routine does all the work, but takes a cow flag so internal
* calls can map a read-only pool if required.
*
* Passing in bsize == 0 means a valid pool header must exist (which
* will supply the block size).
*/
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
NULL, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (blk_descr_check(pbp, &bsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
ERR("pool initialization failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* pmemblk_openU -- open a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
LOG(3, "path %s bsize %zu", path, bsize);
return blk_open_common(path, bsize, COW_at_open ? POOL_OPEN_COW : 0);
}
#ifndef _WIN32
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
return pmemblk_openU(path, bsize);
}
#else
/*
* pmemblk_openW -- open a block memory pool
*/
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_openU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
btt_fini(pbp->bttp);
if (pbp->locks) {
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_destroy(&pbp->locks[i]);
Free((void *)pbp->locks);
}
#ifdef DEBUG
/* destroy debug lock */
util_mutex_destroy(&pbp->write_lock);
#endif
util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemblk_bsize -- return size of block for specified pool
*/
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return le32toh(pbp->bsize);
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return btt_nlba(pbp->bttp);
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
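/*
 * Illustrative usage of the public read/write API (a sketch only -- the
 * pool path, block size, and error handling below are placeholders chosen
 * for the example):
 *
 *	PMEMblkpool *pbp = pmemblk_create("/pmem/blkpool", 4096,
 *			PMEMBLK_MIN_POOL, 0666);
 *	if (pbp == NULL)
 *		exit(1);
 *	unsigned char block[4096] = { 0 };
 *	if (pmemblk_write(pbp, block, 0) < 0)
 *		perror("pmemblk_write");
 *	if (pmemblk_read(pbp, block, 0) < 0)
 *		perror("pmemblk_read");
 *	pmemblk_close(pbp);
 */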
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_error -- set the error state on a block in a block memory pool
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_checkU -- block memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
LOG(3, "path \"%s\" bsize %zu", path, bsize);
/* map the pool read-only */
PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
if (pbp == NULL)
return -1; /* errno set by blk_open_common() */
int retval = btt_check(pbp->bttp);
int oerrno = errno;
pmemblk_close(pbp);
errno = oerrno;
return retval;
}
#ifndef _WIN32
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
return pmemblk_checkU(path, bsize);
}
#else
/*
* pmemblk_checkW -- block memory pool consistency check
*/
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
int ret = pmemblk_checkU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemblk_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemblk_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemblk_ctl_get -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_getU(pbp, name, arg);
}
/*
* pmemblk_ctl_set -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_setU(pbp, name, arg);
}
/*
* pmemblk_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
* pmemblk_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_getU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_setU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_execU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmemblk_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 20,218 | 20.305585 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container_ravl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* container_ravl.c -- implementation of ravl-based block container
*/
#include "container_ravl.h"
#include "ravl.h"
#include "out.h"
#include "sys_util.h"
struct block_container_ravl {
struct block_container super;
struct ravl *tree;
};
/*
* container_compare_memblocks -- (internal) compares two memory blocks
*/
static int
container_compare_memblocks(const void *lhs, const void *rhs)
{
const struct memory_block *l = lhs;
const struct memory_block *r = rhs;
int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->block_off - (int64_t)r->block_off;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
/*
* container_ravl_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_ravl_insert_block(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct memory_block *e = m->m_ops->get_user_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
VALGRIND_ADD_TO_TX(e, sizeof(*e));
*e = *m;
VALGRIND_SET_CLEAN(e, sizeof(*e));
VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));
return ravl_insert(c->tree, e);
}
/*
* container_ravl_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_ravl_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL)
return ENOMEM;
struct memory_block *e = ravl_data(n);
*m = *e;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_rm_block_exact --
* (internal) removes exact match memory block
*/
static int
container_ravl_get_rm_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL);
if (n == NULL)
return ENOMEM;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_is_empty -- (internal) checks whether the container is empty
*/
static int
container_ravl_is_empty(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_empty(c->tree);
}
/*
* container_ravl_rm_all -- (internal) removes all elements from the tree
*/
static void
container_ravl_rm_all(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_clear(c->tree);
}
/*
* container_ravl_destroy -- (internal) destroys the container
*/
static void
container_ravl_destroy(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_delete(c->tree);
Free(bc);
}
/*
* Tree-based block container used to provide best-fit functionality to the
* bucket. The time complexity for this particular container is O(k) where k is
* the length of the key.
*
* The get methods also guarantee that the block with lowest possible address
* that best matches the requirements is provided.
*/
static const struct block_container_ops container_ravl_ops = {
.insert = container_ravl_insert_block,
.get_rm_exact = container_ravl_get_rm_block_exact,
.get_rm_bestfit = container_ravl_get_rm_block_bestfit,
.is_empty = container_ravl_is_empty,
.rm_all = container_ravl_rm_all,
.destroy = container_ravl_destroy,
};
/*
* container_new_ravl -- allocates and initializes a ravl container
*/
struct block_container *
container_new_ravl(struct palloc_heap *heap)
{
struct block_container_ravl *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_ravl_ops;
bc->tree = ravl_new(container_compare_memblocks);
if (bc->tree == NULL)
goto error_ravl_new;
return (struct block_container *)&bc->super;
error_ravl_new:
Free(bc);
error_container_malloc:
return NULL;
}
| 4,333 | 21.931217 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/heap_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
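/*
 * Worked example of the values above: with CHUNKSIZE = 256 KiB and a
 * 16-byte chunk_run_header, RUN_BASE_METADATA_VALUES is 2, so the default
 * bitmap uses 40 - 2 = 38 values (304 bytes, 2432 bits) and
 * RUN_DEFAULT_SIZE is 262144 - 16 - 304 = 261824 bytes of usable data for
 * a single-chunk run.
 */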
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
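/*
 * Illustrative validity check: for a well-formed chunk header no bits
 * outside of this mask may be set, i.e.
 * (hdr->flags & ~CHUNK_FLAGS_ALL_VALID) == 0 should hold.
 */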
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
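/*
 * Illustrative sketch, not part of the original header: how the accessors
 * above are typically composed to walk one zone of the heap. The `layout`
 * pointer and a valid `zone_id` are assumed to be provided by the caller;
 * the walk skips over multi-chunk blocks using the header's size_idx.
 */
#if 0
static inline void
example_walk_zone(struct heap_layout *layout, size_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(layout, zone_id);
	for (uint32_t i = 0; i < z->header.size_idx; ) {
		struct chunk_header *hdr = GET_CHUNK_HDR(layout, zone_id, i);
		/* a used/free/run block spans hdr->size_idx chunks */
		i += hdr->size_idx;
	}
}
#endif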
#ifdef __cplusplus
}
#endif
#endif
| 5,105 | 23.666667 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/alloc_class.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct run_descriptor rdsc;
};
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
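/*
 * Illustrative usage sketch, not part of the original header: a caller that
 * only needs the lookup path picks the class assigned to a requested
 * allocation size and asks how many units that request would occupy. The
 * helper name is hypothetical; `ac` is assumed to be a valid collection.
 */
#if 0
static inline ssize_t
example_units_for_size(struct alloc_class_collection *ac, size_t req)
{
	struct alloc_class *c = alloc_class_by_alloc_size(ac, req);
	if (c == NULL)
		return -1;
	return alloc_class_calc_size_idx(c, req);
}
#endif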
#ifdef __cplusplus
}
#endif
#endif
| 1,815 | 21.7 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/recycler.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.c -- implementation of run recycler
*/
#include "heap.h"
#include "recycler.h"
#include "vec.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "ravl.h"
#include "valgrind_internal.h"
#define THRESHOLD_MUL 4
/*
* recycler_element_cmp -- compares two recycler elements
*/
static int
recycler_element_cmp(const void *lhs, const void *rhs)
{
const struct recycler_element *l = lhs;
const struct recycler_element *r = rhs;
int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->free_space - (int64_t)r->free_space;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
struct recycler {
struct ravl *runs;
struct palloc_heap *heap;
/*
* How many unaccounted units there *might* be inside of the memory
* blocks stored in the recycler.
	 * The value is not meant to be accurate, but rather a rough measure of
	 * how often the memory block scores should be recalculated.
*
* Per-chunk unaccounted units are shared for all zones, which might
* lead to some unnecessary recalculations.
*/
size_t unaccounted_units[MAX_CHUNK];
size_t unaccounted_total;
size_t nallocs;
size_t *peak_arenas;
VEC(, struct recycler_element) recalc;
os_mutex_t lock;
};
/*
* recycler_new -- creates new recycler instance
*/
struct recycler *
recycler_new(struct palloc_heap *heap, size_t nallocs, size_t *peak_arenas)
{
struct recycler *r = Malloc(sizeof(struct recycler));
if (r == NULL)
goto error_alloc_recycler;
r->runs = ravl_new_sized(recycler_element_cmp,
sizeof(struct recycler_element));
if (r->runs == NULL)
goto error_alloc_tree;
r->heap = heap;
r->nallocs = nallocs;
r->peak_arenas = peak_arenas;
r->unaccounted_total = 0;
memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units));
VEC_INIT(&r->recalc);
util_mutex_init(&r->lock);
return r;
error_alloc_tree:
Free(r);
error_alloc_recycler:
return NULL;
}
/*
* recycler_delete -- deletes recycler instance
*/
void
recycler_delete(struct recycler *r)
{
VEC_DELETE(&r->recalc);
util_mutex_destroy(&r->lock);
ravl_delete(r->runs);
Free(r);
}
/*
 * recycler_element_new -- calculates how many free bytes a run has and the
 * largest request that the run can handle, and returns both as a
 * recycler element struct
*/
struct recycler_element
recycler_element_new(struct palloc_heap *heap, const struct memory_block *m)
{
/*
* Counting of the clear bits can race with a concurrent deallocation
* that operates on the same run. This race is benign and has absolutely
* no effect on the correctness of this algorithm. Ideally, we would
* avoid grabbing the lock, but helgrind gets very confused if we
* try to disable reporting for this function.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
struct recycler_element e = {
.free_space = 0,
.max_free_block = 0,
.chunk_id = m->chunk_id,
.zone_id = m->zone_id,
};
m->m_ops->calc_free(m, &e.free_space, &e.max_free_block);
util_mutex_unlock(lock);
return e;
}
/*
* recycler_put -- inserts new run into the recycler
*/
int
recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element)
{
int ret = 0;
util_mutex_lock(&r->lock);
ret = ravl_emplace_copy(r->runs, &element);
util_mutex_unlock(&r->lock);
return ret;
}
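/*
 * Illustrative sketch, not part of the original file: the usual call pattern
 * in the heap code is to score a run first and then hand it to the recycler.
 * The helper name is hypothetical; `heap` and `m` come from the caller.
 */
#if 0
static int
example_recycle_run(struct recycler *r, struct palloc_heap *heap,
	const struct memory_block *m)
{
	struct recycler_element e = recycler_element_new(heap, m);
	return recycler_put(r, m, e);
}
#endif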
/*
* recycler_get -- retrieves a chunk from the recycler
*/
int
recycler_get(struct recycler *r, struct memory_block *m)
{
int ret = 0;
util_mutex_lock(&r->lock);
struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0};
struct ravl_node *n = ravl_find(r->runs, &e,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL) {
ret = ENOMEM;
goto out;
}
struct recycler_element *ne = ravl_data(n);
m->chunk_id = ne->chunk_id;
m->zone_id = ne->zone_id;
ravl_remove(r->runs, n);
struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m);
m->size_idx = hdr->size_idx;
memblock_rebuild_state(r->heap, m);
out:
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_recalc -- recalculates the scores of runs in the recycler to match
* the updated persistent state
*/
struct empty_runs
recycler_recalc(struct recycler *r, int force)
{
struct empty_runs runs;
VEC_INIT(&runs);
uint64_t units = r->unaccounted_total;
size_t peak_arenas;
util_atomic_load64(r->peak_arenas, &peak_arenas);
uint64_t recalc_threshold =
THRESHOLD_MUL * peak_arenas * r->nallocs;
if (!force && units < recalc_threshold)
return runs;
if (util_mutex_trylock(&r->lock) != 0)
return runs;
/* If the search is forced, recalculate everything */
uint64_t search_limit = force ? UINT64_MAX : units;
uint64_t found_units = 0;
struct memory_block nm = MEMORY_BLOCK_NONE;
struct ravl_node *n;
struct recycler_element next = {0, 0, 0, 0};
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
do {
if ((n = ravl_find(r->runs, &next, p)) == NULL)
break;
p = RAVL_PREDICATE_GREATER;
struct recycler_element *ne = ravl_data(n);
next = *ne;
uint64_t chunk_units = r->unaccounted_units[ne->chunk_id];
if (!force && chunk_units == 0)
continue;
uint32_t existing_free_space = ne->free_space;
nm.chunk_id = ne->chunk_id;
nm.zone_id = ne->zone_id;
memblock_rebuild_state(r->heap, &nm);
struct recycler_element e = recycler_element_new(r->heap, &nm);
ASSERT(e.free_space >= existing_free_space);
uint64_t free_space_diff = e.free_space - existing_free_space;
found_units += free_space_diff;
if (free_space_diff == 0)
continue;
/*
* Decrease the per chunk_id counter by the number of nallocs
* found, increased by the blocks potentially freed in the
* active memory block. Cap the sub value to prevent overflow.
*/
util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id],
MIN(chunk_units, free_space_diff + r->nallocs));
ravl_remove(r->runs, n);
if (e.free_space == r->nallocs) {
memblock_rebuild_state(r->heap, &nm);
if (VEC_PUSH_BACK(&runs, nm) != 0)
ASSERT(0); /* XXX: fix after refactoring */
} else {
VEC_PUSH_BACK(&r->recalc, e);
}
} while (found_units < search_limit);
struct recycler_element *e;
VEC_FOREACH_BY_PTR(e, &r->recalc) {
ravl_emplace_copy(r->runs, e);
}
VEC_CLEAR(&r->recalc);
util_mutex_unlock(&r->lock);
util_fetch_and_sub64(&r->unaccounted_total, units);
return runs;
}
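/*
 * Illustrative sketch, not part of the original file: callers of
 * recycler_recalc() are expected to consume the returned vector of fully
 * free runs and release it. What "returning the run to the heap" means is
 * left to the heap code, so it is only hinted at here.
 */
#if 0
static void
example_drain_empty_runs(struct recycler *r)
{
	struct empty_runs e = recycler_recalc(r, 1 /* force */);
	struct memory_block *m;
	VEC_FOREACH_BY_PTR(m, &e) {
		/* give the now-empty run back to the heap as a free chunk */
	}
	VEC_DELETE(&e);
}
#endif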
/*
* recycler_inc_unaccounted -- increases the number of unaccounted units in the
* recycler
*/
void
recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m)
{
util_fetch_and_add64(&r->unaccounted_total, m->size_idx);
util_fetch_and_add64(&r->unaccounted_units[m->chunk_id],
m->size_idx);
}
| 6,997 | 22.019737 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/alloc_class.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.c -- implementation of allocation classes
*/
#include <float.h>
#include <string.h>
#include "alloc_class.h"
#include "heap_layout.h"
#include "util.h"
#include "out.h"
#include "bucket.h"
#include "critnib.h"
#define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\
((uint64_t)(map_idx_s) << 32 |\
(uint64_t)(flags_s) << 16 |\
(uint64_t)(size_idx_s))
/*
* Value used to mark a reserved spot in the bucket array.
*/
#define ACLASS_RESERVED ((void *)0xFFFFFFFFULL)
/*
* The last size that is handled by runs.
*/
#define MAX_RUN_SIZE (CHUNKSIZE * 10)
/*
* Maximum number of bytes the allocation class generation algorithm can decide
* to waste in a single run chunk.
*/
#define MAX_RUN_WASTED_BYTES 1024
/*
* Allocation categories are used for allocation classes generation. Each one
* defines the biggest handled size (in bytes) and step pct of the generation
* process. The step percentage defines maximum allowed external fragmentation
* for the category.
*/
#define MAX_ALLOC_CATEGORIES 9
/*
 * The first size (in bytes) which is actually used in the allocation
* class generation algorithm. All smaller sizes use the first predefined bucket
* with the smallest run unit size.
*/
#define FIRST_GENERATED_CLASS_SIZE 128
/*
* The granularity of the allocation class generation algorithm.
*/
#define ALLOC_BLOCK_SIZE_GEN 64
/*
* The first predefined allocation class size
*/
#define MIN_UNIT_SIZE 128
static const struct {
size_t size;
float step;
} categories[MAX_ALLOC_CATEGORIES] = {
/* dummy category - the first allocation class is predefined */
{FIRST_GENERATED_CLASS_SIZE, 0.05f},
{1024, 0.05f},
{2048, 0.05f},
{4096, 0.05f},
{8192, 0.05f},
{16384, 0.05f},
{32768, 0.05f},
{131072, 0.05f},
{393216, 0.05f},
};
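/*
 * Illustrative sketch, not part of the original file: with a 0.05 step, the
 * class that follows a 1024-byte one is generated at roughly 1024 * 1.05,
 * rounded up to the 64-byte generation granularity, i.e. 1088 bytes. The
 * hypothetical helper below mirrors the rounding done later in
 * alloc_class_collection_new().
 */
#if 0
static size_t
example_next_class_size(size_t n, float step)
{
	size_t mask = ALLOC_BLOCK_SIZE_GEN - 1;
	float stepf = (float)n * step;
	size_t stepi = (size_t)stepf;
	stepi = (stepf - (float)stepi < FLT_EPSILON) ? stepi : stepi + 1;
	return n + ((stepi + mask) & ~mask);
}
#endif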
#define RUN_UNIT_MAX_ALLOC 8U
/*
* Every allocation has to be a multiple of at least 8 because we need to
* ensure proper alignment of every pmem structure.
*/
#define ALLOC_BLOCK_SIZE 16
/*
* Converts size (in bytes) to number of allocation blocks.
*/
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))
/*
* Target number of allocations per run instance.
*/
#define RUN_MIN_NALLOCS 200
/*
* Hard limit of chunks per single run.
*/
#define RUN_SIZE_IDX_CAP (16)
#define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP
struct alloc_class_collection {
size_t granularity;
struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES];
/*
* The last size (in bytes) that is handled by runs, everything bigger
* uses the default class.
*/
size_t last_run_max_size;
/* maps allocation classes to allocation sizes, excluding the header! */
uint8_t *class_map_by_alloc_size;
/* maps allocation classes to run unit sizes */
struct critnib *class_map_by_unit_size;
int fail_on_missing_class;
int autogenerate_on_missing_class;
};
/*
* alloc_class_find_first_free_slot -- searches for the
* first available allocation class slot
*
* This function must be thread-safe because allocation classes can be created
* at runtime.
*/
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot)
{
LOG(10, NULL);
for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
if (util_bool_compare_and_swap64(&ac->aclasses[n],
NULL, ACLASS_RESERVED)) {
*slot = (uint8_t)n;
return 0;
}
}
return -1;
}
/*
* alloc_class_reserve -- reserve the specified class id
*/
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
LOG(10, NULL);
return util_bool_compare_and_swap64(&ac->aclasses[id],
NULL, ACLASS_RESERVED) ? 0 : -1;
}
/*
* alloc_class_reservation_clear -- removes the reservation on class id
*/
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
LOG(10, NULL);
int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
ACLASS_RESERVED, NULL);
ASSERT(ret);
}
/*
* alloc_class_new -- creates a new allocation class
*/
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx)
{
LOG(10, NULL);
struct alloc_class *c = Malloc(sizeof(*c));
if (c == NULL)
goto error_class_alloc;
c->unit_size = unit_size;
c->header_type = htype;
c->type = type;
c->flags = (uint16_t)
(header_type_to_flag[c->header_type] |
(alignment ? CHUNK_FLAG_ALIGNED : 0)) |
ALLOC_CLASS_DEFAULT_FLAGS;
switch (type) {
case CLASS_HUGE:
id = DEFAULT_ALLOC_CLASS_ID;
break;
case CLASS_RUN:
c->rdsc.alignment = alignment;
memblock_run_bitmap(&size_idx, c->flags, unit_size,
alignment, NULL, &c->rdsc.bitmap);
c->rdsc.nallocs = c->rdsc.bitmap.nbits;
c->rdsc.size_idx = size_idx;
/* these two fields are duplicated from class */
c->rdsc.unit_size = c->unit_size;
c->rdsc.flags = c->flags;
uint8_t slot = (uint8_t)id;
if (id < 0 && alloc_class_find_first_free_slot(ac,
&slot) != 0)
goto error_class_alloc;
id = slot;
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size,
ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)c->flags;
uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s,
flags_s, size_idx_s);
if (critnib_insert(ac->class_map_by_unit_size,
k, c) != 0) {
ERR("unable to register allocation class");
goto error_map_insert;
}
break;
default:
ASSERT(0);
}
c->id = (uint8_t)id;
ac->aclasses[c->id] = c;
return c;
error_map_insert:
Free(c);
error_class_alloc:
if (id >= 0)
alloc_class_reservation_clear(ac, id);
return NULL;
}
/*
* alloc_class_delete -- (internal) deletes an allocation class
*/
void
alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c)
{
LOG(10, NULL);
ac->aclasses[c->id] = NULL;
Free(c);
}
/*
* alloc_class_find_or_create -- (internal) searches for the
* biggest allocation class for which unit_size is evenly divisible by n.
* If no such class exists, create one.
*/
static struct alloc_class *
alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
uint64_t required_size_bytes = n * RUN_MIN_NALLOCS;
uint32_t required_size_idx = 1;
if (required_size_bytes > RUN_DEFAULT_SIZE) {
required_size_bytes -= RUN_DEFAULT_SIZE;
required_size_idx +=
CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes);
if (required_size_idx > RUN_SIZE_IDX_CAP)
required_size_idx = RUN_SIZE_IDX_CAP;
}
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE ||
c->rdsc.size_idx < required_size_idx)
continue;
if (n % c->unit_size == 0 &&
n / c->unit_size <= RUN_UNIT_MAX_ALLOC)
return c;
}
/*
	 * In order to minimize the wasted space at the end of the run, the
	 * run data size should leave the smallest possible remainder when
	 * divided by the allocation class unit size, preferably 0.
*/
struct run_bitmap b;
size_t runsize_bytes = 0;
do {
if (runsize_bytes != 0) /* don't increase on first iteration */
n += ALLOC_BLOCK_SIZE_GEN;
uint32_t size_idx = required_size_idx;
memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0,
NULL, &b);
runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size;
} while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES);
/*
* Now that the desired unit size is found the existing classes need
* to be searched for possible duplicates. If a class that can handle
* the calculated size already exists, simply return that.
*/
for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE)
continue;
if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC &&
n % c->unit_size == 0)
return c;
if (c->unit_size == n)
return c;
}
return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0,
required_size_idx);
}
/*
* alloc_class_find_min_frag -- searches for an existing allocation
* class that will provide the smallest internal fragmentation for the given
* size.
*/
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
struct alloc_class *best_c = NULL;
size_t lowest_waste = SIZE_MAX;
ASSERTne(n, 0);
/*
* Start from the largest buckets in order to minimize unit size of
* allocated memory blocks.
*/
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
/* can't use alloc classes /w no headers by default */
if (c == NULL || c->header_type == HEADER_NONE)
continue;
size_t real_size = n + header_type_to_size[c->header_type];
size_t units = CALC_SIZE_IDX(c->unit_size, real_size);
/* can't exceed the maximum allowed run unit max */
if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
continue;
if (c->unit_size * units == real_size)
return c;
size_t waste = (c->unit_size * units) - real_size;
/*
* If we assume that the allocation class is only ever going to
* be used with exactly one size, the effective internal
* fragmentation would be increased by the leftover
* memory at the end of the run.
*/
if (c->type == CLASS_RUN) {
size_t wasted_units = c->rdsc.nallocs % units;
size_t wasted_bytes = wasted_units * c->unit_size;
size_t waste_avg_per_unit = wasted_bytes /
c->rdsc.nallocs;
waste += waste_avg_per_unit;
}
if (best_c == NULL || lowest_waste > waste) {
best_c = c;
lowest_waste = waste;
}
}
ASSERTne(best_c, NULL);
return best_c;
}
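/*
 * Illustrative arithmetic, not part of the original file: for a hypothetical
 * 100-byte request served by a run class with 128-byte units and a compact
 * (16-byte) header, the real size is 116 bytes, one unit suffices, and the
 * waste minimized above is 128 - 116 = 12 bytes.
 */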
/*
* alloc_class_collection_new -- creates a new collection of allocation classes
*/
struct alloc_class_collection *
alloc_class_collection_new()
{
LOG(10, NULL);
struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
if (ac == NULL)
return NULL;
ac->granularity = ALLOC_BLOCK_SIZE;
ac->last_run_max_size = MAX_RUN_SIZE;
ac->fail_on_missing_class = 0;
ac->autogenerate_on_missing_class = 1;
size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;
if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
goto error;
if ((ac->class_map_by_unit_size = critnib_new()) == NULL)
goto error;
memset(ac->class_map_by_alloc_size, 0xFF, maps_size);
if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
CHUNKSIZE, 0, 1) == NULL)
goto error;
struct alloc_class *predefined_class =
alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
MIN_UNIT_SIZE, 0, 1);
if (predefined_class == NULL)
goto error;
for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
++i) {
ac->class_map_by_alloc_size[i] = predefined_class->id;
}
/*
* Based on the defined categories, a set of allocation classes is
	 * created. The unit size of those classes depends on the category's
	 * initial size and step.
*/
size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
do {
if (alloc_class_find_or_create(ac, n) == NULL)
goto error;
float stepf = (float)n * categories[c].step;
size_t stepi = (size_t)stepf;
stepi = (stepf - (float)stepi < FLT_EPSILON) ?
stepi : stepi + 1;
n += (stepi + (granularity_mask)) & ~granularity_mask;
} while (n <= categories[c].size);
}
/*
	 * Find the largest alloc class and use its unit size as the run allocation
* threshold.
*/
uint8_t largest_aclass_slot;
for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
largest_aclass_slot > 0 &&
ac->aclasses[largest_aclass_slot] == NULL;
--largest_aclass_slot) {
/* intentional NOP */
}
struct alloc_class *c = ac->aclasses[largest_aclass_slot];
/*
	 * The actual run might contain fewer unit blocks than the theoretical
* unit max variable. This may be the case for very large unit sizes.
*/
size_t real_unit_max = c->rdsc.nallocs < RUN_UNIT_MAX_ALLOC ?
c->rdsc.nallocs : RUN_UNIT_MAX_ALLOC;
size_t theoretical_run_max_size = c->unit_size * real_unit_max;
ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
theoretical_run_max_size : MAX_RUN_SIZE;
#ifdef DEBUG
/*
* Verify that each bucket's unit size points back to the bucket by the
* bucket map. This must be true for the default allocation classes,
* otherwise duplicate buckets will be created.
*/
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL && c->type == CLASS_RUN) {
ASSERTeq(i, c->id);
ASSERTeq(alloc_class_by_run(ac, c->unit_size,
c->flags, c->rdsc.size_idx), c);
}
}
#endif
return ac;
error:
alloc_class_collection_delete(ac);
return NULL;
}
/*
* alloc_class_collection_delete -- deletes the allocation class collection and
* all of the classes within it
*/
void
alloc_class_collection_delete(struct alloc_class_collection *ac)
{
LOG(10, NULL);
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL) {
alloc_class_delete(ac, c);
}
}
if (ac->class_map_by_unit_size)
critnib_delete(ac->class_map_by_unit_size);
Free(ac->class_map_by_alloc_size);
Free(ac);
}
/*
* alloc_class_assign_by_size -- (internal) chooses the allocation class that
* best approximates the provided size
*/
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
size_t size)
{
LOG(10, NULL);
size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
ac->granularity);
struct alloc_class *c = alloc_class_find_min_frag(ac,
class_map_index * ac->granularity);
ASSERTne(c, NULL);
/*
* We don't lock this array because locking this section here and then
	 * bailing out if someone else was faster would still be slower than
* just calculating the class and failing to assign the variable.
* We are using a compare and swap so that helgrind/drd don't complain.
*/
util_bool_compare_and_swap64(
&ac->class_map_by_alloc_size[class_map_index],
MAX_ALLOCATION_CLASSES, c->id);
return c;
}
/*
* alloc_class_by_alloc_size -- returns allocation class that is assigned
* to handle an allocation of the provided size
*/
struct alloc_class *
alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size)
{
if (size < ac->last_run_max_size) {
uint8_t class_id = ac->class_map_by_alloc_size[
SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)];
if (class_id == MAX_ALLOCATION_CLASSES) {
if (ac->fail_on_missing_class)
return NULL;
else if (ac->autogenerate_on_missing_class)
return alloc_class_assign_by_size(ac, size);
else
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
return ac->aclasses[class_id];
} else {
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
}
/*
* alloc_class_by_run -- returns the allocation class that has the given
* unit size
*/
struct alloc_class *
alloc_class_by_run(struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx)
{
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
ASSERT(size_idx <= UINT16_MAX);
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)flags;
return critnib_get(ac->class_map_by_unit_size,
RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s));
}
/*
* alloc_class_by_id -- returns the allocation class with an id
*/
struct alloc_class *
alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id)
{
return ac->aclasses[id];
}
/*
* alloc_class_calc_size_idx -- calculates how many units does the size require
*/
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size)
{
uint32_t size_idx = CALC_SIZE_IDX(c->unit_size,
size + header_type_to_size[c->header_type]);
if (c->type == CLASS_RUN) {
if (c->header_type == HEADER_NONE && size_idx != 1)
return -1;
else if (size_idx > RUN_UNIT_MAX)
return -1;
else if (size_idx > c->rdsc.nallocs)
return -1;
}
return size_idx;
}
| 16,240 | 24.496075 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/obj.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 6
#define OBJ_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK;
/* size of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET (sizeof(struct pmemobjpool)) /* lanes offset */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
/* PMEM_OBJ_POOL_HEAD_SIZE without the unused and unused2 arrays */
#define PMEM_OBJ_POOL_HEAD_SIZE 2196
#define PMEM_OBJ_POOL_UNUSED2_SIZE (PMEM_PAGESIZE \
- OBJ_DSC_P_UNUSED\
- PMEM_OBJ_POOL_HEAD_SIZE)
/*
//NEW
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(void);
//END NEW
*/
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
struct {
struct ravl *map;
os_mutex_t lock;
int verify;
} ulog_user_buffers;
void *user_data;
//New
//void *device;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
	char unused2[PMEM_OBJ_POOL_UNUSED2_SIZE - 28];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
#define ARENA_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 32))
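/*
 * Illustrative sketch, not part of the original header: the allocation class
 * id and arena id travel in the upper bits of the 64-bit flags argument, so
 * a caller-side packing helper (hypothetical name) is simply:
 */
#if 0
static inline uint64_t
example_pack_alloc_flags(uint16_t class_id, uint16_t arena_id)
{
	return ((uint64_t)class_id << 48) | ((uint64_t)arena_id << 32);
}
#endif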
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#if FAULT_INJECTION
void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemobj_fault_injection_enabled(void);
#else
static inline void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemobj_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 8,196 | 25.441935 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
struct list_entry {
PMEMoid pe_next;
PMEMoid pe_prev;
};
struct list_head {
PMEMoid pe_first;
PMEMmutex lock;
};
int list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
PMEMoid *oidp);
int list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
PMEMoid oid);
int list_remove_free_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head,
PMEMoid *oidp);
int list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid);
int list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid);
void list_move_oob(PMEMobjpool *pop,
struct list_head *head_old, struct list_head *head_new,
PMEMoid oid);
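/*
 * Illustrative sketch, not part of the original header: the pe_offset
 * arguments above are the byte offset of a struct list_entry embedded in the
 * user's node type, e.g. offsetof(struct example_node, entry) for the
 * hypothetical node below.
 */
#if 0
struct example_node {
	struct list_entry entry;
	uint64_t payload;
};
#endif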
#ifdef __cplusplus
}
#endif
#endif
| 1,376 | 20.184615 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/memops.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memops.c -- aggregated memory operations helper implementation
*
* The operation collects all of the required memory modifications that
* need to happen in an atomic way (all of them or none), and abstracts
* away the storage type (transient/persistent) and the underlying
* implementation of how it's actually performed - in some cases using
* the redo log is unnecessary and the allocation process can be sped up
* a bit by completely omitting that whole machinery.
*
* The modifications are not visible until the context is processed.
*/
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "ravl.h"
#include "valgrind_internal.h"
#include "vecq.h"
#include "sys_util.h"
#include <x86intrin.h>
#define ULOG_BASE_SIZE 1024
#define OP_MERGE_SEARCH 64
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
enum operation_state {
OPERATION_IDLE,
OPERATION_IN_PROGRESS,
OPERATION_CLEANUP,
};
struct operation_log {
size_t capacity; /* capacity of the ulog log */
size_t offset; /* data offset inside of the log */
struct ulog *ulog; /* DRAM allocated log of modifications */
};
/*
* operation_context -- context of an ongoing palloc operation
*/
struct operation_context {
enum log_type type;
ulog_extend_fn extend; /* function to allocate next ulog */
ulog_free_fn ulog_free; /* function to free next ulogs */
const struct pmem_ops *p_ops;
struct pmem_ops t_ops; /* used for transient data processing */
struct pmem_ops s_ops; /* used for shadow copy data processing */
size_t ulog_curr_offset; /* offset in the log for buffer stores */
size_t ulog_curr_capacity; /* capacity of the current log */
size_t ulog_curr_gen_num; /* transaction counter in the current log */
struct ulog *ulog_curr; /* current persistent log */
size_t total_logged; /* total amount of buffer stores in the logs */
struct ulog *ulog; /* pointer to the persistent ulog log */
size_t ulog_base_nbytes; /* available bytes in initial ulog log */
size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */
	int ulog_auto_reserve; /* whether to allow automatic ulog reservation */
int ulog_any_user_buffer; /* set if any user buffer is added */
struct ulog_next next; /* vector of 'next' fields of persistent ulog */
enum operation_state state; /* operation sanity check */
struct operation_log pshadow_ops; /* shadow copy of persistent ulog */
struct operation_log transient_ops; /* log of transient changes */
/* collection used to look for potential merge candidates */
VECQ(, struct ulog_entry_val *) merge_entries;
};
/*
* operation_log_transient_init -- (internal) initialize operation log
* containing transient memory resident changes
*/
static int
operation_log_transient_init(struct operation_log *log)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ULOG_BASE_SIZE;
log->ulog = src;
return 0;
}
/*
* operation_log_persistent_init -- (internal) initialize operation log
* containing persistent memory resident changes
*/
static int
operation_log_persistent_init(struct operation_log *log,
size_t ulog_base_nbytes)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ulog_base_nbytes;
memset(src->unused, 0, sizeof(src->unused));
log->ulog = src;
return 0;
}
/*
* operation_transient_clean -- cleans pmemcheck address state
*/
static int
operation_transient_clean(void *base, const void *addr, size_t len,
unsigned flags)
{
VALGRIND_SET_CLEAN(addr, len);
return 0;
}
/*
* operation_transient_drain -- noop
*/
static void
operation_transient_drain(void *base)
{
}
/*
* operation_transient_memcpy -- transient memcpy wrapper
*/
static void *
operation_transient_memcpy(void *base, void *dest, const void *src, size_t len,
unsigned flags)
{
return memcpy(dest, src, len);
}
/*
* operation_new -- creates new operation context
*/
struct operation_context *
operation_new(struct ulog *ulog, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type)
{
struct operation_context *ctx = Zalloc(sizeof(*ctx));
if (ctx == NULL) {
ERR("!Zalloc");
goto error_ctx_alloc;
}
ctx->ulog = ulog;
ctx->ulog_base_nbytes = ulog_base_nbytes;
ctx->ulog_capacity = ulog_capacity(ulog,
ulog_base_nbytes, p_ops);
ctx->extend = extend;
ctx->ulog_free = ulog_free;
ctx->state = OPERATION_IDLE;
VEC_INIT(&ctx->next);
ulog_rebuild_next_vec(ulog, &ctx->next, p_ops);
ctx->p_ops = p_ops;
ctx->type = type;
ctx->ulog_any_user_buffer = 0;
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr = NULL;
ctx->t_ops.base = NULL;
ctx->t_ops.flush = operation_transient_clean;
ctx->t_ops.memcpy = operation_transient_memcpy;
ctx->t_ops.drain = operation_transient_drain;
ctx->s_ops.base = p_ops->base;
ctx->s_ops.flush = operation_transient_clean;
ctx->s_ops.memcpy = operation_transient_memcpy;
ctx->s_ops.drain = operation_transient_drain;
VECQ_INIT(&ctx->merge_entries);
if (operation_log_transient_init(&ctx->transient_ops) != 0)
goto error_ulog_alloc;
if (operation_log_persistent_init(&ctx->pshadow_ops,
ulog_base_nbytes) != 0)
goto error_ulog_alloc;
return ctx;
error_ulog_alloc:
operation_delete(ctx);
error_ctx_alloc:
return NULL;
}
/*
* operation_delete -- deletes operation context
*/
void
operation_delete(struct operation_context *ctx)
{
VECQ_DELETE(&ctx->merge_entries);
VEC_DELETE(&ctx->next);
Free(ctx->pshadow_ops.ulog);
Free(ctx->transient_ops.ulog);
Free(ctx);
}
/*
* operation_user_buffer_remove -- removes range from the tree and returns 0
*/
static int
operation_user_buffer_remove(void *base, void *addr)
{
PMEMobjpool *pop = base;
if (!pop->ulog_user_buffers.verify)
return 0;
util_mutex_lock(&pop->ulog_user_buffers.lock);
struct ravl *ravl = pop->ulog_user_buffers.map;
enum ravl_predicate predict = RAVL_PREDICATE_EQUAL;
struct user_buffer_def range;
range.addr = addr;
range.size = 0;
struct ravl_node *n = ravl_find(ravl, &range, predict);
ASSERTne(n, NULL);
ravl_remove(ravl, n);
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return 0;
}
/*
* operation_free_logs -- free all logs except first
*/
void
operation_free_logs(struct operation_context *ctx, uint64_t flags)
{
int freed = ulog_free_next(ctx->ulog, ctx->p_ops, ctx->ulog_free,
operation_user_buffer_remove, flags);
if (freed) {
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
}
ASSERTeq(VEC_SIZE(&ctx->next), 0);
}
/*
* operation_merge -- (internal) performs operation on a field
*/
static inline void
operation_merge(struct ulog_entry_base *entry, uint64_t value,
ulog_operation_type type)
{
struct ulog_entry_val *e = (struct ulog_entry_val *)entry;
switch (type) {
case ULOG_OPERATION_AND:
e->value &= value;
break;
case ULOG_OPERATION_OR:
e->value |= value;
break;
case ULOG_OPERATION_SET:
e->value = value;
break;
default:
ASSERT(0); /* unreachable */
}
}
/*
* operation_try_merge_entry -- tries to merge the incoming log entry with
* existing entries
*
* Because this requires a reverse foreach, it cannot be implemented using
* the on-media ulog log structure since there's no way to find what's
* the previous entry in the log. Instead, the last N entries are stored
* in a collection and traversed backwards.
*/
static int
operation_try_merge_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type)
{
int ret = 0;
uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr);
struct ulog_entry_val *e;
VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) {
if (ulog_entry_offset(&e->base) == offset) {
if (ulog_entry_type(&e->base) == type) {
operation_merge(&e->base, value, type);
return 1;
} else {
break;
}
}
}
return ret;
}
/*
* operation_merge_entry_add -- adds a new entry to the merge collection,
* keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion.
*/
static void
operation_merge_entry_add(struct operation_context *ctx,
struct ulog_entry_val *entry)
{
if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH)
(void) VECQ_DEQUEUE(&ctx->merge_entries);
if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) {
/* this is fine, only runtime perf will get slower */
LOG(2, "out of memory - unable to track entries");
}
}
/*
 * operation_add_typed_entry -- adds a new entry to the current operation; if
 * an entry with the same ptr address and the same operation type was logged
 * recently, the new value is merged into it instead of appending a duplicate.
*/
int
operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type)
{
struct operation_log *oplog = log_type == LOG_PERSISTENT ?
&ctx->pshadow_ops : &ctx->transient_ops;
/*
* Always make sure to have one extra spare cacheline so that the
* ulog log entry creation has enough room for zeroing.
*/
if (oplog->offset + CACHELINE_SIZE == oplog->capacity) {
size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE;
struct ulog *ulog = Realloc(oplog->ulog,
SIZEOF_ULOG(ncapacity));
if (ulog == NULL)
return -1;
oplog->capacity += ULOG_BASE_SIZE;
oplog->ulog = ulog;
oplog->ulog->capacity = oplog->capacity;
/*
* Realloc invalidated the ulog entries that are inside of this
* vector, need to clear it to avoid use after free.
*/
VECQ_CLEAR(&ctx->merge_entries);
}
if (log_type == LOG_PERSISTENT &&
operation_try_merge_entry(ctx, ptr, value, type) != 0)
return 0;
struct ulog_entry_val *entry = ulog_entry_val_create(
oplog->ulog, oplog->offset, ptr, value, type,
log_type == LOG_TRANSIENT ? &ctx->t_ops : &ctx->s_ops);
if (log_type == LOG_PERSISTENT)
operation_merge_entry_add(ctx, entry);
oplog->offset += ulog_entry_size(&entry->base);
return 0;
}
/*
 * operation_add_entry -- adds a new entry to the current operation with the
 * log type (persistent or transient) autodetected based on the memory location
*/
int
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
ulog_operation_type type)
{
const struct pmem_ops *p_ops = ctx->p_ops;
PMEMobjpool *pop = (PMEMobjpool *)p_ops->base;
int from_pool = OBJ_OFF_IS_VALID(pop,
(uintptr_t)ptr - (uintptr_t)p_ops->base);
return operation_add_typed_entry(ctx, ptr, value, type,
from_pool ? LOG_PERSISTENT : LOG_TRANSIENT);
}
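/*
 * Illustrative usage sketch, not part of the original file: a typical caller
 * logs one or more modifications and then processes the whole operation
 * atomically. The helper name is hypothetical; `ctx` and `dst` are assumed
 * to be a valid redo-log context and a pool-resident destination.
 */
#if 0
static void
example_atomic_set(struct operation_context *ctx, uint64_t *dst, uint64_t val)
{
	operation_start(ctx);
	operation_add_entry(ctx, dst, val, ULOG_OPERATION_SET);
	operation_process(ctx);
	operation_finish(ctx, 0);
}
#endif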
/*
* operation_add_buffer -- adds a buffer operation to the log
*/
int
operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type)
{
size_t real_size = size + sizeof(struct ulog_entry_buf);
/* if there's no space left in the log, reserve some more */
if (ctx->ulog_curr_capacity == 0) {
ctx->ulog_curr_gen_num = ctx->ulog->gen_num;
if (operation_reserve(ctx, ctx->total_logged + real_size) != 0)
return -1;
ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog :
ulog_next(ctx->ulog_curr, ctx->p_ops);
ASSERTne(ctx->ulog_curr, NULL);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = ctx->ulog_curr->capacity;
}
size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity);
size_t data_size = curr_size - sizeof(struct ulog_entry_buf);
size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE);
/*
	 * To make sure that the log is consistent and contiguous, we need to
	 * make sure that the header of the entry that would be located
* immediately after this one is zeroed.
*/
struct ulog_entry_base *next_entry = NULL;
if (entry_size == ctx->ulog_curr_capacity) {
struct ulog *u = ulog_next(ctx->ulog_curr, ctx->p_ops);
if (u != NULL)
next_entry = (struct ulog_entry_base *)u->data;
} else {
size_t next_entry_offset = ctx->ulog_curr_offset + entry_size;
next_entry = (struct ulog_entry_base *)(ctx->ulog_curr->data +
next_entry_offset);
}
#ifdef USE_NDP_CLOBBER
int clear_next_header = 0;
if (next_entry != NULL){
clear_next_header = 1;
}
#else
if (next_entry != NULL){
ulog_clobber_entry(next_entry, ctx->p_ops);
}
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t startCycles = getCycle();
#endif
//ulogcount++;
#ifdef USE_NDP_CLOBBER
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops,
clear_next_header);
#else
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t endCycles = getCycle();
ulogCycles += endCycles - startCycles;
#endif
/* create a persistent log entry */
/* struct ulog_entry_buf *e = ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
*/
// ASSERT(entry_size == ulog_entry_size(&e->base));
// ASSERT(entry_size <= ctx->ulog_curr_capacity);
ctx->total_logged += entry_size;
ctx->ulog_curr_offset += entry_size;
ctx->ulog_curr_capacity -= entry_size;
/*
* Recursively add the data to the log until the entire buffer is
* processed.
*/
return size - data_size == 0 ? 0 : operation_add_buffer(ctx,
(char *)dest + data_size,
(char *)src + data_size,
size - data_size, type);
}
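/*
 * Illustrative usage sketch, not part of the original file: snapshotting an
 * object before it is modified in an undo-log transaction reduces to a
 * single operation_add_buffer() call that copies the current content of the
 * destination into the log. The helper name is hypothetical.
 */
#if 0
static int
example_snapshot(struct operation_context *ctx, void *addr, size_t size)
{
	/* log the current content of `addr` so it can be restored on abort */
	return operation_add_buffer(ctx, addr, addr, size,
		ULOG_OPERATION_BUF_CPY);
}
#endif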
/*
* operation_user_buffer_range_cmp -- compares addresses of
* user buffers
*/
int
operation_user_buffer_range_cmp(const void *lhs, const void *rhs)
{
const struct user_buffer_def *l = lhs;
const struct user_buffer_def *r = rhs;
if (l->addr > r->addr)
return 1;
else if (l->addr < r->addr)
return -1;
return 0;
}
/*
 * operation_user_buffer_try_insert -- adds a user buffer range to the tree;
 * if the buffer already exists in the tree, the function returns -1,
 * otherwise it returns 0
*/
static int
operation_user_buffer_try_insert(PMEMobjpool *pop,
struct user_buffer_def *userbuf)
{
int ret = 0;
if (!pop->ulog_user_buffers.verify)
return ret;
util_mutex_lock(&pop->ulog_user_buffers.lock);
void *addr_end = (char *)userbuf->addr + userbuf->size;
struct user_buffer_def search;
search.addr = addr_end;
struct ravl_node *n = ravl_find(pop->ulog_user_buffers.map,
&search, RAVL_PREDICATE_LESS_EQUAL);
if (n != NULL) {
struct user_buffer_def *r = ravl_data(n);
void *r_end = (char *)r->addr + r->size;
if (r_end > userbuf->addr && r->addr < addr_end) {
/* what was found overlaps with what is being added */
ret = -1;
goto out;
}
}
if (ravl_emplace_copy(pop->ulog_user_buffers.map, userbuf) == -1) {
ASSERTne(errno, EEXIST);
ret = -1;
}
out:
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return ret;
}
/*
* operation_user_buffer_verify_align -- verify if the provided buffer can be
* used as a transaction log, and if so - perform necessary alignments
*/
int
operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
/*
* Address of the buffer has to be aligned up, and the size
* has to be aligned down, taking into account the number of bytes
* the address was incremented by. The remaining size has to be large
* enough to contain the header and at least one ulog entry.
*/
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
ptrdiff_t size_diff = (intptr_t)ulog_by_offset(buffer_offset,
ctx->p_ops) - (intptr_t)userbuf->addr;
ssize_t capacity_unaligned = (ssize_t)userbuf->size - size_diff
- (ssize_t)sizeof(struct ulog);
if (capacity_unaligned < (ssize_t)CACHELINE_SIZE) {
ERR("Capacity insufficient");
return -1;
}
size_t capacity_aligned = ALIGN_DOWN((size_t)capacity_unaligned,
CACHELINE_SIZE);
userbuf->addr = ulog_by_offset(buffer_offset, ctx->p_ops);
userbuf->size = capacity_aligned + sizeof(struct ulog);
if (operation_user_buffer_try_insert(ctx->p_ops->base, userbuf)) {
ERR("Buffer currently used");
return -1;
}
return 0;
}
/*
* operation_add_user_buffer -- add user buffer to the ulog
*/
void
operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
size_t capacity = userbuf->size - sizeof(struct ulog);
ulog_construct(buffer_offset, capacity, ctx->ulog->gen_num,
1, ULOG_USER_OWNED, ctx->p_ops);
struct ulog *last_log;
/* if there is only one log */
if (!VEC_SIZE(&ctx->next))
last_log = ctx->ulog;
else /* get last element from vector */
last_log = ulog_by_offset(VEC_BACK(&ctx->next), ctx->p_ops);
ASSERTne(last_log, NULL);
size_t next_size = sizeof(last_log->next);
VALGRIND_ADD_TO_TX(&last_log->next, next_size);
last_log->next = buffer_offset;
pmemops_persist(ctx->p_ops, &last_log->next, next_size);
VEC_PUSH_BACK(&ctx->next, buffer_offset);
ctx->ulog_capacity += capacity;
operation_set_any_user_buffer(ctx, 1);
}
/*
* operation_set_auto_reserve -- set auto reserve value for context
*/
void
operation_set_auto_reserve(struct operation_context *ctx, int auto_reserve)
{
ctx->ulog_auto_reserve = auto_reserve;
}
/*
* operation_set_any_user_buffer -- set ulog_any_user_buffer value for context
*/
void
operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer)
{
ctx->ulog_any_user_buffer = any_user_buffer;
}
/*
* operation_get_any_user_buffer -- get ulog_any_user_buffer value from context
*/
int
operation_get_any_user_buffer(struct operation_context *ctx)
{
return ctx->ulog_any_user_buffer;
}
/*
* operation_process_persistent_redo -- (internal) process using ulog
*/
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_store(ctx->ulog, ctx->pshadow_ops.ulog,
ctx->pshadow_ops.offset, ctx->ulog_base_nbytes,
ctx->ulog_capacity,
&ctx->next, ctx->p_ops);
#ifdef USE_NDP_REDO
if(!use_ndp_redo){
#endif
ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
#ifdef USE_NDP_REDO
}
else {
//ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
//while(1){}
ulog_process_ndp(ctx->ulog, ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//while(1){}
}
#endif
// while(((*((uint32_t*)(ctx->p_ops->device)+254)) & 2) != 2){
//asm volatile ("clflush (%0)" :: "r"((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting!!\n");
// }
ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops);
}
/*
* operation_process_persistent_undo -- (internal) process using ulog
*/
static void
operation_process_persistent_undo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops);
}
/*
* operation_reserve -- (internal) reserves new capacity in persistent ulog log
*/
int
operation_reserve(struct operation_context *ctx, size_t new_capacity)
{
if (new_capacity > ctx->ulog_capacity) {
if (ctx->extend == NULL) {
ERR("no extend function present");
return -1;
}
if (ulog_reserve(ctx->ulog,
ctx->ulog_base_nbytes,
ctx->ulog_curr_gen_num,
ctx->ulog_auto_reserve,
&new_capacity, ctx->extend,
&ctx->next, ctx->p_ops) != 0)
return -1;
ctx->ulog_capacity = new_capacity;
}
return 0;
}
/*
* operation_init -- initializes runtime state of an operation
*/
void
operation_init(struct operation_context *ctx)
{
struct operation_log *plog = &ctx->pshadow_ops;
struct operation_log *tlog = &ctx->transient_ops;
VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) +
tlog->capacity);
VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) +
plog->capacity);
tlog->offset = 0;
plog->offset = 0;
VECQ_REINIT(&ctx->merge_entries);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr_gen_num = 0;
ctx->ulog_curr = NULL;
ctx->total_logged = 0;
ctx->ulog_auto_reserve = 1;
ctx->ulog_any_user_buffer = 0;
}
/*
* operation_start -- initializes and starts a new operation
*/
void
operation_start(struct operation_context *ctx)
{
operation_init(ctx);
ASSERTeq(ctx->state, OPERATION_IDLE);
ctx->state = OPERATION_IN_PROGRESS;
}
void
operation_resume(struct operation_context *ctx)
{
operation_start(ctx);
ctx->total_logged = ulog_base_nbytes(ctx->ulog);
}
/*
* operation_cancel -- cancels a running operation
*/
void
operation_cancel(struct operation_context *ctx)
{
ASSERTeq(ctx->state, OPERATION_IN_PROGRESS);
ctx->state = OPERATION_IDLE;
}
/*
* operation_process -- processes registered operations
*
* The order of processing is important: persistent, transient.
* This is because the transient entries that reside on persistent memory might
 * require a write to a location that is currently occupied by a valid
 * persistent state but becomes a transient state after the operation is
 * processed.
*/
void
operation_process(struct operation_context *ctx)
{
/*
* If there's exactly one persistent entry there's no need to involve
	 * the redo log. We can simply assign the value; the operation will be
* atomic.
*/
int redo_process = ctx->type == LOG_TYPE_REDO &&
ctx->pshadow_ops.offset != 0;
if (redo_process &&
ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) {
struct ulog_entry_base *e = (struct ulog_entry_base *)
ctx->pshadow_ops.ulog->data;
ulog_operation_type t = ulog_entry_type(e);
if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND ||
t == ULOG_OPERATION_OR) {
			ulog_entry_apply(e, 1, ctx->p_ops); // could not be effective in NDP
redo_process = 0;
}
}
if (redo_process) {
operation_process_persistent_redo(ctx); //ndp
ctx->state = OPERATION_CLEANUP;
} else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) {
operation_process_persistent_undo(ctx);
ctx->state = OPERATION_CLEANUP;
}
/* process transient entries with transient memory ops */
if (ctx->transient_ops.offset != 0)
ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops); //where is this used?
}
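/*
 * Illustrative note (editorial sketch, not part of the original sources):
 * the single-entry fast path above matters for the common one-store case.
 * For example, a caller that only does
 *
 *	operation_add_entry(ctx, &head->pe_first.off, new_off,
 *		ULOG_OPERATION_SET);
 *	operation_process(ctx);
 *
 * ends up with exactly one 8-byte SET entry, which is applied in place via
 * ulog_entry_apply() -- an aligned 8-byte store is failure-atomic on pmem,
 * so no redo-log processing is needed.
 */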
/*
* operation_finish -- finalizes the operation
*/
void
operation_finish(struct operation_context *ctx, unsigned flags)
{
ASSERTne(ctx->state, OPERATION_IDLE);
if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0)
ctx->state = OPERATION_CLEANUP;
if (ctx->ulog_any_user_buffer) {
flags |= ULOG_ANY_USER_BUFFER;
ctx->state = OPERATION_CLEANUP;
}
if (ctx->state != OPERATION_CLEANUP)
goto out;
if (ctx->type == LOG_TYPE_UNDO) {
int ret = ulog_clobber_data(ctx->ulog,
ctx->total_logged, ctx->ulog_base_nbytes,
&ctx->next, ctx->ulog_free,
operation_user_buffer_remove,
ctx->p_ops, flags);
if (ret == 0)
goto out;
} else if (ctx->type == LOG_TYPE_REDO) {
int ret = ulog_free_next(ctx->ulog, ctx->p_ops,
ctx->ulog_free, operation_user_buffer_remove,
flags);
if (ret == 0)
goto out;
}
/* clobbering shrunk the ulog */
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
out:
ctx->state = OPERATION_IDLE;
}
| 24,116 | 25.589857 | 113 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/stats.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.c -- implementation of statistics
*/
#include "obj.h"
#include "stats.h"
STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);
STATS_CTL_HANDLER(transient, run_allocated, heap_run_allocated);
STATS_CTL_HANDLER(transient, run_active, heap_run_active);
static const struct ctl_node CTL_NODE(heap)[] = {
STATS_CTL_LEAF(persistent, curr_allocated),
STATS_CTL_LEAF(transient, run_allocated),
STATS_CTL_LEAF(transient, run_active),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled
*/
static int
CTL_READ_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
enum pobj_stats_enabled *arg_out = arg;
*arg_out = pop->stats->enabled;
return 0;
}
/*
* stats_enabled_parser -- parses the stats enabled type
*/
static int
stats_enabled_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_stats_enabled *enabled = dest;
ASSERTeq(dest_size, sizeof(enum pobj_stats_enabled));
int bool_out;
if (ctl_arg_boolean(arg, &bool_out, sizeof(bool_out)) == 0) {
*enabled = bool_out ?
POBJ_STATS_ENABLED_BOTH : POBJ_STATS_DISABLED;
return 0;
}
if (strcmp(vstr, "disabled") == 0) {
*enabled = POBJ_STATS_DISABLED;
} else if (strcmp(vstr, "both") == 0) {
*enabled = POBJ_STATS_ENABLED_BOTH;
} else if (strcmp(vstr, "persistent") == 0) {
*enabled = POBJ_STATS_ENABLED_PERSISTENT;
} else if (strcmp(vstr, "transient") == 0) {
*enabled = POBJ_STATS_ENABLED_TRANSIENT;
} else {
ERR("invalid enable type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting
*/
static int
CTL_WRITE_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
pop->stats->enabled = *(enum pobj_stats_enabled *)arg;
return 0;
}
static const struct ctl_argument CTL_ARG(enabled) = {
.dest_size = sizeof(enum pobj_stats_enabled),
.parsers = {
CTL_ARG_PARSER(sizeof(enum pobj_stats_enabled),
stats_enabled_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(stats)[] = {
CTL_CHILD(heap),
CTL_LEAF_RW(enabled),
CTL_NODE_END
};
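/*
 * Usage sketch (illustrative; assumes a valid pop handle): the nodes above
 * are reachable through the public ctl interface, e.g.:
 *
 *	enum pobj_stats_enabled en = POBJ_STATS_ENABLED_BOTH;
 *	pmemobj_ctl_set(pop, "stats.enabled", &en);
 *
 *	uint64_t allocated;
 *	pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
 */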
/*
* stats_new -- allocates and initializes statistics instance
*/
struct stats *
stats_new(PMEMobjpool *pop)
{
struct stats *s = Malloc(sizeof(*s));
if (s == NULL) {
ERR("!Malloc");
return NULL;
}
s->enabled = POBJ_STATS_ENABLED_TRANSIENT;
s->persistent = &pop->stats_persistent;
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(s->persistent, sizeof(*s->persistent));
s->transient = Zalloc(sizeof(struct stats_transient));
if (s->transient == NULL)
goto error_transient_alloc;
return s;
error_transient_alloc:
Free(s);
return NULL;
}
/*
* stats_delete -- deletes statistics instance
*/
void
stats_delete(PMEMobjpool *pop, struct stats *s)
{
pmemops_persist(&pop->p_ops, s->persistent,
sizeof(struct stats_persistent));
Free(s->transient);
Free(s);
}
/*
* stats_ctl_register -- registers ctl nodes for statistics
*/
void
stats_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, stats);
}
| 3,293 | 20.671053 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/heap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
#define HEAP_ARENA_PER_THREAD (0)
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id,
uint16_t arena_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
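/*
 * Typical usage sketch (illustrative only; class_id and size_idx are assumed
 * to come from the caller's alloc class, MEMORY_BLOCK_NONE from memblock.h):
 *
 *	struct bucket *b = heap_bucket_acquire(heap, class_id,
 *		HEAP_ARENA_PER_THREAD);
 *	struct memory_block m = MEMORY_BLOCK_NONE;
 *	m.size_idx = size_idx;
 *	if (heap_get_bestfit_block(heap, b, &m) == 0)
 *		; ... m now describes the allocated block ...
 *	heap_bucket_release(heap, b);
 */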
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_force_recycle(struct palloc_heap *heap);
void
heap_discard_run(struct palloc_heap *heap, struct memory_block *m);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
unsigned heap_get_narenas_total(struct palloc_heap *heap);
unsigned heap_get_narenas_max(struct palloc_heap *heap);
int heap_set_narenas_max(struct palloc_heap *heap, unsigned size);
unsigned heap_get_narenas_auto(struct palloc_heap *heap);
unsigned heap_get_thread_arena_id(struct palloc_heap *heap);
int heap_arena_create(struct palloc_heap *heap);
struct bucket **
heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id);
int heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id);
int heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id,
int automatic);
void heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 3,719 | 26.969925 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/list.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* list.c -- implementation of persistent atomic lists module
*/
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "sync.h"
#include "valgrind_internal.h"
#include "memops.h"
#define PREV_OFF (offsetof(struct list_entry, pe_prev) + offsetof(PMEMoid, off))
#define NEXT_OFF (offsetof(struct list_entry, pe_next) + offsetof(PMEMoid, off))
/*
* list_args_common -- common arguments for operations on list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* entry_ptr - list entry structure of element
*/
struct list_args_common {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_entry *entry_ptr;
};
/*
* list_args_insert -- arguments for inserting element to list
*
* head - list head
* dest - destination element OID
* dest_entry_ptr - list entry of destination element
* before - insert before or after destination element
*/
struct list_args_insert {
struct list_head *head;
PMEMoid dest;
struct list_entry *dest_entry_ptr;
int before;
};
/*
* list_args_reinsert -- arguments for reinserting element on list
*
* head - list head
* entry_ptr - list entry of old element
* obj_doffset - offset to element's data relative to pmemobj pool
*/
struct list_args_reinsert {
struct list_head *head;
struct list_entry *entry_ptr;
uint64_t obj_doffset;
};
/*
* list_args_remove -- arguments for removing element from list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* head - list head
* entry_ptr - list entry structure of element
*/
struct list_args_remove {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_head *head;
struct list_entry *entry_ptr;
};
/*
* list_mutexes_lock -- (internal) grab one or two locks in ascending
* address order
*/
static inline int
list_mutexes_lock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2)
return pmemobj_mutex_lock(pop, &head1->lock);
PMEMmutex *lock1;
PMEMmutex *lock2;
if ((uintptr_t)&head1->lock < (uintptr_t)&head2->lock) {
lock1 = &head1->lock;
lock2 = &head2->lock;
} else {
lock1 = &head2->lock;
lock2 = &head1->lock;
}
int ret;
if ((ret = pmemobj_mutex_lock(pop, lock1)))
goto err;
if ((ret = pmemobj_mutex_lock(pop, lock2)))
goto err_unlock;
return 0;
err_unlock:
pmemobj_mutex_unlock(pop, lock1);
err:
return ret;
}
/*
* list_mutexes_unlock -- (internal) release one or two locks
*/
static inline void
list_mutexes_unlock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2) {
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
return;
}
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
pmemobj_mutex_unlock_nofail(pop, &head2->lock);
}
/*
* list_get_dest -- (internal) return destination object ID
*
* If the input dest is not OID_NULL returns dest.
* If the input dest is OID_NULL and before is set returns first element.
 * If the input dest is OID_NULL and before is not set, returns last element.
*/
static inline PMEMoid
list_get_dest(PMEMobjpool *pop, struct list_head *head, PMEMoid dest,
ssize_t pe_offset, int before)
{
if (dest.off)
return dest;
if (head->pe_first.off == 0 || !!before == POBJ_LIST_DEST_HEAD)
return head->pe_first;
struct list_entry *first_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)head->pe_first.off + pe_offset));
return first_ptr->pe_prev;
}
/*
* list_set_oid_redo_log -- (internal) set PMEMoid value using redo log
*/
static size_t
list_set_oid_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited)
{
ASSERT(OBJ_PTR_IS_VALID(pop, oidp));
if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) {
if (oidp_inited)
ASSERTeq(oidp->pool_uuid_lo, 0);
operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
ULOG_OPERATION_SET);
}
operation_add_entry(ctx, &oidp->off, obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_update_head -- (internal) update pe_first entry in list head
*/
static size_t
list_update_head(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_head *head, uint64_t first_offset)
{
LOG(15, NULL);
operation_add_entry(ctx, &head->pe_first.off, first_offset,
ULOG_OPERATION_SET);
if (head->pe_first.pool_uuid_lo == 0) {
operation_add_entry(ctx, &head->pe_first.pool_uuid_lo,
pop->uuid_lo, ULOG_OPERATION_SET);
}
return 0;
}
/*
* u64_add_offset -- (internal) add signed offset to unsigned integer and check
* for overflows
*/
static void
u64_add_offset(uint64_t *value, ssize_t off)
{
uint64_t prev = *value;
if (off >= 0) {
*value += (size_t)off;
ASSERT(*value >= prev); /* detect overflow */
} else {
*value -= (size_t)-off;
ASSERT(*value < prev);
}
}
/*
* list_fill_entry_persist -- (internal) fill new entry using persist function
*
* Used for newly allocated objects.
*/
static void
list_fill_entry_persist(PMEMobjpool *pop, struct list_entry *entry_ptr,
uint64_t next_offset, uint64_t prev_offset)
{
LOG(15, NULL);
VALGRIND_ADD_TO_TX(entry_ptr, sizeof(*entry_ptr));
entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_next.off = next_offset;
entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_prev.off = prev_offset;
VALGRIND_REMOVE_FROM_TX(entry_ptr, sizeof(*entry_ptr));
pmemops_persist(&pop->p_ops, entry_ptr, sizeof(*entry_ptr));
}
/*
* list_fill_entry_redo_log -- (internal) fill new entry using redo log
*
* Used to update entry in existing object.
*/
static size_t
list_fill_entry_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_common *args,
uint64_t next_offset, uint64_t prev_offset, int set_uuid)
{
LOG(15, NULL);
struct pmem_ops *ops = &pop->p_ops;
ASSERTne(args->entry_ptr, NULL);
ASSERTne(args->obj_doffset, 0);
if (set_uuid) {
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
/* don't need to fill pool uuid using redo log */
args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr));
} else {
ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo);
ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo);
}
/* set current->next and current->prev using redo log */
uint64_t next_off_off = args->obj_doffset + NEXT_OFF;
uint64_t prev_off_off = args->obj_doffset + PREV_OFF;
u64_add_offset(&next_off_off, args->pe_offset);
u64_add_offset(&prev_off_off, args->pe_offset);
void *next_ptr = (char *)pop + next_off_off;
void *prev_ptr = (char *)pop + prev_off_off;
operation_add_entry(ctx, next_ptr, next_offset, ULOG_OPERATION_SET);
operation_add_entry(ctx, prev_ptr, prev_offset, ULOG_OPERATION_SET);
return 0;
}
/*
* list_remove_single -- (internal) remove element from single list
*/
static size_t
list_remove_single(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_remove *args)
{
LOG(15, NULL);
if (args->entry_ptr->pe_next.off == args->obj_doffset) {
/* only one element on list */
ASSERTeq(args->head->pe_first.off, args->obj_doffset);
ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset);
return list_update_head(pop, ctx, args->head, 0);
} else {
/* set next->prev = prev and prev->next = next */
uint64_t next_off = args->entry_ptr->pe_next.off;
uint64_t next_prev_off = next_off + PREV_OFF;
u64_add_offset(&next_prev_off, args->pe_offset);
uint64_t prev_off = args->entry_ptr->pe_prev.off;
uint64_t prev_next_off = prev_off + NEXT_OFF;
u64_add_offset(&prev_next_off, args->pe_offset);
void *prev_ptr = (char *)pop + next_prev_off;
void *next_ptr = (char *)pop + prev_next_off;
operation_add_entry(ctx, prev_ptr, prev_off,
ULOG_OPERATION_SET);
operation_add_entry(ctx, next_ptr, next_off,
ULOG_OPERATION_SET);
if (args->head->pe_first.off == args->obj_doffset) {
/* removing element is the first one */
return list_update_head(pop, ctx,
args->head, next_off);
} else {
return 0;
}
}
}
/*
* list_insert_before -- (internal) insert element at offset before an element
*/
static size_t
list_insert_before(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest and current->prev = dest->prev */
*next_offset = args->dest.off;
*prev_offset = args->dest_entry_ptr->pe_prev.off;
/* dest->prev = current and dest->prev->next = current */
uint64_t dest_prev_off = args->dest.off + PREV_OFF;
u64_add_offset(&dest_prev_off, args_common->pe_offset);
uint64_t dest_prev_next_off = args->dest_entry_ptr->pe_prev.off +
NEXT_OFF;
u64_add_offset(&dest_prev_next_off, args_common->pe_offset);
void *dest_prev_ptr = (char *)pop + dest_prev_off;
void *dest_prev_next_ptr = (char *)pop + dest_prev_next_off;
operation_add_entry(ctx, dest_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_prev_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_after -- (internal) insert element at offset after an element
*/
static size_t
list_insert_after(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest->next and current->prev = dest */
*next_offset = args->dest_entry_ptr->pe_next.off;
*prev_offset = args->dest.off;
/* dest->next = current and dest->next->prev = current */
uint64_t dest_next_off = args->dest.off + NEXT_OFF;
u64_add_offset(&dest_next_off, args_common->pe_offset);
uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off +
PREV_OFF;
u64_add_offset(&dest_next_prev_off, args_common->pe_offset);
void *dest_next_ptr = (char *)pop + dest_next_off;
void *dest_next_prev_ptr = (char *)pop + dest_next_prev_off;
operation_add_entry(ctx, dest_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_next_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_user -- (internal) insert element at offset to a user list
*/
static size_t
list_insert_user(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
if (args->dest.off == 0) {
/* inserting the first element on list */
ASSERTeq(args->head->pe_first.off, 0);
/* set loop on current element */
*next_offset = args_common->obj_doffset;
*prev_offset = args_common->obj_doffset;
/* update head */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
} else {
if (args->before) {
/* inserting before dest */
list_insert_before(pop, ctx, args, args_common,
next_offset, prev_offset);
if (args->dest.off == args->head->pe_first.off) {
/* current element at first position */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
}
} else {
/* inserting after dest */
list_insert_after(pop, ctx, args, args_common,
next_offset, prev_offset);
}
}
return 0;
}
/*
 * list_insert_new -- allocate and insert element to the user list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, must be locked
 * dest - destination on user list
 * before - insert before or after destination on user list
 * size - size of allocation
* constructor - object's constructor
* arg - argument for object's constructor
* oidp - pointer to target object ID
*/
static int
list_insert_new(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
int ret;
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct pobj_action reserved;
if (palloc_reserve(&pop->heap, size, constructor, arg,
type_num, 0, 0, 0, &reserved) != 0) {
ERR("!palloc_reserve");
ret = -1;
goto err_pmalloc;
}
uint64_t obj_doffset = reserved.heap.offset;
struct operation_context *ctx = lane->external;
operation_start(ctx);
ASSERT((ssize_t)pe_offset >= 0);
dest = list_get_dest(pop, user_head, dest,
(ssize_t)pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset);
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = user_head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = obj_doffset,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop,
ctx, &args, &args_common,
&next_offset, &prev_offset);
/* don't need to use redo log for filling new element */
list_fill_entry_persist(pop, entry_ptr,
next_offset, prev_offset);
if (oidp != NULL) {
if (OBJ_PTR_IS_VALID(pop, oidp)) {
list_set_oid_redo_log(pop, ctx,
oidp, obj_doffset, 0);
} else {
oidp->off = obj_doffset;
oidp->pool_uuid_lo = pop->uuid_lo;
}
}
palloc_publish(&pop->heap, &reserved, 1, ctx);
ret = 0;
err_pmalloc:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_insert_new_user -- allocate and insert element to the user list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation
* constructor - object's constructor
* arg - argument for object's constructor
* oidp - pointer to target object ID
*/
int
list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
ret = list_insert_new(pop, pe_offset, user_head,
dest, before, size, type_num, constructor, arg, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
ASSERT(ret == 0 || ret == -1);
return ret;
}
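/*
 * Usage sketch (illustrative, not part of the original sources): appending a
 * new "struct foo" that embeds a "struct list_entry entry" field at the tail
 * of a user list; type_num, constructor and arg are assumed to be supplied
 * by the caller:
 *
 *	PMEMoid oid;
 *	list_insert_new_user(pop, offsetof(struct foo, entry), head,
 *		OID_NULL, 0, sizeof(struct foo), type_num,
 *		constructor, arg, &oid);
 *
 * With dest == OID_NULL and before == 0, the element is linked after the
 * last element (see list_get_dest()).
 */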
/*
* list_insert -- insert object to a single list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid dest, int before,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
struct lane *lane;
lane_hold(pop, &lane);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head, dest, pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)oid.off + pe_offset));
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)dest.off + pe_offset));
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop, ctx,
&args, &args_common, &next_offset, &prev_offset);
/* fill entry of existing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, 1);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_remove_free -- remove element from the user list and free an object
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, *must* be locked
* oidp - pointer to target object ID
*/
static void
list_remove_free(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct pobj_action deferred;
palloc_defer_free(&pop->heap, oidp->off, &deferred);
uint64_t obj_doffset = oidp->off;
ASSERT((ssize_t)pe_offset >= 0);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = user_head,
.entry_ptr = entry_ptr,
.obj_doffset = obj_doffset
};
/* remove from user list */
list_remove_single(pop, ctx, &args);
/* clear the oid */
if (OBJ_PTR_IS_VALID(pop, oidp))
list_set_oid_redo_log(pop, ctx, oidp, 0, 1);
else
oidp->off = 0;
palloc_publish(&pop->heap, &deferred, 1, ctx);
lane_release(pop);
}
/*
 * list_remove_free_user -- remove element from the user list and free an object
 *
 * pop - pmemobj pool handle
* pe_offset - offset to list entry on user list relative to user data
* user_head - user list head
* oidp - pointer to target object ID
*/
int
list_remove_free_user(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
list_remove_free(pop, pe_offset, user_head, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
return 0;
}
/*
* list_remove -- remove object from list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* oid - target object ID
*/
int
list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + (size_t)pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = head,
.entry_ptr = entry_ptr,
.obj_doffset = oid.off,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
/* remove element from user list */
list_remove_single(pop, ctx, &args);
/* clear next and prev offsets in removing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, 0, 0, 0);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
* list_move -- move object between two lists
*
* pop - pmemobj handle
* pe_offset_old - offset to old list entry relative to user data
* head_old - old list head
* pe_offset_new - offset to new list entry relative to user data
* head_new - new list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head_old, NULL);
ASSERTne(head_new, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
/*
* Grab locks in specified order to avoid dead-locks.
*
* XXX performance improvement: initialize oob locks at pool opening
*/
if ((ret = list_mutexes_lock(pop, head_new, head_old))) {
errno = ret;
LOG(2, "list_mutexes_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head_new, dest,
(ssize_t)pe_offset_new, before);
struct list_entry *entry_ptr_old =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_old);
struct list_entry *entry_ptr_new =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_new);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset_new);
if (head_old == head_new) {
/* moving within the same list */
if (dest.off == oid.off)
goto unlock;
if (before && dest_entry_ptr->pe_prev.off == oid.off) {
if (head_old->pe_first.off != dest.off)
goto unlock;
list_update_head(pop, ctx,
head_old, oid.off);
goto redo_last;
}
if (!before && dest_entry_ptr->pe_next.off == oid.off) {
if (head_old->pe_first.off != oid.off)
goto unlock;
list_update_head(pop, ctx,
head_old, entry_ptr_old->pe_next.off);
goto redo_last;
}
}
ASSERT((ssize_t)pe_offset_old >= 0);
struct list_args_remove args_remove = {
.pe_offset = (ssize_t)pe_offset_old,
.head = head_old,
.entry_ptr = entry_ptr_old,
.obj_doffset = oid.off,
};
struct list_args_insert args_insert = {
.head = head_new,
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.before = before,
};
ASSERT((ssize_t)pe_offset_new >= 0);
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr_new,
.pe_offset = (ssize_t)pe_offset_new,
};
uint64_t next_offset;
uint64_t prev_offset;
/* remove element from user list */
list_remove_single(pop, ctx, &args_remove);
/* insert element to user list */
list_insert_user(pop, ctx, &args_insert,
&args_common, &next_offset, &prev_offset);
/* offsets differ, move is between different list entries - set uuid */
int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0;
/* fill next and prev offsets of moving element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, set_uuid);
redo_last:
unlock:
operation_process(ctx);
operation_finish(ctx, 0);
list_mutexes_unlock(pop, head_new, head_old);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
| 24,297 | 24.848936 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/memops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
enum operation_log_type {
LOG_PERSISTENT, /* log of persistent modifications */
LOG_TRANSIENT, /* log of transient memory modifications */
MAX_OPERATION_LOG_TYPE
};
enum log_type {
LOG_TYPE_UNDO,
LOG_TYPE_REDO,
MAX_LOG_TYPE,
};
struct user_buffer_def {
void *addr;
size_t size;
};
#ifdef GET_NDP_BREAKDOWN
extern uint64_t ulogCycles;
#endif
#ifdef USE_NDP_REDO
extern int use_ndp_redo;
#endif
struct operation_context;
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type);
void operation_init(struct operation_context *ctx);
void operation_start(struct operation_context *ctx);
void operation_resume(struct operation_context *ctx);
void operation_delete(struct operation_context *ctx);
void operation_free_logs(struct operation_context *ctx, uint64_t flags);
int operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type);
int operation_add_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type);
int operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type);
int operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_set_auto_reserve(struct operation_context *ctx,
int auto_reserve);
void operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer);
int operation_get_any_user_buffer(struct operation_context *ctx);
int operation_user_buffer_range_cmp(const void *lhs, const void *rhs);
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
void operation_process(struct operation_context *ctx);
void operation_finish(struct operation_context *ctx, unsigned flags);
void operation_cancel(struct operation_context *ctx);
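/*
 * Typical redo-log lifecycle (illustrative sketch; the ulog, p_ops and the
 * destination pointer are assumed to be owned by the caller):
 *
 *	struct operation_context *ctx = operation_new(redo, base_nbytes,
 *		extend_fn, free_fn, p_ops, LOG_TYPE_REDO);
 *	operation_start(ctx);
 *	operation_add_entry(ctx, dst, value, ULOG_OPERATION_SET);
 *	operation_process(ctx);
 *	operation_finish(ctx, 0);
 *	operation_delete(ctx);
 */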
#ifdef __cplusplus
}
#endif
#endif
| 2,467 | 26.422222 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/pmalloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
void pfree(PMEMobjpool *pop, uint64_t *off);
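/*
 * Usage sketch (illustrative; offp is assumed to point into the pool, since
 * the allocator updates it atomically through the lane's redo log):
 *
 *	pmalloc(pop, offp, size, 0, 0);
 *	... use OBJ_OFF_TO_PTR(pop, *offp) ...
 *	pfree(pop, offp);
 */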
/* external operation to be used together with context-aware palloc funcs */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
void pmalloc_operation_release(PMEMobjpool *pop);
void pmalloc_ctl_register(PMEMobjpool *pop);
int pmalloc_cleanup(PMEMobjpool *pop);
int pmalloc_boot(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 1,291 | 24.333333 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/recycler.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.h -- internal definitions of run recycler
*
* This is a container that stores runs that are currently not used by any of
* the buckets.
*/
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
VEC(empty_runs, struct memory_block);
struct recycler_element {
uint32_t max_free_block;
uint32_t free_space;
uint32_t chunk_id;
uint32_t zone_id;
};
struct recycler *recycler_new(struct palloc_heap *layout,
size_t nallocs, size_t *peak_arenas);
void recycler_delete(struct recycler *r);
struct recycler_element recycler_element_new(struct palloc_heap *heap,
const struct memory_block *m);
int recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element);
int recycler_get(struct recycler *r, struct memory_block *m);
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
const struct memory_block *m);
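/*
 * Usage sketch (illustrative; heap, m and size_idx are assumed to be provided
 * by the caller, MEMORY_BLOCK_NONE comes from memblock.h):
 *
 *	recycler_put(r, m, recycler_element_new(heap, m));
 *	...
 *	struct memory_block out = MEMORY_BLOCK_NONE;
 *	out.size_idx = size_idx;
 *	if (recycler_get(r, &out) == 0)
 *		... out now describes a reusable run ...
 */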
#ifdef __cplusplus
}
#endif
#endif
| 1,158 | 20.867925 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/palloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* palloc.h -- internal definitions for persistent allocator
*/
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
struct palloc_heap {
struct pmem_ops p_ops;
struct heap_layout *layout;
struct heap_rt *rt;
uint64_t *sizep;
uint64_t growsize;
struct stats *stats;
struct pool_set *set;
void *base;
int alloc_pattern;
};
struct memory_block;
typedef int (*palloc_constr)(void *base, void *ptr,
size_t usable_size, void *arg);
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct operation_context *ctx);
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action *act);
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act);
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt);
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx);
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
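/*
 * Reserve/publish sketch (illustrative; ctx is assumed to be a redo-log
 * operation context held by the caller, e.g. a lane's external context):
 *
 *	struct pobj_action act;
 *	if (palloc_reserve(heap, size, NULL, NULL, 0, 0, 0, 0, &act) == 0) {
 *		... initialize the reserved object, then make it visible: ...
 *		palloc_publish(heap, &act, 1, ctx);
 *	}
 */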
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
int palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
int palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt,
struct operation_context *ctx, struct pobj_defrag_result *result);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,006 | 25.377193 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container.h -- internal definitions for block containers
*/
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container {
const struct block_container_ops *c_ops;
struct palloc_heap *heap;
};
struct block_container_ops {
/* inserts a new memory block into the container */
int (*insert)(struct block_container *c, const struct memory_block *m);
/* removes exact match memory block */
int (*get_rm_exact)(struct block_container *c,
const struct memory_block *m);
/* removes and returns the best-fit memory block for size */
int (*get_rm_bestfit)(struct block_container *c,
struct memory_block *m);
/* checks whether the container is empty */
int (*is_empty)(struct block_container *c);
/* removes all elements from the container */
void (*rm_all)(struct block_container *c);
/* deletes the container */
void (*destroy)(struct block_container *c);
};
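/*
 * Usage sketch (illustrative): callers go through the c_ops vtable, e.g. a
 * bucket asking its container for the best-fit block of m.size_idx units:
 *
 *	if (c->c_ops->get_rm_bestfit(c, &m) == 0)
 *		... m now describes the removed block ...
 *
 * See container_seglists.c and container_ravl.c for concrete implementations
 * of these ops.
 */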
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 1,125 | 21.979592 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/stats.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.h -- definitions of statistics
*/
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#include "libpmemobj/ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
struct stats_transient {
uint64_t heap_run_allocated;
uint64_t heap_run_active;
};
struct stats_persistent {
uint64_t heap_curr_allocated;
};
struct stats {
enum pobj_stats_enabled enabled;
struct stats_transient *transient;
struct stats_persistent *persistent;
};
#define STATS_INC(stats, type, name, value) do {\
STATS_INC_##type(stats, name, value);\
} while (0)
#define STATS_INC_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_INC_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SUB(stats, type, name, value) do {\
STATS_SUB_##type(stats, name, value);\
} while (0)
#define STATS_SUB_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_SUB_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SET(stats, type, name, value) do {\
STATS_SET_##type(stats, name, value);\
} while (0)
#define STATS_SET_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->transient->name),\
(value), memory_order_release);\
} while (0)
#define STATS_SET_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->persistent->name),\
(value), memory_order_release);\
} while (0)
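/*
 * Usage sketch (illustrative): callers pick the counter class explicitly,
 * e.g. in the heap and allocator code:
 *
 *	STATS_INC(heap->stats, transient, heap_run_active, size);
 *	STATS_SUB(heap->stats, persistent, heap_curr_allocated, size);
 *
 * The macros are no-ops unless the matching class is enabled in
 * stats->enabled.
 */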
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
PMEMobjpool *pop = ctx;\
uint64_t *argv = arg;\
util_atomic_load_explicit64(&pop->stats->type->varname,\
argv, memory_order_acquire);\
return 0;\
}
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 2,990 | 26.440367 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/bucket.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* bucket.c -- bucket implementation
*
* Buckets manage volatile state of the heap. They are the abstraction layer
* between the heap-managed chunks/runs and memory allocations.
*
* Each bucket instance can have a different underlying container that is
* responsible for selecting blocks - which means that whether the allocator
* serves memory blocks in best/first/next -fit manner is decided during bucket
* creation.
*/
#include "alloc_class.h"
#include "bucket.h"
#include "heap.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* bucket_new -- creates a new bucket instance
*/
struct bucket *
bucket_new(struct block_container *c, struct alloc_class *aclass)
{
if (c == NULL)
return NULL;
struct bucket *b = Malloc(sizeof(*b));
if (b == NULL)
return NULL;
b->container = c;
b->c_ops = c->c_ops;
util_mutex_init(&b->lock);
b->is_active = 0;
b->active_memory_block = NULL;
if (aclass && aclass->type == CLASS_RUN) {
b->active_memory_block =
Zalloc(sizeof(struct memory_block_reserved));
if (b->active_memory_block == NULL)
goto error_active_alloc;
}
b->aclass = aclass;
return b;
error_active_alloc:
util_mutex_destroy(&b->lock);
Free(b);
return NULL;
}
/*
* bucket_insert_block -- inserts a block into the bucket
*/
int
bucket_insert_block(struct bucket *b, const struct memory_block *m)
{
#if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED
if (On_memcheck || On_drd_or_hg) {
size_t size = m->m_ops->get_real_size(m);
void *data = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_NOACCESS(data, size);
VALGRIND_ANNOTATE_NEW_MEMORY(data, size);
}
#endif
return b->c_ops->insert(b->container, m);
}
/*
 * bucket_delete -- cleans up and deallocates the bucket instance
*/
void
bucket_delete(struct bucket *b)
{
if (b->active_memory_block)
Free(b->active_memory_block);
util_mutex_destroy(&b->lock);
b->c_ops->destroy(b->container);
Free(b);
}
/*
* bucket_current_resvp -- returns the pointer to the current reservation count
*/
int *
bucket_current_resvp(struct bucket *b)
{
return b->active_memory_block ? &b->active_memory_block->nresv : NULL;
}
| 2,251 | 21.52 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container_seglists.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container_seglists.c -- implementation of segregated lists block container
*
* This container is constructed from N (up to 64) intrusive lists and a
* single 8 byte bitmap that stores the information whether a given list is
* empty or not.
*/
#include "container_seglists.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#include "vecq.h"
#define SEGLIST_BLOCK_LISTS 64U
struct block_container_seglists {
struct block_container super;
struct memory_block m;
VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS];
uint64_t nonempty_lists;
};
/*
* container_seglists_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_seglists_insert_block(struct block_container *bc,
const struct memory_block *m)
{
ASSERT(m->chunk_id < MAX_CHUNK);
ASSERT(m->zone_id < UINT16_MAX);
ASSERTne(m->size_idx, 0);
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
if (c->nonempty_lists == 0)
c->m = *m;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
ASSERT(m->chunk_id == c->m.chunk_id);
ASSERT(m->zone_id == c->m.zone_id);
if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
return -1;
/* marks the list as nonempty */
c->nonempty_lists |= 1ULL << (m->size_idx - 1);
return 0;
}
/*
* container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
uint32_t i = 0;
/* applicable lists */
uint64_t size_mask = (1ULL << (m->size_idx - 1)) - 1;
uint64_t v = c->nonempty_lists & ~size_mask;
if (v == 0)
return ENOMEM;
/* finds the list that serves the smallest applicable size */
i = util_lssb_index64(v);
uint32_t block_offset = VECQ_DEQUEUE(&c->blocks[i]);
if (VECQ_SIZE(&c->blocks[i]) == 0) /* marks the list as empty */
c->nonempty_lists &= ~(1ULL << (i));
*m = c->m;
m->block_off = block_offset;
m->size_idx = i + 1;
return 0;
}
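/*
 * Worked example (illustrative): suppose the lists for 3- and 7-unit blocks
 * are nonempty, so nonempty_lists == (1 << 2) | (1 << 6) == 0x44. For a
 * request with size_idx == 4, size_mask == (1 << 3) - 1 == 0x7,
 * v == 0x44 & ~0x7 == 0x40, and util_lssb_index64(0x40) == 6 -- the block is
 * served from the 7-unit list (m->size_idx == 7).
 */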
/*
* container_seglists_is_empty -- (internal) checks whether the container is
* empty
*/
static int
container_seglists_is_empty(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
return c->nonempty_lists == 0;
}
/*
 * container_seglists_rm_all -- (internal) removes all elements from the container
*/
static void
container_seglists_rm_all(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_CLEAR(&c->blocks[i]);
c->nonempty_lists = 0;
}
/*
 * container_seglists_destroy -- (internal) deletes the container
*/
static void
container_seglists_destroy(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_DELETE(&c->blocks[i]);
Free(c);
}
/*
 * This container does not support retrieval of exact memory blocks, but it
 * does provide best-fit in O(1) time for unit sizes that do not exceed 64.
*/
static const struct block_container_ops container_seglists_ops = {
.insert = container_seglists_insert_block,
.get_rm_exact = NULL,
.get_rm_bestfit = container_seglists_get_rm_block_bestfit,
.is_empty = container_seglists_is_empty,
.rm_all = container_seglists_rm_all,
.destroy = container_seglists_destroy,
};
/*
* container_new_seglists -- allocates and initializes a seglists container
*/
struct block_container *
container_new_seglists(struct palloc_heap *heap)
{
struct block_container_seglists *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_seglists_ops;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_INIT(&bc->blocks[i]);
bc->nonempty_lists = 0;
return (struct block_container *)&bc->super;
error_container_malloc:
return NULL;
}
| 4,215 | 23.511628 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
#define TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT CACHELINE_SIZE
#define TX_SNAPSHOT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_SNAPSHOT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_buf)
#define TX_INTENT_LOG_BUFFER_ALIGNMENT CACHELINE_SIZE
#define TX_INTENT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_INTENT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_val)
struct tx_parameters {
size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 1,258 | 22.314815 | 68 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/critnib.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* critnib.c -- implementation of critnib tree
*
* It offers identity lookup (like a hashmap) and <= lookup (like a search
* tree). Unlike some hashing algorithms (cuckoo hash, perfect hashing) the
* complexity isn't constant, but for data sizes we expect it's several
* times as fast as cuckoo, and has no "stop the world" cases that would
* cause latency (ie, better worst case behaviour).
*/
/*
* STRUCTURE DESCRIPTION
*
* Critnib is a hybrid between a radix tree and DJ Bernstein's critbit:
 * it skips uninteresting radix nodes (ie, ones that would have
 * exactly one child); this requires adding to every node a field that
* describes the slice (4-bit in our case) that this radix level is for.
*
* This implementation also stores each node's path (ie, bits that are
* common to every key in that subtree) -- this doesn't help with lookups
* at all (unused in == match, could be reconstructed at no cost in <=
* after first dive) but simplifies inserts and removes. If we ever want
* that piece of memory it's easy to trim it down.
*/
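/*
 * Worked example (illustrative): with SLICE == 4, the keys 0x1234 and 0x1264
 * first differ in the nib at shift 4 (3 vs 6). The node holding them has
 * shift == 4, path == 0x1200 (the common prefix), and the two leaves sit in
 * child[3] and child[6] respectively, since
 * slice_index(key, 4) == (key >> 4) & NIB.
 */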
/*
* CONCURRENCY ISSUES
*
 * Reads are completely lock-free and sync-free, but only almost wait-free:
* if for some reason a read thread gets pathologically stalled, it will
* notice the data being stale and restart the work. In usual cases,
* the structure having been modified does _not_ cause a restart.
*
* Writes could be easily made lock-free as well (with only a cmpxchg
* sync), but this leads to problems with removes. A possible solution
* would be doing removes by overwriting by NULL w/o freeing -- yet this
* would lead to the structure growing without bounds. Complex per-node
* locks would increase concurrency but they slow down individual writes
* enough that in practice a simple global write lock works faster.
*
* Removes are the only operation that can break reads. The structure
* can do local RCU well -- the problem being knowing when it's safe to
* free. Any synchronization with reads would kill their speed, thus
* instead we have a remove count. The grace period is DELETED_LIFE,
* after which any read will notice staleness and restart its work.
*/
#include <errno.h>
#include <stdbool.h>
#include "alloc.h"
#include "critnib.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* A node that has been deleted is left untouched for this many delete
* cycles. Reads have guaranteed correctness if they took no longer than
* DELETED_LIFE concurrent deletes, otherwise they notice something is
* wrong and restart. The memory of deleted nodes is never freed to
* malloc nor their pointers lead anywhere wrong, thus a stale read will
* (temporarily) get a wrong answer but won't crash.
*
* There's no need to count writes as they never interfere with reads.
*
* Allowing stale reads (of arbitrarily old writes or of deletes less than
* DELETED_LIFE old) might sound counterintuitive, but it doesn't affect
* semantics in any way: the thread could have been stalled just after
* returning from our code. Thus, the guarantee is: the result of get() or
* find_le() is a value that was current at any point between the call
* start and end.
*/
#define DELETED_LIFE 16
#define SLICE 4
#define NIB ((1ULL << SLICE) - 1)
#define SLNODES (1 << SLICE)
typedef unsigned char sh_t;
struct critnib_node {
/*
* path is the part of a tree that's already traversed (be it through
* explicit nodes or collapsed links) -- ie, any subtree below has all
* those bits set to this value.
*
* nib is a 4-bit slice that's an index into the node's children.
*
* shift is the length (in bits) of the part of the key below this node.
*
 *            nib
 * |XXXXXXXXXX|?|*****|
 *     path    ^
 *              +-----+
 *               shift
*/
struct critnib_node *child[SLNODES];
uint64_t path;
sh_t shift;
};
struct critnib_leaf {
uint64_t key;
void *value;
};
struct critnib {
struct critnib_node *root;
/* pool of freed nodes: singly linked list, next at child[0] */
struct critnib_node *deleted_node;
struct critnib_leaf *deleted_leaf;
/* nodes removed but not yet eligible for reuse */
struct critnib_node *pending_del_nodes[DELETED_LIFE];
struct critnib_leaf *pending_del_leaves[DELETED_LIFE];
uint64_t remove_count;
os_mutex_t mutex; /* writes/removes */
};
/*
* atomic load
*/
static void
load(void *src, void *dst)
{
util_atomic_load_explicit64((uint64_t *)src, (uint64_t *)dst,
memory_order_acquire);
}
/*
* atomic store
*/
static void
store(void *dst, void *src)
{
util_atomic_store_explicit64((uint64_t *)dst, (uint64_t)src,
memory_order_release);
}
/*
* internal: is_leaf -- check tagged pointer for leafness
*/
static inline bool
is_leaf(struct critnib_node *n)
{
return (uint64_t)n & 1;
}
/*
* internal: to_leaf -- untag a leaf pointer
*/
static inline struct critnib_leaf *
to_leaf(struct critnib_node *n)
{
return (void *)((uint64_t)n & ~1ULL);
}
/*
* internal: path_mask -- return bit mask of a path above a subtree [shift]
* bits tall
*/
static inline uint64_t
path_mask(sh_t shift)
{
return ~NIB << shift;
}
/*
* internal: slice_index -- return index of child at the given nib
*/
static inline unsigned
slice_index(uint64_t key, sh_t shift)
{
return (unsigned)((key >> shift) & NIB);
}
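/*
* Worked example (illustrative only, not part of the original source),
* assuming SLICE == 4: for key == 0x1234 at a node with shift == 8,
* path_mask(8) == ~0xfffULL, so the node's path is 0x1234 & ~0xfffULL ==
* 0x1000, slice_index(0x1234, 8) == (0x1234 >> 8) & 0xf == 0x2 picks the
* child slot, and the low 8 bits (0x34) are resolved further down the tree.
*/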
/*
* critnib_new -- allocates a new critnib structure
*/
struct critnib *
critnib_new(void)
{
struct critnib *c = Zalloc(sizeof(struct critnib));
if (!c)
return NULL;
util_mutex_init(&c->mutex);
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->root, sizeof(c->root));
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->remove_count,
sizeof(c->remove_count));
return c;
}
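/*
* Illustrative usage sketch (an assumption for clarity, not part of the
* original source):
*
*	struct critnib *c = critnib_new();
*	critnib_insert(c, 0x1234, some_ptr);
*	void *v = critnib_get(c, 0x1234);      -- returns some_ptr
*	void *w = critnib_find_le(c, 0x1300);  -- also returns some_ptr
*	critnib_remove(c, 0x1234);
*	critnib_delete(c);
*/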
/*
* internal: delete_node -- recursively free (to malloc) a subtree
*/
static void
delete_node(struct critnib_node *__restrict n)
{
if (!is_leaf(n)) {
for (int i = 0; i < SLNODES; i++) {
if (n->child[i])
delete_node(n->child[i]);
}
Free(n);
} else {
Free(to_leaf(n));
}
}
/*
* critnib_delete -- destroy and free a critnib struct
*/
void
critnib_delete(struct critnib *c)
{
if (c->root)
delete_node(c->root);
util_mutex_destroy(&c->mutex);
for (struct critnib_node *m = c->deleted_node; m; ) {
struct critnib_node *mm = m->child[0];
Free(m);
m = mm;
}
for (struct critnib_leaf *k = c->deleted_leaf; k; ) {
struct critnib_leaf *kk = k->value;
Free(k);
k = kk;
}
for (int i = 0; i < DELETED_LIFE; i++) {
Free(c->pending_del_nodes[i]);
Free(c->pending_del_leaves[i]);
}
Free(c);
}
/*
* internal: free_node -- free (to internal pool, not malloc) a node.
*
* We cannot free them to malloc as a stalled reader thread may still walk
* through such nodes; it will notice the result being bogus but only after
* completing the walk, thus we need to ensure any freed nodes still point
* to within the critnib structure.
*/
static void
free_node(struct critnib *__restrict c, struct critnib_node *__restrict n)
{
if (!n)
return;
ASSERT(!is_leaf(n));
n->child[0] = c->deleted_node;
c->deleted_node = n;
}
/*
* internal: alloc_node -- allocate a node from our pool or from malloc
*/
static struct critnib_node *
alloc_node(struct critnib *__restrict c)
{
if (!c->deleted_node) {
struct critnib_node *n = Malloc(sizeof(struct critnib_node));
if (n == NULL)
ERR("!Malloc");
return n;
}
struct critnib_node *n = c->deleted_node;
c->deleted_node = n->child[0];
VALGRIND_ANNOTATE_NEW_MEMORY(n, sizeof(*n));
return n;
}
/*
* internal: free_leaf -- free (to internal pool, not malloc) a leaf.
*
* See free_node().
*/
static void
free_leaf(struct critnib *__restrict c, struct critnib_leaf *__restrict k)
{
if (!k)
return;
k->value = c->deleted_leaf;
c->deleted_leaf = k;
}
/*
* internal: alloc_leaf -- allocate a leaf from our pool or from malloc
*/
static struct critnib_leaf *
alloc_leaf(struct critnib *__restrict c)
{
if (!c->deleted_leaf) {
struct critnib_leaf *k = Malloc(sizeof(struct critnib_leaf));
if (k == NULL)
ERR("!Malloc");
return k;
}
struct critnib_leaf *k = c->deleted_leaf;
c->deleted_leaf = k->value;
VALGRIND_ANNOTATE_NEW_MEMORY(k, sizeof(*k));
return k;
}
/*
* critnib_insert -- write a key:value pair to the critnib structure
*
* Returns:
* • 0 on success
* • EEXIST if such a key already exists
* • ENOMEM if we're out of memory
*
* Takes a global write lock but doesn't stall any readers.
*/
int
critnib_insert(struct critnib *c, uint64_t key, void *value)
{
util_mutex_lock(&c->mutex);
struct critnib_leaf *k = alloc_leaf(c);
if (!k) {
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(k, sizeof(struct critnib_leaf));
k->key = key;
k->value = value;
struct critnib_node *kn = (void *)((uint64_t)k | 1);
struct critnib_node *n = c->root;
if (!n) {
c->root = kn;
util_mutex_unlock(&c->mutex);
return 0;
}
struct critnib_node **parent = &c->root;
struct critnib_node *prev = c->root;
while (n && !is_leaf(n) && (key & path_mask(n->shift)) == n->path) {
prev = n;
parent = &n->child[slice_index(key, n->shift)];
n = *parent;
}
if (!n) {
n = prev;
store(&n->child[slice_index(key, n->shift)], kn);
util_mutex_unlock(&c->mutex);
return 0;
}
uint64_t path = is_leaf(n) ? to_leaf(n)->key : n->path;
/* Find where the path differs from our key. */
uint64_t at = path ^ key;
if (!at) {
ASSERT(is_leaf(n));
free_leaf(c, to_leaf(kn));
/* fail instead of replacing */
util_mutex_unlock(&c->mutex);
return EEXIST;
}
/* and convert that to an index. */
sh_t sh = util_mssb_index64(at) & (sh_t)~(SLICE - 1);
struct critnib_node *m = alloc_node(c);
if (!m) {
free_leaf(c, to_leaf(kn));
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(m, sizeof(struct critnib_node));
for (int i = 0; i < SLNODES; i++)
m->child[i] = NULL;
m->child[slice_index(key, sh)] = kn;
m->child[slice_index(path, sh)] = n;
m->shift = sh;
m->path = key & path_mask(sh);
store(parent, m);
util_mutex_unlock(&c->mutex);
return 0;
}
/*
* critnib_remove -- delete a key from the critnib structure, return its value
*/
void *
critnib_remove(struct critnib *c, uint64_t key)
{
struct critnib_leaf *k;
void *value = NULL;
util_mutex_lock(&c->mutex);
struct critnib_node *n = c->root;
if (!n)
goto not_found;
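/*
* Recycle this remove cycle's pending-delete slot: whatever occupied it has
* already survived DELETED_LIFE removes, so no stale reader can still be
* walking through it and it can safely go back to the internal free pool.
*/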
uint64_t del = util_fetch_and_add64(&c->remove_count, 1) % DELETED_LIFE;
free_node(c, c->pending_del_nodes[del]);
free_leaf(c, c->pending_del_leaves[del]);
c->pending_del_nodes[del] = NULL;
c->pending_del_leaves[del] = NULL;
if (is_leaf(n)) {
k = to_leaf(n);
if (k->key == key) {
store(&c->root, NULL);
goto del_leaf;
}
goto not_found;
}
/*
* n and k are a parent:child pair (after the first iteration); k is the
* leaf that holds the key we're deleting.
*/
struct critnib_node **k_parent = &c->root;
struct critnib_node **n_parent = &c->root;
struct critnib_node *kn = n;
while (!is_leaf(kn)) {
n_parent = k_parent;
n = kn;
k_parent = &kn->child[slice_index(key, kn->shift)];
kn = *k_parent;
if (!kn)
goto not_found;
}
k = to_leaf(kn);
if (k->key != key)
goto not_found;
store(&n->child[slice_index(key, n->shift)], NULL);
/* Remove the node if there's only one remaining child. */
int ochild = -1;
for (int i = 0; i < SLNODES; i++) {
if (n->child[i]) {
if (ochild != -1)
goto del_leaf;
ochild = i;
}
}
ASSERTne(ochild, -1);
store(n_parent, n->child[ochild]);
c->pending_del_nodes[del] = n;
del_leaf:
value = k->value;
c->pending_del_leaves[del] = k;
not_found:
util_mutex_unlock(&c->mutex);
return value;
}
/*
* critnib_get -- query for a key ("==" match), returns value or NULL
*
* Doesn't need a lock but if many deletes happened while our thread was
* somehow stalled the query is restarted (as freed nodes remain unused only
* for a grace period).
*
* Counterintuitively, it's pointless to return the most current answer;
* we need only one that was valid at any point after the call started.
*/
void *
critnib_get(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
struct critnib_node *n;
load(&c->remove_count, &wrs1);
load(&c->root, &n);
/*
* critbit algorithm: dive into the tree, looking at nothing but
* each node's critical bit^H^H^Hnibble. This means we risk
* going the wrong way if our path is missing, but that's ok...
*/
while (n && !is_leaf(n))
load(&n->child[slice_index(key, n->shift)], &n);
/* ... as we check it at the end. */
struct critnib_leaf *k = to_leaf(n);
res = (n && k->key == key) ? k->value : NULL;
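/*
* Re-read the remove counter: if DELETED_LIFE or more removes completed
* while we were walking the tree, nodes we visited may already have been
* reused, so the answer cannot be trusted and the walk is restarted.
*/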
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
/*
* internal: find_successor -- return the rightmost non-null node in a subtree
*/
static void *
find_successor(struct critnib_node *__restrict n)
{
while (1) {
int nib;
for (nib = NIB; nib >= 0; nib--)
if (n->child[nib])
break;
if (nib < 0)
return NULL;
n = n->child[nib];
if (is_leaf(n))
return to_leaf(n)->value;
}
}
/*
* internal: find_le -- recursively search <= in a subtree
*/
static void *
find_le(struct critnib_node *__restrict n, uint64_t key)
{
if (!n)
return NULL;
if (is_leaf(n)) {
struct critnib_leaf *k = to_leaf(n);
return (k->key <= key) ? k->value : NULL;
}
/*
* is our key outside the subtree we're in?
*
* If we're inside, all bits above the nib will be identical; note
* that shift points at the nib's lower rather than upper edge, so it
* needs to be masked away as well.
*/
if ((key ^ n->path) >> (n->shift) & ~NIB) {
/*
* subtree is too far to the left?
* -> its rightmost value is good
*/
if (n->path < key)
return find_successor(n);
/*
* subtree is too far to the right?
* -> it has nothing of interest to us
*/
return NULL;
}
unsigned nib = slice_index(key, n->shift);
/* recursive call: follow the path */
{
struct critnib_node *m;
load(&n->child[nib], &m);
void *value = find_le(m, key);
if (value)
return value;
}
/*
* nothing in that subtree? We strayed from the path at this point,
* thus need to search every subtree to our left in this node. No
* need to dive into any but the first non-null, though.
*/
for (; nib > 0; nib--) {
struct critnib_node *m;
load(&n->child[nib - 1], &m);
if (m) {
n = m;
if (is_leaf(n))
return to_leaf(n)->value;
return find_successor(n);
}
}
return NULL;
}
/*
* critnib_find_le -- query for a key ("<=" match), returns value or NULL
*
* Same guarantees as critnib_get().
*/
void *
critnib_find_le(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
load(&c->remove_count, &wrs1);
struct critnib_node *n; /* avoid a subtle TOCTOU */
load(&c->root, &n);
res = n ? find_le(n, key) : NULL;
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
| 15,052 | 22.087423 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/memblock.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK, NULL}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
* chunks with size larger than 1 must have a footer in the last
* corresponding header array element - chunk_headers[chunk_id + size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
* The only block that matches that size is at the very end of the chunk
* list: |F......R|
*
* As the request was for a memory block of size 5, and this one's size is
* 7, there's a need to first split the chunk in two.
* 1) The last chunk header of the new allocation is marked as footer
* and the block after that one is marked as free: |F...RF.R|
* This is allowed and has no impact on the heap because this
* modification is made to a chunk header that is otherwise unused; in
* other words, the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses a redo log or directly changes
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
* 2) Update the footer (if needed) information of the last chunk which
* is the memory block being freed or its neighbor to the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
* The entire chunk header can be updated in a single fail-safe
* atomic operation because its size is only 64 bits (8 bytes).
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
* The entire chunk is subdivided into smaller blocks and has an
* additional metadata attached in the form of a bitmap - each bit
* corresponds to a single block.
* In this case there's no need to perform any coalescing or splitting
* on the persistent metadata.
* The bitmap is stored in a variable number of 64-bit values and
* because of the requirement of allocation fail-safe atomicity the
* maximum size index of a memory block from a run is 64 - since that's
* the limit of atomic write guarantee.
*
* The allocation/deallocation process is a single 8 byte write that
* sets/clears the corresponding bits. Depending on the user choice
* it can either be made atomically or using redo-log when grouped with
* other operations.
* It's also important to note that in a case of realloc it might so
* happen that a single 8 byte bitmap value has its bits both set and
* cleared - that's why the run memory block metadata changes operate
* on AND'ing or OR'ing a bitmask instead of directly setting the value.
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
/* runtime information necessary to create a run */
struct run_descriptor {
uint16_t flags; /* chunk flags for the run */
size_t unit_size; /* the size of a single unit in a run */
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
struct run_bitmap bitmap;
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Checks the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessary, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
/* calculates the ratio between occupied and unoccupied space */
unsigned (*fill_pct)(const struct memory_block *m);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
* Size index of the memory block represented in either multiple of
* CHUNKSIZE in the case of a huge chunk or in multiple of a run
* block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
* Number of preceding blocks in the chunk. In other words, the
* position of this memory block in run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
struct run_bitmap *cached_bitmap;
};
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
* the nresv variable can be in reservations made through palloc_reserve(). Only
* if the number of reservations equals 0 the structure can be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
struct bucket *bucket;
/*
* Number of reservations made from this run, the pointer to this value
* is stored in a user facing pobj_action structure. Decremented once
* the reservation is published or canceled.
*/
int nresv;
};
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
#ifdef __cplusplus
}
#endif
#endif
| 10,750 | 34.019544 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/pmalloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmalloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "heap.h"
#include "lane.h"
#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "palloc.h"
#include "pmalloc.h"
#include "alloc_class.h"
#include "set.h"
#include "mmap.h"
enum pmalloc_operation_type {
OPERATION_INTERNAL, /* used only for single, one-off operations */
OPERATION_EXTERNAL, /* used for everything else, incl. large redos */
MAX_OPERATION_TYPE,
};
struct lane_alloc_runtime {
struct operation_context *ctx[MAX_OPERATION_TYPE];
};
/*
* pmalloc_operation_hold_type -- acquires allocator lane section and returns a
* pointer to its operation context
*/
static struct operation_context *
pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type,
int start)
{
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = type == OPERATION_INTERNAL ?
lane->internal : lane->external;
if (start)
operation_start(ctx);
return ctx;
}
/*
* pmalloc_operation_hold_no_start -- acquires allocator lane section and returns
* a pointer to its operation context without starting it
*/
struct operation_context *
pmalloc_operation_hold_no_start(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0);
}
/*
* pmalloc_operation_hold -- acquires allocator lane section and returns a
* pointer to its redo log
*/
struct operation_context *
pmalloc_operation_hold(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1);
}
/*
* pmalloc_operation_release -- releases allocator lane section
*/
void
pmalloc_operation_release(PMEMobjpool *pop)
{
lane_release(pop);
}
/*
* pmalloc -- allocates a new block of memory
*
* The pool offset is written persistently into the off variable.
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmalloc_construct -- allocates a new block of memory with a constructor
*
* The block offset is written persistently into the off variable, but only
* after the constructor function has been called.
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, constructor, arg,
extra_field, object_flags, class_id, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* prealloc -- resizes in-place a previously allocated memory block
*
* The block offset is written persistently into the off variable.
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pfree -- deallocates a memory block previously allocated by pmalloc
*
* A zero value is written persistently into the off variable.
*
* Unlike pmalloc, this function returns no value -- the underlying
* deallocation is expected to succeed and is asserted internally.
*/
void
pfree(PMEMobjpool *pop, uint64_t *off)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL,
0, 0, 0, 0, ctx);
ASSERTeq(ret, 0);
pmalloc_operation_release(pop);
}
/*
* pmalloc_boot -- global runtime init routine of allocator section
*/
int
pmalloc_boot(PMEMobjpool *pop)
{
int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset,
pop->set->poolsize - pop->heap_offset, &pop->heap_size,
pop, &pop->p_ops,
pop->stats, pop->set);
if (ret)
return ret;
#if VG_MEMCHECK_ENABLED
if (On_memcheck)
palloc_heap_vg_open(&pop->heap, pop->vg_boot);
#endif
ret = palloc_buckets_init(&pop->heap);
if (ret)
palloc_heap_cleanup(&pop->heap);
return ret;
}
/*
* pmalloc_cleanup -- global cleanup routine of allocator section
*/
int
pmalloc_cleanup(PMEMobjpool *pop)
{
palloc_heap_cleanup(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(desc) -- creates a new allocation class
*/
static int
CTL_WRITE_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap);
struct pobj_alloc_class_desc *p = arg;
if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE ||
p->units_per_block <= 0) {
errno = EINVAL;
return -1;
}
if (p->alignment != 0 && p->unit_size % p->alignment != 0) {
ERR("unit size must be evenly divisible by alignment");
errno = EINVAL;
return -1;
}
if (p->alignment > (MEGABYTE * 2)) {
ERR("alignment cannot be larger than 2 megabytes");
errno = EINVAL;
return -1;
}
enum header_type lib_htype = MAX_HEADER_TYPES;
switch (p->header_type) {
case POBJ_HEADER_LEGACY:
lib_htype = HEADER_LEGACY;
break;
case POBJ_HEADER_COMPACT:
lib_htype = HEADER_COMPACT;
break;
case POBJ_HEADER_NONE:
lib_htype = HEADER_NONE;
break;
case MAX_POBJ_HEADER_TYPES:
default:
ERR("invalid header type");
errno = EINVAL;
return -1;
}
if (PMDK_SLIST_EMPTY(indexes)) {
if (alloc_class_find_first_free_slot(ac, &id) != 0) {
ERR("no available free allocation class identifier");
errno = EINVAL;
return -1;
}
} else {
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
if (alloc_class_reserve(ac, id) != 0) {
ERR("attempted to overwrite an allocation class");
errno = EEXIST;
return -1;
}
}
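/*
* Size the run: it has to span enough whole chunks to hold units_per_block
* units of unit_size bytes plus the run's base metadata, hence the
* CHUNK_ALIGN_UP below.
*/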
size_t runsize_bytes =
CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) +
RUN_BASE_METADATA_SIZE);
/* aligning the buffer might require up to 'alignment' bytes */
if (p->alignment != 0)
runsize_bytes += p->alignment;
uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE);
if (size_idx > UINT16_MAX)
size_idx = UINT16_MAX;
struct alloc_class *c = alloc_class_new(id,
heap_alloc_classes(&pop->heap), CLASS_RUN,
lib_htype, p->unit_size, p->alignment, size_idx);
if (c == NULL) {
errno = EINVAL;
return -1;
}
if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) {
alloc_class_delete(ac, c);
return -1;
}
p->class_id = c->id;
p->units_per_block = c->rdsc.nallocs;
return 0;
}
/*
* pmalloc_header_type_parser -- parses the alloc header type argument
*/
static int
pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_header_type *htype = dest;
ASSERTeq(dest_size, sizeof(enum pobj_header_type));
if (strcmp(vstr, "none") == 0) {
*htype = POBJ_HEADER_NONE;
} else if (strcmp(vstr, "compact") == 0) {
*htype = POBJ_HEADER_COMPACT;
} else if (strcmp(vstr, "legacy") == 0) {
*htype = POBJ_HEADER_LEGACY;
} else {
ERR("invalid header type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_READ_HANDLER(desc) -- reads the information about allocation class
*/
static int
CTL_READ_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
struct alloc_class *c = alloc_class_by_id(
heap_alloc_classes(&pop->heap), id);
if (c == NULL) {
ERR("class with the given id does not exist");
errno = ENOENT;
return -1;
}
enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES;
switch (c->header_type) {
case HEADER_LEGACY:
user_htype = POBJ_HEADER_LEGACY;
break;
case HEADER_COMPACT:
user_htype = POBJ_HEADER_COMPACT;
break;
case HEADER_NONE:
user_htype = POBJ_HEADER_NONE;
break;
default:
ASSERT(0); /* unreachable */
break;
}
struct pobj_alloc_class_desc *p = arg;
p->units_per_block = c->type == CLASS_HUGE ? 0 : c->rdsc.nallocs;
p->header_type = user_htype;
p->unit_size = c->unit_size;
p->class_id = c->id;
p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->rdsc.alignment : 0;
return 0;
}
static const struct ctl_argument CTL_ARG(desc) = {
.dest_size = sizeof(struct pobj_alloc_class_desc),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
unit_size, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
alignment, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
units_per_block, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
header_type, pmalloc_header_type_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(class_id)[] = {
CTL_LEAF_RW(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(new)[] = {
CTL_LEAF_WO(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(alloc_class)[] = {
CTL_INDEXED(class_id),
CTL_INDEXED(new),
CTL_NODE_END
};
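/*
* Illustrative usage sketch (an assumption for clarity, not part of the
* original source): these nodes are reachable through the public pmemobj CTL
* interface, e.g. registering a custom allocation class could look like:
*
*	struct pobj_alloc_class_desc d = {
*		.unit_size = 128,
*		.alignment = 0,
*		.units_per_block = 1000,
*		.header_type = POBJ_HEADER_COMPACT,
*	};
*	pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &d);
*
* and the class can be read back through "heap.alloc_class.<id>.desc".
*/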
/*
* CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size
*/
static int
CTL_RUNNABLE_HANDLER(extend)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(ssize_t *)arg;
if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect size for extend, must be larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
struct palloc_heap *heap = &pop->heap;
struct bucket *defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? -1 : 0;
heap_bucket_release(heap, defb);
return ret;
}
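/*
* Illustrative usage sketch (an assumption for clarity, not part of the
* original source): this handler is normally reached through the public CTL
* interface, e.g.:
*
*	ssize_t extend_by = 1 << 24;
*	pmemobj_ctl_exec(pop, "heap.size.extend", &extend_by);
*
* where the requested size has to be at least PMEMOBJ_MIN_PART.
*/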
/*
* CTL_READ_HANDLER(granularity) -- reads the current heap grow size
*/
static int
CTL_READ_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->heap.growsize;
return 0;
}
/*
* CTL_WRITE_HANDLER(granularity) -- changes the heap grow size
*/
static int
CTL_WRITE_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(int *)arg;
if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect grow size, must be 0 or larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
pop->heap.growsize = (size_t)arg_in;
return 0;
}
static const struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG;
/*
* CTL_READ_HANDLER(total) -- reads the total number of arenas
*/
static int
CTL_READ_HANDLER(total)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_total(&pop->heap);
return 0;
}
/*
* CTL_READ_HANDLER(max) -- reads the maximum number of arenas
*/
static int
CTL_READ_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *max = arg;
*max = heap_get_narenas_max(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(max) -- writes the maximum number of arenas
*/
static int
CTL_WRITE_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned size = *(unsigned *)arg;
int ret = heap_set_narenas_max(&pop->heap, size);
if (ret) {
LOG(1, "cannot change max arena number");
return -1;
}
return 0;
}
static const struct ctl_argument CTL_ARG(max) = CTL_ARG_LONG_LONG;
/*
* CTL_READ_HANDLER(automatic) -- reads the number of automatic arenas
*/
static int
CTL_READ_HANDLER(automatic, narenas)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_auto(&pop->heap);
return 0;
}
/*
* CTL_READ_HANDLER(arena_id) -- reads the id of the arena
* assigned to the calling thread
*/
static int
CTL_READ_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
*arena_id = heap_get_thread_arena_id(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(arena_id) -- assigns the arena to the calling thread
*/
static int
CTL_WRITE_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id = *(unsigned *)arg;
unsigned narenas = heap_get_narenas_total(&pop->heap);
/*
* check that the arena id is within the allowed range: <1, narenas>
*/
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
heap_set_arena_thread(&pop->heap, arena_id);
return 0;
}
static const struct ctl_argument CTL_ARG(arena_id) = CTL_ARG_LONG_LONG;
/*
* CTL_WRITE_HANDLER(automatic) -- updates automatic status of the arena
*/
static int
CTL_WRITE_HANDLER(automatic)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
/*
* check that the arena id is within the allowed range: <1, narenas>
*/
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
if (arg_in != 0 && arg_in != 1) {
LOG(1, "incorrect arena state, must be 0 or 1");
return -1;
}
return heap_set_arena_auto(&pop->heap, arena_id, arg_in);
}
/*
* CTL_READ_HANDLER(automatic) -- reads automatic status of the arena
*/
static int
CTL_READ_HANDLER(automatic)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
/*
* check that the arena id is within the allowed range: <1, narenas>
*/
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
*arg_out = heap_get_arena_auto(&pop->heap, arena_id);
return 0;
}
static struct ctl_argument CTL_ARG(automatic) = CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(size)[] = {
CTL_LEAF_RW(granularity),
CTL_LEAF_RUNNABLE(extend),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(size) -- reads usable size of specified arena
*/
static int
CTL_READ_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id;
unsigned narenas;
size_t *arena_size = arg;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
/* take index of arena */
arena_id = (unsigned)idx->value;
/* take number of arenas */
narenas = heap_get_narenas_total(&pop->heap);
/*
* check that the arena id is within the allowed range: <1, narenas>
*/
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
/* take buckets for arena */
struct bucket **buckets;
buckets = heap_get_arena_buckets(&pop->heap, arena_id);
/* sum the sizes of the memory blocks reserved by the arena's active buckets */
unsigned size = 0;
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if (buckets[i] != NULL && buckets[i]->is_active)
size += buckets[i]->active_memory_block->m.size_idx;
}
*arena_size = size * CHUNKSIZE;
return 0;
}
/*
* CTL_RUNNABLE_HANDLER(create) -- create new arena in the heap
*/
static int
CTL_RUNNABLE_HANDLER(create)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
struct palloc_heap *heap = &pop->heap;
int ret = heap_arena_create(heap);
if (ret < 0)
return -1;
*arena_id = (unsigned)ret;
return 0;
}
static const struct ctl_node CTL_NODE(arena_id)[] = {
CTL_LEAF_RO(size),
CTL_LEAF_RW(automatic),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(arena)[] = {
CTL_INDEXED(arena_id),
CTL_LEAF_RUNNABLE(create),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(narenas)[] = {
CTL_LEAF_RO(automatic, narenas),
CTL_LEAF_RO(total),
CTL_LEAF_RW(max),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(thread)[] = {
CTL_LEAF_RW(arena_id),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_CHILD(alloc_class),
CTL_CHILD(arena),
CTL_CHILD(size),
CTL_CHILD(thread),
CTL_CHILD(narenas),
CTL_NODE_END
};
/*
* pmalloc_ctl_register -- registers ctl nodes for "heap" module
*/
void
pmalloc_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, heap);
}
| 18,444 | 22.114035 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/pmemops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
#ifndef LIBPMEMOBJ_PMEMOPS_H
#define LIBPMEMOBJ_PMEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*persist_fn)(void *base, const void *, size_t, unsigned);
typedef int (*flush_fn)(void *base, const void *, size_t, unsigned);
typedef void (*drain_fn)(void *base);
typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len,
unsigned flags);
typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
struct pmem_ops {
/* for 'master' replica: with or without data replication */
persist_fn persist; /* persist function */
flush_fn flush; /* flush function */
drain_fn drain; /* drain function */
memcpy_fn memcpy; /* persistent memcpy function */
memmove_fn memmove; /* persistent memmove function */
memset_fn memset; /* persistent memset function */
void *base;
struct remote_ops {
remote_read_fn read;
void *ctx;
uintptr_t base;
} remote;
void *device;
uint16_t objid;
};
static force_inline int
pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->persist(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xpersist(p_ops, d, s, 0);
}
static force_inline int
pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->flush(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xflush(p_ops, d, s, 0);
}
static force_inline void
pmemops_drain(const struct pmem_ops *p_ops)
{
p_ops->drain(p_ops->base);
}
static force_inline void *
pmemops_memcpy(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memcpy(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memmove(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memmove(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c,
size_t len, unsigned flags)
{
return p_ops->memset(p_ops->base, dest, c, len, flags);
}
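/*
* Illustrative usage sketch (an assumption for clarity, not part of the
* original source): callers keep a pool-specific pmem_ops instance (in
* libpmemobj it lives in PMEMobjpool as pop->p_ops) and go through these
* wrappers instead of calling the function pointers directly, e.g.:
*
*	pmemops_memcpy(&pop->p_ops, dst, src, len, 0);
*	pmemops_persist(&pop->p_ops, dst, len);
*/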
#ifdef __cplusplus
}
#endif
#endif
| 2,672 | 22.866071 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/sync.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* sync.h -- internal to obj synchronization API
*/
#ifndef LIBPMEMOBJ_SYNC_H
#define LIBPMEMOBJ_SYNC_H 1
#include <errno.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "out.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* internal definitions of PMEM-locks
*/
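/*
* Each lock is padded to a full cacheline and carries a runid. The runid is
* compared against the pool's run_id on every use (see sync.c); a mismatch
* means the lock comes from a previous incarnation of the pool and its
* volatile part has to be (re)initialized before it can be used.
*/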
typedef union padded_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_mutex_t mutex;
struct {
void *bsd_mutex_p;
union padded_pmemmutex *next;
} bsd_u;
} mutex_u;
} pmemmutex;
} PMEMmutex_internal;
#define PMEMmutex_lock pmemmutex.mutex_u.mutex
#define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p
#define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next
typedef union padded_pmemrwlock {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_rwlock_t rwlock;
struct {
void *bsd_rwlock_p;
union padded_pmemrwlock *next;
} bsd_u;
} rwlock_u;
} pmemrwlock;
} PMEMrwlock_internal;
#define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock
#define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p
#define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next
typedef union padded_pmemcond {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_cond_t cond;
struct {
void *bsd_cond_p;
union padded_pmemcond *next;
} bsd_u;
} cond_u;
} pmemcond;
} PMEMcond_internal;
#define PMEMcond_cond pmemcond.cond_u.cond
#define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p
#define PMEMcond_next pmemcond.cond_u.bsd_u.next
/*
* pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never
* fails from the caller's perspective. If pmemobj_mutex_lock fails, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_lock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_lock");
}
}
/*
* pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never
* fails from the caller's perspective. If pmemobj_mutex_unlock fails, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_unlock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_unlock");
}
}
int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp);
#ifdef __cplusplus
}
#endif
#endif
| 2,504 | 21.168142 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/sync.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* sync.c -- persistent memory resident synchronization primitives
*/
#include <inttypes.h>
#include "obj.h"
#include "out.h"
#include "util.h"
#include "sync.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __FreeBSD__
#define RECORD_LOCK(init, type, p) \
if (init) {\
PMEM##type##_internal *head = pop->type##_head;\
while (!util_bool_compare_and_swap64(&pop->type##_head, head,\
p)) {\
head = pop->type##_head;\
}\
p->PMEM##type##_next = head;\
}
#else
#define RECORD_LOCK(init, type, p)
#endif
/*
* _get_value -- (internal) atomically initialize and return a value.
* Returns -1 on error, 0 if the caller is not the value
* initializer, 1 if the caller is the value initializer.
*/
static int
_get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg,
int (*init_value)(void *value, void *arg))
{
uint64_t tmp_runid;
int initializer = 0;
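/*
* The runid encodes the state of the value: pop_runid means it is already
* initialized for this pool incarnation, pop_runid - 1 means another thread
* is initializing it right now (spin until it finishes), and anything else
* is stale -- try to claim it with a CAS and become the initializer.
*/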
while ((tmp_runid = *runid) != pop_runid) {
if (tmp_runid == pop_runid - 1)
continue;
if (!util_bool_compare_and_swap64(runid, tmp_runid,
pop_runid - 1))
continue;
initializer = 1;
if (init_value(value, arg)) {
ERR("error initializing lock");
util_fetch_and_and64(runid, 0);
return -1;
}
if (util_bool_compare_and_swap64(runid, pop_runid - 1,
pop_runid) == 0) {
ERR("error setting lock runid");
return -1;
}
}
return initializer;
}
/*
* get_mutex -- (internal) atomically initialize, record and return a mutex
*/
static inline os_mutex_t *
get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp)
{
if (likely(imp->pmemmutex.runid == pop->run_id))
return &imp->PMEMmutex_lock;
volatile uint64_t *runid = &imp->pmemmutex.runid;
LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64,
imp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t));
VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock,
NULL, (void *)os_mutex_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, mutex, imp);
return &imp->PMEMmutex_lock;
}
/*
* get_rwlock -- (internal) atomically initialize, record and return a rwlock
*/
static inline os_rwlock_t *
get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp)
{
if (likely(irp->pmemrwlock.runid == pop->run_id))
return &irp->PMEMrwlock_lock;
volatile uint64_t *runid = &irp->pmemrwlock.runid;
LOG(5, "PMEMrwlock %p pop->run_id %"\
PRIu64 " pmemrwlock.runid %" PRIu64,
irp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal));
COMPILE_ERROR_ON(util_alignof(PMEMrwlock)
!= util_alignof(os_rwlock_t));
VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock,
NULL, (void *)os_rwlock_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, rwlock, irp);
return &irp->PMEMrwlock_lock;
}
/*
* get_cond -- (internal) atomically initialize, record and return a
* condition variable
*/
static inline os_cond_t *
get_cond(PMEMobjpool *pop, PMEMcond_internal *icp)
{
if (likely(icp->pmemcond.runid == pop->run_id))
return &icp->PMEMcond_cond;
volatile uint64_t *runid = &icp->pmemcond.runid;
LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64,
icp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal));
COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));
VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond,
NULL, (void *)os_cond_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, cond, icp);
return &icp->PMEMcond_cond;
}
/*
* pmemobj_mutex_zero -- zero-initialize a pmem resident mutex
*
* This function is not MT safe.
*/
void
pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
mutexip->pmemmutex.runid = 0;
pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid,
sizeof(mutexip->pmemmutex.runid));
}
/*
* pmemobj_mutex_lock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_lock(mutex);
}
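/*
* Illustrative usage sketch (an assumption for clarity, not part of the
* original source): a PMEMmutex is meant to be embedded in a pmem-resident
* structure and used without explicit initialization after a pool restart:
*
*	struct my_root { PMEMmutex lock; uint64_t counter; };
*	...
*	pmemobj_mutex_lock(pop, &rootp->lock);
*	rootp->counter++;
*	pmemobj_persist(pop, &rootp->counter, sizeof(rootp->counter));
*	pmemobj_mutex_unlock(pop, &rootp->lock);
*/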
/*
* pmemobj_mutex_assert_locked -- checks whether mutex is locked.
*
* Returns 0 when mutex is locked.
*/
int
pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
int ret = os_mutex_trylock(mutex);
if (ret == EBUSY)
return 0;
if (ret == 0) {
util_mutex_unlock(mutex);
/*
* There's no good error code for this case. EINVAL is used for
* something else here.
*/
return ENODEV;
}
return ret;
}
/*
* pmemobj_mutex_timedlock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_timedlock(mutex, abs_timeout);
}
/*
* pmemobj_mutex_trylock -- trylock a pmem resident mutex
*
* Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_trylock(mutex);
}
/*
* pmemobj_mutex_unlock -- unlock a pmem resident mutex
*/
int
pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
/* XXX potential performance improvement - move GET to debug version */
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_unlock(mutex);
}
/*
* pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock
*
* This function is not MT safe.
*/
void
pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
rwlockip->pmemrwlock.runid = 0;
pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid,
sizeof(rwlockip->pmemrwlock.runid));
}
/*
* pmemobj_rwlock_rdlock -- rdlock a pmem resident rwlock
*
* Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_rdlock(rwlock);
}
/*
* pmemobj_rwlock_wrlock -- wrlock a pmem resident rwlock
*
* Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_wrlock(rwlock);
}
/*
* pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident rwlock
*
* Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedrdlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident rwlock
*
* Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedwrlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident rwlock
*
* Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_tryrdlock(rwlock);
}
/*
* pmemobj_rwlock_trywrlock -- trywrlock a pmem resident rwlock
*
* Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_trywrlock(rwlock);
}
/*
* pmemobj_rwlock_unlock -- unlock a pmem resident rwlock
*/
int
pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
/* XXX potential performance improvement - move GET to debug version */
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_unlock(rwlock);
}
/*
* pmemobj_cond_zero -- zero-initialize a pmem resident condition variable
*
* This function is not MT safe.
*/
void
pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
condip->pmemcond.runid = 0;
pmemops_persist(&pop->p_ops, &condip->pmemcond.runid,
sizeof(condip->pmemcond.runid));
}
/*
* pmemobj_cond_broadcast -- broadcast a pmem resident condition variable
*
* Atomically initializes and broadcasts a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_broadcast(cond);
}
/*
* pmemobj_cond_signal -- signal a pmem resident condition variable
*
* Atomically initializes and signals a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_signal(cond);
}
/*
* pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable
*
* Atomically initializes and timedwaits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp,
mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_timedwait(cond, mutex, abs_timeout);
}
/*
* pmemobj_cond_wait -- wait on a pmem resident condition variable
*
* Atomically initializes and waits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
PMEMmutex *__restrict mutexp)
{
LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_wait(cond, mutex);
}
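/*
* Illustrative usage sketch (not part of the original source): the
* pmem-resident locking API above is used much like its POSIX counterpart,
* e.g. a wait loop guarded by a pmem-resident mutex. The structure and
* field names below are hypothetical.
*
*	struct my_root { PMEMmutex lock; PMEMcond done; int ready; };
*	...
*	pmemobj_mutex_lock(pop, &rootp->lock);
*	while (!rootp->ready)
*		pmemobj_cond_wait(pop, &rootp->done, &rootp->lock);
*	pmemobj_mutex_unlock(pop, &rootp->lock);
*
* No explicit init call is needed: as documented above, each primitive is
* initialized atomically on first use after a pool restart.
*/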
/*
* pmemobj_volatile -- atomically initialize, record and return a
* generic value
*/
void *
pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
void *ptr, size_t size,
int (*constr)(void *ptr, void *arg), void *arg)
{
LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr,
constr, arg);
if (likely(vlt->runid == pop->run_id))
return ptr;
VALGRIND_REMOVE_PMEM_MAPPING(ptr, size);
VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt));
if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) {
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
return NULL;
}
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
VALGRIND_SET_CLEAN(vlt, sizeof(*vlt));
return ptr;
}
| 16,501 | 24.664075 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/lane.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* lane.h -- internal definitions for lanes
*/
#ifndef LIBPMEMOBJ_LANE_H
#define LIBPMEMOBJ_LANE_H 1
#include <stdint.h>
#include "ulog.h"
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Distance between lanes used by threads required to prevent threads from
* false sharing part of lanes array. Used if properly spread lanes are
* available. Otherwise less spread out lanes would be used.
*/
#define LANE_JUMP (64 / sizeof(uint64_t))
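/*
* Note (illustrative): with 8-byte lane locks and a 64-byte cacheline this
* evaluates to 8, so threads pick initial lane indices a full cacheline of
* the lane_locks array apart.
*/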
/*
* Number of times the algorithm will try to reacquire the primary lane for the
* thread. If this threshold is exceeded, a new primary lane is selected for the
* thread.
*/
#define LANE_PRIMARY_ATTEMPTS 128
#define RLANE_DEFAULT 0
#define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */
/*
* We have 3 kilobytes to distribute.
* The smallest capacity is needed for the internal redo log for which we can
* accurately calculate the maximum amount of occupied space: 48 bytes,
* 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap
* AND, third for modification of the destination pointer. For future needs,
* this has been bumped up to 12 ulog entries.
*
* The remaining part has to be split between transactional redo and undo logs,
* and since by far the most space consuming operations are transactional
* snapshots, most of the space, 2 kilobytes, is assigned to the undo log.
* After that, the remainder, 640 bytes, or 40 ulog entries, is left for the
* transactional redo logs.
* Thanks to this distribution, all small and medium transactions should be
* entirely performed without allocating any additional metadata.
*
* These values must be cacheline size aligned to be used for ulogs. Therefore
* they are parametrized for the size of the struct ulog changes between
* platforms.
*/
#define LANE_UNDO_SIZE (LANE_TOTAL_SIZE \
- LANE_REDO_EXTERNAL_SIZE \
- LANE_REDO_INTERNAL_SIZE \
- 3 * sizeof(struct ulog)) /* 2048 for 64B ulog */
#define LANE_REDO_EXTERNAL_SIZE ALIGN_UP(704 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 640 for 64B ulog */
#define LANE_REDO_INTERNAL_SIZE ALIGN_UP(256 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 192 for 64B ulog */
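/*
* Worked example (illustrative), assuming CACHELINE_SIZE == 64 and thus
* sizeof(struct ulog) == 64:
*
*	LANE_REDO_INTERNAL_SIZE = ALIGN_UP(256 - 64, 64)    =  192
*	LANE_REDO_EXTERNAL_SIZE = ALIGN_UP(704 - 64, 64)    =  640
*	LANE_UNDO_SIZE          = 3072 - 640 - 192 - 3 * 64 = 2048
*
* Together with the three 64-byte ulog headers this adds up to
* LANE_TOTAL_SIZE (3072), which lane_recover_and_section_boot() checks
* with a compile-time assertion.
*/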
struct lane_layout {
/*
* Redo log for self-contained and 'one-shot' allocator operations.
* Cannot be extended.
*/
struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
/*
* Redo log for large operations/transactions.
* Can be extended by the use of internal ulog.
*/
struct ULOG(LANE_REDO_EXTERNAL_SIZE) external;
/*
* Undo log for snapshots done in a transaction.
* Can be extended/shrunk by the use of internal ulog.
*/
struct ULOG(LANE_UNDO_SIZE) undo;
};
struct lane {
struct lane_layout *layout; /* pointer to persistent layout */
struct operation_context *internal; /* context for internal ulog */
struct operation_context *external; /* context for external ulog */
struct operation_context *undo; /* context for undo ulog */
};
struct lane_descriptor {
/*
* Number of lanes available at runtime must be <= total number of lanes
* available in the pool. Number of lanes can be limited by shortage of
* other resources e.g. available RNIC's submission queue sizes.
*/
unsigned runtime_nlanes;
unsigned next_lane_idx;
uint64_t *lane_locks;
struct lane *lane;
};
typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length);
typedef void *(*section_constr)(PMEMobjpool *pop, void *data);
typedef void (*section_destr)(PMEMobjpool *pop, void *rt);
typedef int (*section_global_op)(PMEMobjpool *pop);
struct section_operations {
section_constr construct_rt;
section_destr destroy_rt;
section_layout_op check;
section_layout_op recover;
section_global_op boot;
section_global_op cleanup;
};
struct lane_info {
uint64_t pop_uuid_lo;
uint64_t lane_idx;
unsigned long nest_count;
/*
* The index of the primary lane for the thread. A thread will always
* try to acquire the primary lane first, and only if that fails will it
* look for a different available lane.
*/
uint64_t primary;
int primary_attempts;
struct lane_info *prev, *next;
};
void lane_info_boot(void);
void lane_info_destroy(void);
void lane_init_data(PMEMobjpool *pop);
int lane_boot(PMEMobjpool *pop);
void lane_cleanup(PMEMobjpool *pop);
int lane_recover_and_section_boot(PMEMobjpool *pop);
int lane_section_cleanup(PMEMobjpool *pop);
int lane_check(PMEMobjpool *pop);
unsigned lane_hold(PMEMobjpool *pop, struct lane **lane);
void lane_release(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 4,652 | 30.02 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/ulog.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* ulog.h -- unified log public interface
*/
#ifndef LIBPMEMOBJ_ULOG_H
#define LIBPMEMOBJ_ULOG_H 1
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include "vec.h"
#include "pmemops.h"
#include <x86intrin.h>
////cmd write optimization
/*
struct ulog_cmd_packet{
uint32_t ulog_offset : 32;
uint32_t base_offset : 32;
uint32_t src : 32;
uint32_t size : 32;
};
*/
struct ulog_entry_base {
uint64_t offset; /* offset with operation type flag */
};
/*
* ulog_entry_val -- log entry
*/
struct ulog_entry_val {
struct ulog_entry_base base;
uint64_t value; /* value to be applied */
};
/*
* ulog_entry_buf - ulog buffer entry
*/
struct ulog_entry_buf {
struct ulog_entry_base base; /* offset with operation type flag */
uint64_t checksum; /* checksum of the entire log entry */
uint64_t size; /* size of the buffer to be modified */
uint8_t data[]; /* content to fill in */
};
#define ULOG_UNUSED ((CACHELINE_SIZE - 40) / 8)
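/*
* Illustrative: the fixed metadata fields of the ULOG() structure below
* (checksum, next, capacity, gen_num, flags) occupy 5 * 8 = 40 bytes, so
* with a 64-byte cacheline ULOG_UNUSED evaluates to (64 - 40) / 8 = 3
* spare slots, padding the header to exactly one cacheline.
*/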
/*
* This structure *must* be located at a cacheline boundary. To achieve this,
* the next field is always allocated with extra padding, and then the offset
* is additionally aligned.
*/
#define ULOG(capacity_bytes) {\
/* 64 bytes of metadata */\
uint64_t checksum; /* checksum of ulog header and its entries */\
uint64_t next; /* offset of ulog extension */\
uint64_t capacity; /* capacity of this ulog in bytes */\
uint64_t gen_num; /* generation counter */\
uint64_t flags; /* ulog flags */\
uint64_t unused[ULOG_UNUSED]; /* must be 0 */\
uint8_t data[capacity_bytes]; /* N bytes of data */\
}\
#define SIZEOF_ULOG(base_capacity)\
(sizeof(struct ulog) + base_capacity)
/*
* Ulog buffer allocated by the user must be marked by this flag.
* It is important to not free it at the end:
* what user has allocated - user should free himself.
*/
#define ULOG_USER_OWNED (1U << 0)
/* use this for allocations of aligned ulog extensions */
#define SIZEOF_ALIGNED_ULOG(base_capacity)\
ALIGN_UP(SIZEOF_ULOG(base_capacity + (2 * CACHELINE_SIZE)), CACHELINE_SIZE)
struct ulog ULOG(0);
VEC(ulog_next, uint64_t);
typedef uint64_t ulog_operation_type;
#define ULOG_OPERATION_SET (0b000ULL << 61ULL)
#define ULOG_OPERATION_AND (0b001ULL << 61ULL)
#define ULOG_OPERATION_OR (0b010ULL << 61ULL)
#define ULOG_OPERATION_BUF_SET (0b101ULL << 61ULL)
#define ULOG_OPERATION_BUF_CPY (0b110ULL << 61ULL)
#define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR)
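/*
* Illustrative note: the operation type is packed into the topmost bits of
* the ulog_entry_base offset field (hence the shifts by 61 above), so a
* single 64-bit word records both the target offset and the kind of
* operation, conceptually:
*
*	entry->offset = dest_offset | ULOG_OPERATION_SET;
*
* ulog_entry_offset() and ulog_entry_type(), declared below, recover the
* two parts again; the exact masking lives in ulog.c.
*/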
/* immediately frees all associated ulog structures */
#define ULOG_FREE_AFTER_FIRST (1U << 0)
/* increments gen_num of the first, preallocated, ulog */
#define ULOG_INC_FIRST_GEN_NUM (1U << 1)
/* informs if there was any buffer allocated by user in the tx */
#define ULOG_ANY_USER_BUFFER (1U << 2)
typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset);
typedef int (*ulog_extend_fn)(void *, uint64_t *, uint64_t);
typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops);
typedef int (*ulog_entry_cb_ndp)(struct ulog_entry_base *e, struct ulog_entry_base *f, void *arg,
const struct pmem_ops *p_ops);
typedef void (*ulog_free_fn)(void *base, uint64_t *next);
typedef int (*ulog_rm_user_buffer_fn)(void *, void *addr);
struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops);
void ulog_construct(uint64_t offset, size_t capacity, uint64_t gen_num,
int flush, uint64_t flags, const struct pmem_ops *p_ops);
size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops);
void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
const struct pmem_ops *p_ops);
int ulog_foreach_entry(struct ulog *ulog,
ulog_entry_cb cb, void *arg, const struct pmem_ops *ops, struct ulog *ulognvm);
int ulog_foreach_entry_ndp(struct ulog *ulogdram, struct ulog *ulognvm,
ulog_entry_cb_ndp cb, void *arg, const struct pmem_ops *ops);
int ulog_reserve(struct ulog *ulog,
size_t ulog_base_nbytes, size_t gen_num,
int auto_reserve, size_t *new_capacity_bytes,
ulog_extend_fn extend, struct ulog_next *next,
const struct pmem_ops *p_ops);
void ulog_store(struct ulog *dest,
struct ulog *src, size_t nbytes, size_t ulog_base_nbytes,
size_t ulog_total_capacity,
struct ulog_next *next, const struct pmem_ops *p_ops);
int ulog_free_next(struct ulog *u, const struct pmem_ops *p_ops,
ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove,
uint64_t flags);
void ulog_clobber(struct ulog *dest, struct ulog_next *next,
const struct pmem_ops *p_ops);
int ulog_clobber_data(struct ulog *dest,
size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, ulog_free_fn ulog_free,
ulog_rm_user_buffer_fn user_buff_remove,
const struct pmem_ops *p_ops, unsigned flags);
void ulog_clobber_entry(const struct ulog_entry_base *e,
const struct pmem_ops *p_ops);
void ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
void ulog_process_ndp(struct ulog *ulognvm, struct ulog *ulogdeam, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
size_t ulog_base_nbytes(struct ulog *ulog);
int ulog_recovery_needed(struct ulog *ulog, int verify_checksum);
struct ulog *ulog_by_offset(size_t offset, const struct pmem_ops *p_ops);
uint64_t ulog_entry_offset(const struct ulog_entry_base *entry);
ulog_operation_type ulog_entry_type(
const struct ulog_entry_base *entry);
struct ulog_entry_val *ulog_entry_val_create(struct ulog *ulog,
size_t offset, uint64_t *dest, uint64_t value,
ulog_operation_type type,
const struct pmem_ops *p_ops);
#ifdef USE_NDP_CLOBBER
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops, int clear_next_header);
#else
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops);
#endif
void ulog_entry_apply(const struct ulog_entry_base *e, int persist,
const struct pmem_ops *p_ops);
void ulog_entry_apply_ndp(const struct ulog_entry_base *e, const struct ulog_entry_base *f, int persist,
const struct pmem_ops *p_ops);
size_t ulog_entry_size(const struct ulog_entry_base *entry);
void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
int ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
#endif
| 6,600 | 32.170854 | 104 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/lane.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* lane.c -- lane implementation
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include "libpmemobj.h"
#include "critnib.h"
#include "lane.h"
#include "out.h"
#include "util.h"
#include "obj.h"
#include "os_thread.h"
#include "valgrind_internal.h"
#include "memops.h"
#include "palloc.h"
#include "tx.h"
static os_tls_key_t Lane_info_key;
static __thread struct critnib *Lane_info_ht;
static __thread struct lane_info *Lane_info_records;
static __thread struct lane_info *Lane_info_cache;
/*
* lane_info_create -- (internal) constructor for thread shared data
*/
static inline void
lane_info_create(void)
{
Lane_info_ht = critnib_new();
if (Lane_info_ht == NULL)
FATAL("critnib_new");
}
/*
* lane_info_delete -- (internal) deletes lane info hash table
*/
static inline void
lane_info_delete(void)
{
if (unlikely(Lane_info_ht == NULL))
return;
critnib_delete(Lane_info_ht);
struct lane_info *record;
struct lane_info *head = Lane_info_records;
while (head != NULL) {
record = head;
head = head->next;
Free(record);
}
Lane_info_ht = NULL;
Lane_info_records = NULL;
Lane_info_cache = NULL;
}
/*
* lane_info_ht_boot -- (internal) boot lane info and add it to thread shared
* data
*/
static inline void
lane_info_ht_boot(void)
{
lane_info_create();
int result = os_tls_set(Lane_info_key, Lane_info_ht);
if (result != 0) {
errno = result;
FATAL("!os_tls_set");
}
}
/*
* lane_info_ht_destroy -- (internal) destructor for thread shared data
*/
static inline void
lane_info_ht_destroy(void *ht)
{
lane_info_delete();
}
/*
* lane_info_boot -- initialize lane info hash table and lane info key
*/
void
lane_info_boot(void)
{
int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy);
if (result != 0) {
errno = result;
FATAL("!os_tls_key_create");
}
}
/*
* lane_info_destroy -- destroy lane info hash table
*/
void
lane_info_destroy(void)
{
lane_info_delete();
(void) os_tls_key_delete(Lane_info_key);
}
/*
* lane_info_cleanup -- remove lane info record regarding pool being deleted
*/
static inline void
lane_info_cleanup(PMEMobjpool *pop)
{
if (unlikely(Lane_info_ht == NULL))
return;
struct lane_info *info = critnib_remove(Lane_info_ht, pop->uuid_lo);
if (likely(info != NULL)) {
if (info->prev)
info->prev->next = info->next;
if (info->next)
info->next->prev = info->prev;
if (Lane_info_cache == info)
Lane_info_cache = NULL;
if (Lane_info_records == info)
Lane_info_records = info->next;
Free(info);
}
}
/*
* lane_get_layout -- (internal) calculates the real pointer of the lane layout
*/
static struct lane_layout *
lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx)
{
return (void *)((char *)pop + pop->lanes_offset +
sizeof(struct lane_layout) * lane_idx);
}
/*
* lane_ulog_constructor -- (internal) constructor of a ulog extension
*/
static int
lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = base;
const struct pmem_ops *p_ops = &pop->p_ops;
size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
CACHELINE_SIZE);
uint64_t gen_num = *(uint64_t *)arg;
ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity,
gen_num, 1, 0, p_ops);
return 0;
}
/*
* lane_undo_extend -- allocates a new undo log
*/
static int
lane_undo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
PMEMobjpool *pop = base;
struct tx_parameters *params = pop->tx_params;
size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
* lane_redo_extend -- allocates a new redo log
*/
static int
lane_redo_extend(void *base, uint64_t *redo, uint64_t gen_num)
{
size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
* lane_init -- (internal) initializes a single lane runtime variables
*/
static int
lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout)
{
ASSERTne(lane, NULL);
lane->layout = layout;
lane->internal = operation_new((struct ulog *)&layout->internal,
LANE_REDO_INTERNAL_SIZE,
NULL, NULL, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->internal == NULL)
goto error_internal_new;
lane->external = operation_new((struct ulog *)&layout->external,
LANE_REDO_EXTERNAL_SIZE,
lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->external == NULL)
goto error_external_new;
lane->undo = operation_new((struct ulog *)&layout->undo,
LANE_UNDO_SIZE,
lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_UNDO);
if (lane->undo == NULL)
goto error_undo_new;
return 0;
error_undo_new:
operation_delete(lane->external);
error_external_new:
operation_delete(lane->internal);
error_internal_new:
return -1;
}
/*
* lane_destroy -- cleanups a single lane runtime variables
*/
static void
lane_destroy(PMEMobjpool *pop, struct lane *lane)
{
operation_delete(lane->undo);
operation_delete(lane->internal);
operation_delete(lane->external);
}
/*
* lane_boot -- initializes all lanes
*/
int
lane_boot(PMEMobjpool *pop)
{
int err = 0;
pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes);
if (pop->lanes_desc.lane == NULL) {
err = ENOMEM;
ERR("!Malloc of volatile lanes");
goto error_lanes_malloc;
}
pop->lanes_desc.next_lane_idx = 0;
pop->lanes_desc.lane_locks =
Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes);
if (pop->lanes_desc.lane_locks == NULL) {
ERR("!Malloc for lane locks");
goto error_locks_malloc;
}
/* add lanes to pmemcheck ignored list */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
(sizeof(struct lane_layout) * pop->nlanes));
uint64_t i;
for (i = 0; i < pop->nlanes; ++i) {
struct lane_layout *layout = lane_get_layout(pop, i);
if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) {
ERR("!lane_init");
goto error_lane_init;
}
}
return 0;
error_lane_init:
for (; i >= 1; --i)
lane_destroy(pop, &pop->lanes_desc.lane[i - 1]);
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
error_locks_malloc:
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
error_lanes_malloc:
return err;
}
/*
* lane_init_data -- initializes ulogs for all the lanes
*/
void
lane_init_data(PMEMobjpool *pop)
{
struct lane_layout *layout;
for (uint64_t i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal),
LANE_REDO_INTERNAL_SIZE, 0, 0, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external),
LANE_REDO_EXTERNAL_SIZE, 0, 0, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo),
LANE_UNDO_SIZE, 0, 0, 0, &pop->p_ops);
}
layout = lane_get_layout(pop, 0);
pmemops_xpersist(&pop->p_ops, layout,
pop->nlanes * sizeof(struct lane_layout),
PMEMOBJ_F_RELAXED);
}
/*
* lane_cleanup -- destroys all lanes
*/
void
lane_cleanup(PMEMobjpool *pop)
{
for (uint64_t i = 0; i < pop->nlanes; ++i)
lane_destroy(pop, &pop->lanes_desc.lane[i]);
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
lane_info_cleanup(pop);
}
/*
* lane_recover_and_section_boot -- performs initialization and recovery of all
* lanes
*/
int
lane_recover_and_section_boot(PMEMobjpool *pop)
{
COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) +
SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) +
SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE);
int err = 0;
uint64_t i; /* lane index */
struct lane_layout *layout;
/*
* First we need to recover the internal/external redo logs so that the
* allocator state is consistent before we boot it.
*/
for (i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_recover((struct ulog *)&layout->internal,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
ulog_recover((struct ulog *)&layout->external,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
}
if ((err = pmalloc_boot(pop)) != 0)
return err;
/*
* Undo logs must be processed after the heap is initialized since
* an undo recovery might require deallocation of the next ulogs.
*/
for (i = 0; i < pop->nlanes; ++i) {
struct operation_context *ctx = pop->lanes_desc.lane[i].undo;
operation_resume(ctx);
operation_process(ctx);
operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM |
ULOG_FREE_AFTER_FIRST);
}
return 0;
}
/*
* lane_section_cleanup -- performs runtime cleanup of all lanes
*/
int
lane_section_cleanup(PMEMobjpool *pop)
{
return pmalloc_cleanup(pop);
}
/*
* lane_check -- performs check of all lanes
*/
int
lane_check(PMEMobjpool *pop)
{
int err = 0;
uint64_t j; /* lane index */
struct lane_layout *layout;
for (j = 0; j < pop->nlanes; ++j) {
layout = lane_get_layout(pop, j);
if (ulog_check((struct ulog *)&layout->internal,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops) != 0) {
LOG(2, "lane %" PRIu64 " internal redo failed: %d",
j, err);
return err;
}
}
return 0;
}
/*
* get_lane -- (internal) get free lane index
*/
static inline void
get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks)
{
info->lane_idx = info->primary;
while (1) {
do {
info->lane_idx %= nlocks;
if (likely(util_bool_compare_and_swap64(
&locks[info->lane_idx], 0, 1))) {
if (info->lane_idx == info->primary) {
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
} else if (info->primary_attempts == 0) {
info->primary = info->lane_idx;
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
}
return;
}
if (info->lane_idx == info->primary &&
info->primary_attempts > 0) {
info->primary_attempts--;
}
++info->lane_idx;
} while (info->lane_idx < nlocks);
sched_yield();
}
}
/*
* get_lane_info_record -- (internal) get lane record attached to memory pool
* or first free
*/
static inline struct lane_info *
get_lane_info_record(PMEMobjpool *pop)
{
if (likely(Lane_info_cache != NULL &&
Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) {
return Lane_info_cache;
}
if (unlikely(Lane_info_ht == NULL)) {
lane_info_ht_boot();
}
struct lane_info *info = critnib_get(Lane_info_ht, pop->uuid_lo);
if (unlikely(info == NULL)) {
info = Malloc(sizeof(struct lane_info));
if (unlikely(info == NULL)) {
FATAL("Malloc");
}
info->pop_uuid_lo = pop->uuid_lo;
info->lane_idx = UINT64_MAX;
info->nest_count = 0;
info->next = Lane_info_records;
info->prev = NULL;
info->primary = 0;
info->primary_attempts = LANE_PRIMARY_ATTEMPTS;
if (Lane_info_records) {
Lane_info_records->prev = info;
}
Lane_info_records = info;
if (unlikely(critnib_insert(
Lane_info_ht, pop->uuid_lo, info) != 0)) {
FATAL("critnib_insert");
}
}
Lane_info_cache = info;
return info;
}
/*
* lane_hold -- grabs a per-thread lane in a round-robin fashion
*/
unsigned
lane_hold(PMEMobjpool *pop, struct lane **lanep)
{
/*
* Before runtime lane initialization all remote operations are
* executed using RLANE_DEFAULT.
*/
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
if (lanep != NULL)
FATAL("cannot obtain section before lane's init");
return RLANE_DEFAULT;
}
struct lane_info *lane = get_lane_info_record(pop);
while (unlikely(lane->lane_idx == UINT64_MAX)) {
/* initial wrap to next CL */
lane->primary = lane->lane_idx = util_fetch_and_add32(
&pop->lanes_desc.next_lane_idx, LANE_JUMP);
} /* handles wraparound */
uint64_t *llocks = pop->lanes_desc.lane_locks;
/* grab next free lane from lanes available at runtime */
if (!lane->nest_count++) {
get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes);
}
struct lane *l = &pop->lanes_desc.lane[lane->lane_idx];
/* reinitialize lane's content only if in outermost hold */
if (lanep && lane->nest_count == 1) {
VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l));
VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout));
operation_init(l->external);
operation_init(l->internal);
operation_init(l->undo);
}
if (lanep)
*lanep = l;
return (unsigned)lane->lane_idx;
}
/*
* lane_release -- drops the per-thread lane
*/
void
lane_release(PMEMobjpool *pop)
{
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
return;
}
struct lane_info *lane = get_lane_info_record(pop);
ASSERTne(lane, NULL);
ASSERTne(lane->lane_idx, UINT64_MAX);
if (unlikely(lane->nest_count == 0)) {
FATAL("lane_release");
} else if (--(lane->nest_count) == 0) {
if (unlikely(!util_bool_compare_and_swap64(
&pop->lanes_desc.lane_locks[lane->lane_idx],
1, 0))) {
FATAL("util_bool_compare_and_swap64");
}
}
}
| 12,994 | 21.678883 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_obc/rpmem_obc_test_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_common.h -- common declarations for rpmem_obc test
*/
#include "unittest.h"
#include "out.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define BUFF_SIZE 8192
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT;
struct server {
int fd_in;
int fd_out;
};
void set_rpmem_cmd(const char *fmt, ...);
struct server *srv_init(void);
void srv_fini(struct server *s);
void srv_recv(struct server *s, void *buff, size_t len);
void srv_send(struct server *s, const void *buff, size_t len);
void srv_wait_disconnect(struct server *s);
void client_connect_wait(struct rpmem_obc *rpc, char *target);
/*
* Since the server may disconnect the connection at any moment
* from the client's perspective, execute the test in a loop so
* the moment when the connection is closed will be possibly different.
*/
#define ECONNRESET_LOOP 10
void server_econnreset(struct server *s, const void *msg, size_t len);
TEST_CASE_DECLARE(client_enotconn);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(client_monitor);
TEST_CASE_DECLARE(server_monitor);
TEST_CASE_DECLARE(server_wait);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_create);
TEST_CASE_DECLARE(server_create_econnreset);
TEST_CASE_DECLARE(server_create_eproto);
TEST_CASE_DECLARE(server_create_error);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(server_open_econnreset);
TEST_CASE_DECLARE(server_open_eproto);
TEST_CASE_DECLARE(server_open_error);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(server_close_econnreset);
TEST_CASE_DECLARE(server_close_eproto);
TEST_CASE_DECLARE(server_close_error);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server_set_attr);
TEST_CASE_DECLARE(server_set_attr_econnreset);
TEST_CASE_DECLARE(server_set_attr_eproto);
TEST_CASE_DECLARE(server_set_attr_error);
| 2,951 | 26.082569 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_obc/rpmem_obc_test_create.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_create.c -- test cases for rpmem_obc_create function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_create_resp CREATE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CREATE_RESP,
.size = sizeof(struct rpmem_msg_create_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
};
/*
* check_create_msg -- check create message
*/
static void
check_create_msg(struct rpmem_msg_create *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
* server_create_handle -- handle a create request message
*/
static void
server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_create) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_create *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_create(msg);
check_create_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_create_eproto function.
*/
#define CREATE_EPROTO_COUNT 8
/*
* server_create_eproto -- send invalid create request responses to a client
*/
int
server_create_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_error -- return an error status in create response message
*/
int
server_create_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_econnreset -- test case for closing connection - server side
*/
int
server_create_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_create -- test case for rpmem_obc_create function - server side
*/
int
server_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 0)
UT_FATAL("usage: %s", tc->name);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_create_errno -- perform create request operation and expect
* specified errno. If ex_errno is zero expect certain values in res struct.
*/
static void
client_create_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, CREATE_RESP.ibc.port);
UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
CREATE_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
CREATE_RESP.ibc.nlanes);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_create_error -- check if valid errno is set if error status returned
*/
static void
client_create_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_create_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_create -- test case for create request operation - client side
*/
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_create_econnreset %d", i % 2);
client_create_errno(target, ECONNRESET);
}
for (int i = 0; i < CREATE_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_create_eproto %d", i);
client_create_errno(target, EPROTO);
}
client_create_error(target);
set_rpmem_cmd("server_create");
client_create_errno(target, 0);
return 1;
}
| 6,642 | 20.498382 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_obc/rpmem_obc_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_obc_test.c -- unit test for rpmem_obc module
*/
#include "rpmem_obc_test_common.h"
#include "pmemcommon.h"
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(client_enotconn),
TEST_CASE(client_connect),
TEST_CASE(client_create),
TEST_CASE(server_create),
TEST_CASE(server_create_econnreset),
TEST_CASE(server_create_eproto),
TEST_CASE(server_create_error),
TEST_CASE(client_open),
TEST_CASE(server_open),
TEST_CASE(server_open_econnreset),
TEST_CASE(server_open_eproto),
TEST_CASE(server_open_error),
TEST_CASE(client_close),
TEST_CASE(server_close),
TEST_CASE(server_close_econnreset),
TEST_CASE(server_close_eproto),
TEST_CASE(server_close_error),
TEST_CASE(client_monitor),
TEST_CASE(server_monitor),
TEST_CASE(client_set_attr),
TEST_CASE(server_set_attr),
TEST_CASE(server_set_attr_econnreset),
TEST_CASE(server_set_attr_eproto),
TEST_CASE(server_set_attr_error),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_obc");
common_init("rpmem_obc",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmem_util_cmds_init();
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
rpmem_util_cmds_fini();
common_fini();
DONE(NULL);
}
| 1,388 | 20.369231 | 59 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_obc/rpmem_obc_test_open.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc_test_open.c -- test cases for rpmem_obj_open function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_open_resp OPEN_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_OPEN_RESP,
.size = sizeof(struct rpmem_msg_open_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
.pool_attr = POOL_ATTR_INIT,
};
/*
* check_open_msg -- check open message
*/
static void
check_open_msg(struct rpmem_msg_open *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
}
/*
* server_open_handle -- handle an open request message
*/
static void
server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_open) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_open *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_open(msg);
check_open_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_open_eproto function.
*/
#define OPEN_EPROTO_COUNT 8
/*
* server_open_eproto -- send invalid open request responses to a client
*/
int
server_open_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_open_error -- return error status in open response message
*/
int
server_open_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
resp.hdr.status = e;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_open_econnreset -- test case for closing connection - server side
*/
int
server_open_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_open -- test case for open request message - server side
*/
int
server_open(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_open_errno -- perform open request operation and expect
* specified errno. If ex_errno is zero expect certain values in res struct.
*/
static void
client_open_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, OPEN_RESP.ibc.port);
UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
OPEN_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
OPEN_RESP.ibc.nlanes);
UT_ASSERTeq(memcmp(pool_attr.signature,
OPEN_RESP.pool_attr.signature,
RPMEM_POOL_HDR_SIG_LEN), 0);
UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major);
UT_ASSERTeq(pool_attr.compat_features,
OPEN_RESP.pool_attr.compat_features);
UT_ASSERTeq(pool_attr.incompat_features,
OPEN_RESP.pool_attr.incompat_features);
UT_ASSERTeq(pool_attr.ro_compat_features,
OPEN_RESP.pool_attr.ro_compat_features);
UT_ASSERTeq(memcmp(pool_attr.poolset_uuid,
OPEN_RESP.pool_attr.poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.uuid,
OPEN_RESP.pool_attr.uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.next_uuid,
OPEN_RESP.pool_attr.next_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.prev_uuid,
OPEN_RESP.pool_attr.prev_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.user_flags,
OPEN_RESP.pool_attr.user_flags,
RPMEM_POOL_USER_FLAGS_LEN), 0);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_open_error -- check if valid errno is set if error status returned
*/
static void
client_open_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_open_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_open -- test case for open request message - client side
*/
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_open_econnreset %d", i % 2);
client_open_errno(target, ECONNRESET);
}
for (int i = 0; i < OPEN_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_open_eproto %d", i);
client_open_errno(target, EPROTO);
}
client_open_error(target);
set_rpmem_cmd("server_open");
client_open_errno(target, 0);
return 1;
}
| 7,427 | 21.306306 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmemd_db/rpmemd_db_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmemd_db_test.c -- unit test for pool set database
*
* usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2>
*/
#include "file.h"
#include "unittest.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#define POOL_MODE 0644
#define FAILED_FUNC(func_name) \
UT_ERR("!%s(): %s() failed", __func__, func_name);
#define FAILED_FUNC_PARAM(func_name, param) \
UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param);
#define NPOOLS_DUAL 2
#define POOL_ATTR_CREATE 0
#define POOL_ATTR_OPEN 1
#define POOL_ATTR_SET_ATTR 2
#define POOL_STATE_INITIAL 0
#define POOL_STATE_CREATED 1
#define POOL_STATE_OPENED 2
#define POOL_STATE_CLOSED POOL_STATE_CREATED
#define POOL_STATE_REMOVED POOL_STATE_INITIAL
/*
* fill_rand -- fill a buffer with random values
*/
static void
fill_rand(void *addr, size_t len)
{
unsigned char *buff = addr;
srand(time(NULL));
for (unsigned i = 0; i < len; i++)
buff[i] = (rand() % ('z' - 'a')) + 'a';
}
/*
* test_init -- test rpmemd_db_init() and rpmemd_db_fini()
*/
static int
test_init(const char *root_dir)
{
struct rpmemd_db *db;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
rpmemd_db_fini(db);
return 0;
}
/*
* test_check_dir -- test rpmemd_db_check_dir()
*/
static int
test_check_dir(const char *root_dir)
{
struct rpmemd_db *db;
int ret;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
ret = rpmemd_db_check_dir(db);
if (ret) {
FAILED_FUNC("rpmemd_db_check_dir");
}
rpmemd_db_fini(db);
return ret;
}
/*
* test_create -- test rpmemd_db_pool_create()
*/
static int
test_create(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
memset(&attr, 0, sizeof(attr));
attr.incompat_features = 2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_create_dual -- dual test for rpmemd_db_pool_create()
*/
static int
test_create_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1;
memset(&attr1, 0, sizeof(attr1));
attr1.incompat_features = 2;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
/* test dual create */
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
rpmemd_db_pool_close(db, prp1);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
err_create_2:
rpmemd_db_pool_close(db, prp1);
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* compare_attr -- compare pool's attributes
*/
static void
compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2)
{
char *msg;
if (a1->major != a2->major) {
msg = "major";
goto err_mismatch;
}
if (a1->compat_features != a2->compat_features) {
msg = "compat_features";
goto err_mismatch;
}
if (a1->incompat_features != a2->incompat_features) {
msg = "incompat_features";
goto err_mismatch;
}
if (a1->ro_compat_features != a2->ro_compat_features) {
msg = "ro_compat_features";
goto err_mismatch;
}
if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN)) {
msg = "signature";
goto err_mismatch;
}
if (memcmp(a1->poolset_uuid, a2->poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN)) {
msg = "poolset_uuid";
goto err_mismatch;
}
if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "uuid";
goto err_mismatch;
}
if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "next_uuid";
goto err_mismatch;
}
if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "prev_uuid";
goto err_mismatch;
}
return;
err_mismatch:
errno = EINVAL;
UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, msg);
}
/*
* test_open -- test rpmemd_db_pool_open()
*/
static int
test_open(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr1, attr2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1, sizeof(attr1));
attr1.major = 1;
attr1.incompat_features = 2;
attr1.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto fini;
}
rpmemd_db_pool_close(db, prp);
compare_attr(&attr1, &attr2);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_open_dual -- dual test for rpmemd_db_pool_open()
*/
static int
test_open_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1a, attr2a, attr1b, attr2b;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1a, sizeof(attr1a));
fill_rand(&attr1b, sizeof(attr1b));
attr1a.major = 1;
attr1a.incompat_features = 2;
attr1a.compat_features = 0;
attr1b.major = 1;
attr1b.incompat_features = 2;
attr1b.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
rpmemd_db_pool_close(db, prp1);
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
/* test dual open */
prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1);
goto err_open_1;
}
prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2);
goto err_open_2;
}
rpmemd_db_pool_close(db, prp1);
rpmemd_db_pool_close(db, prp2);
compare_attr(&attr1a, &attr2a);
compare_attr(&attr1b, &attr2b);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
err_open_2:
rpmemd_db_pool_close(db, prp1);
err_open_1:
rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
err_create_2:
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr -- test rpmemd_db_pool_set_attr()
*/
static int
test_set_attr(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr[3];
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE]));
fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR]));
attr[POOL_ATTR_CREATE].major = 1;
attr[POOL_ATTR_CREATE].incompat_features = 2;
attr[POOL_ATTR_CREATE].compat_features = 0;
attr[POOL_ATTR_SET_ATTR].major = 1;
attr[POOL_ATTR_SET_ATTR].incompat_features = 2;
attr[POOL_ATTR_SET_ATTR].compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto err_create;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]);
ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_set_attr");
goto err_set_attr;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]);
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
goto fini;
err_set_attr:
rpmemd_db_pool_close(db, prp);
err_open:
rpmemd_db_pool_remove(db, pool_desc, 0, 0);
err_create:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr()
*/
static int
test_set_attr_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr[NPOOLS_DUAL][3];
struct rpmemd_db_pool *prp[NPOOLS_DUAL];
const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2};
unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL};
struct rpmemd_db *db;
int ret = -1;
/* initialize rpmem database */
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
/*
* generate random pool attributes for create and set
* attributes operations
*/
fill_rand(&attr[p][POOL_ATTR_CREATE],
sizeof(attr[p][POOL_ATTR_CREATE]));
fill_rand(&attr[p][POOL_ATTR_SET_ATTR],
sizeof(attr[p][POOL_ATTR_SET_ATTR]));
attr[p][POOL_ATTR_CREATE].major = 1;
attr[p][POOL_ATTR_CREATE].incompat_features = 2;
attr[p][POOL_ATTR_CREATE].compat_features = 0;
attr[p][POOL_ATTR_SET_ATTR].major = 1;
attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2;
attr[p][POOL_ATTR_SET_ATTR].compat_features = 0;
/* create pool */
prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_CREATE]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CREATED;
}
/* open pools and check pool attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_CREATE],
&attr[p][POOL_ATTR_OPEN]);
}
/* set attributes and close pools */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
ret = rpmemd_db_pool_set_attr(prp[p],
&attr[p][POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
/* open pools and check attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_SET_ATTR],
&attr[p][POOL_ATTR_OPEN]);
}
err:
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
if (pool_state[p] == POOL_STATE_OPENED) {
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
if (pool_state[p] == POOL_STATE_CREATED) {
ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove",
pool_desc[p]);
}
pool_state[p] = POOL_STATE_REMOVED;
}
}
rpmemd_db_fini(db);
return ret;
}
static int
exists_cb(struct part_file *pf, void *arg)
{
return util_file_exists(pf->part->path);
}
static int
noexists_cb(struct part_file *pf, void *arg)
{
int exists = util_file_exists(pf->part->path);
if (exists < 0)
return -1;
else
return !exists;
}
/*
* test_remove -- test for rpmemd_db_pool_remove()
*/
static void
test_remove(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret;
char path[PATH_MAX];
SNPRINTF(path, PATH_MAX, "%s/%s", root_dir, pool_desc);
fill_rand(&attr, sizeof(attr));
strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid));
attr.incompat_features = 2;
attr.compat_features = 0;
db = rpmemd_db_init(root_dir, POOL_MODE);
UT_ASSERTne(db, NULL);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
ret = util_poolset_foreach_part(path, exists_cb, NULL);
UT_ASSERTeq(ret, 1);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
UT_ASSERTeq(ret, 0);
ret = util_poolset_foreach_part(path, noexists_cb, NULL);
UT_ASSERTeq(ret, 1);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1);
UT_ASSERTeq(ret, 0);
ret = util_file_exists(path);
UT_ASSERTne(ret, 1);
rpmemd_db_fini(db);
}
int
main(int argc, char *argv[])
{
char *pool_desc[2], *log_file;
char root_dir[PATH_MAX];
START(argc, argv, "rpmemd_db");
util_init();
out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0);
if (argc != 5)
UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>"
" <pool_desc_2>", argv[0]);
log_file = argv[1];
if (realpath(argv[2], root_dir) == NULL)
UT_FATAL("!realpath(%s)", argv[1]);
pool_desc[0] = argv[3];
pool_desc[1] = argv[4];
if (rpmemd_log_init("rpmemd error: ", log_file, 0))
FAILED_FUNC("rpmemd_log_init");
test_init(root_dir);
test_check_dir(root_dir);
test_create(root_dir, pool_desc[0]);
test_create_dual(root_dir, pool_desc[0], pool_desc[1]);
test_open(root_dir, pool_desc[0]);
test_open_dual(root_dir, pool_desc[0], pool_desc[1]);
test_set_attr(root_dir, pool_desc[0]);
test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]);
test_remove(root_dir, pool_desc[0]);
rpmemd_log_close();
out_fini();
DONE(NULL);
}
| 15,339 | 22.636364 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_granularity/pmem2_granularity.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
 * pmem2_granularity.c -- test for granularity functionality
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "source.h"
#include "pmem2_granularity.h"
#include "unittest.h"
#include "ut_pmem2_config.h"
#include "ut_pmem2_utils.h"
#include "out.h"
size_t Is_nfit = 1;
size_t Pc_type = 7;
size_t Pc_capabilities;
/*
* parse_args -- parse args from the input
*/
static int
parse_args(const struct test_case *tc, int argc, char *argv[],
char **file)
{
if (argc < 1)
UT_FATAL("usage: %s <file>", tc->name);
*file = argv[0];
return 1;
}
/*
* set_eadr -- set variable required for mocked functions
*/
static void
set_eadr()
{
int is_eadr = atoi(os_getenv("IS_EADR"));
if (is_eadr)
Pc_capabilities = 3;
else
Pc_capabilities = 2;
}
/*
* test_ctx -- essential parameters used by test
*/
struct test_ctx {
int fd;
enum pmem2_granularity requested_granularity;
enum pmem2_granularity expected_granularity;
};
/*
* init_test -- initialize basic parameters for test
*/
static void
init_test(char *file, struct test_ctx *ctx,
enum pmem2_granularity granularity)
{
set_eadr();
ctx->fd = OPEN(file, O_RDWR);
ctx->requested_granularity = granularity;
int is_eadr = atoi(os_getenv("IS_EADR"));
int is_pmem = atoi(os_getenv("IS_PMEM"));
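	/*
	 * eADR implies byte granularity, pmem without eADR implies cache
	 * line granularity, and non-pmem implies page granularity.
	 */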
if (is_eadr) {
if (is_pmem)
ctx->expected_granularity = PMEM2_GRANULARITY_BYTE;
else
UT_FATAL("invalid configuration IS_EADR && !IS_PMEM");
} else if (is_pmem) {
ctx->expected_granularity = PMEM2_GRANULARITY_CACHE_LINE;
} else {
ctx->expected_granularity = PMEM2_GRANULARITY_PAGE;
}
}
/*
* init_cfg -- initialize basic pmem2 config
*/
static void
init_cfg(struct pmem2_config *cfg,
struct pmem2_source **src, struct test_ctx *ctx)
{
pmem2_config_init(cfg);
int ret = pmem2_source_from_fd(src, ctx->fd);
UT_PMEM2_EXPECT_RETURN(ret, 0);
}
/*
* cleanup -- cleanup the environment after test
*/
static void
cleanup(struct pmem2_source *src, struct test_ctx *ctx)
{
#ifdef _WIN32
CloseHandle(src->value.handle);
#else
CLOSE(ctx->fd);
#endif
}
/*
* map_with_available_granularity -- map the range with valid granularity,
* includes cleanup
*/
static void
map_with_available_granularity(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx)
{
cfg->requested_max_granularity = ctx->requested_granularity;
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(map, NULL);
UT_ASSERTeq(ctx->expected_granularity,
pmem2_map_get_store_granularity(map));
/* cleanup after the test */
pmem2_unmap(&map);
}
/*
* map_with_unavailable_granularity -- map the range with invalid
* granularity (unsuccessful)
*/
static void
map_with_unavailable_granularity(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx)
{
cfg->requested_max_granularity = ctx->requested_granularity;
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
UT_ERR("%s", pmem2_errormsg());
UT_ASSERTeq(map, NULL);
}
typedef void(*map_func)(struct pmem2_config *cfg,
struct pmem2_source *src, struct test_ctx *ctx);
/*
* granularity_template -- template for testing granularity in pmem2
*/
static int
granularity_template(const struct test_case *tc, int argc, char *argv[],
map_func map_do, enum pmem2_granularity granularity)
{
char *file = NULL;
int ret = parse_args(tc, argc, argv, &file);
struct test_ctx ctx = { 0 };
init_test(file, &ctx, granularity);
struct pmem2_config cfg;
struct pmem2_source *src;
init_cfg(&cfg, &src, &ctx);
map_do(&cfg, src, &ctx);
cleanup(src, &ctx);
pmem2_source_delete(&src);
return ret;
}
/*
* test_granularity_req_byte_avail_byte -- require byte granularity,
* when byte granularity is available
*/
static int
test_granularity_req_byte_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_byte_avail_cl -- require byte granularity,
* when cache line granularity is available
*/
static int
test_granularity_req_byte_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_byte_avail_page -- require byte granularity,
* when page granularity is available
*/
static int
test_granularity_req_byte_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE);
}
/*
* test_granularity_req_cl_avail_byte -- require cache line granularity,
* when byte granularity is available
*/
static int
test_granularity_req_cl_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_cl_avail_cl -- require cache line granularity,
* when cache line granularity is available
*/
static int
test_granularity_req_cl_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_cl_avail_page -- require cache line granularity,
* when page granularity is available
*/
static int
test_granularity_req_cl_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_unavailable_granularity, PMEM2_GRANULARITY_CACHE_LINE);
}
/*
* test_granularity_req_page_avail_byte -- require page granularity,
* when byte granularity is available
*/
static int
test_granularity_req_page_avail_byte(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
 * test_granularity_req_page_avail_cl -- require page granularity,
 * when cache line granularity is available
*/
static int
test_granularity_req_page_avail_cl(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
* test_granularity_req_page_avail_page -- require page granularity,
* when page granularity is available
*/
static int
test_granularity_req_page_avail_page(const struct test_case *tc, int argc,
char *argv[])
{
return granularity_template(tc, argc, argv,
map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_granularity_req_byte_avail_byte),
TEST_CASE(test_granularity_req_byte_avail_cl),
TEST_CASE(test_granularity_req_byte_avail_page),
TEST_CASE(test_granularity_req_cl_avail_byte),
TEST_CASE(test_granularity_req_cl_avail_cl),
TEST_CASE(test_granularity_req_cl_avail_page),
TEST_CASE(test_granularity_req_page_avail_byte),
TEST_CASE(test_granularity_req_page_avail_cl),
TEST_CASE(test_granularity_req_page_avail_page),
};
#define NTESTS ARRAY_SIZE(test_cases)
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_granularity");
out_init("pmem2_granularity", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 7,665 | 23.106918 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_granularity/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
 * mocks_posix.c -- mocked functions used in pmem2_granularity.c
*/
#include <fts.h>
#include "map.h"
#include "../common/mmap.h"
#include "fs.h"
#include "unittest.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
/*
* mmap - mock mmap
*/
FUNC_MOCK(mmap, void *, void *addr, size_t len, int prot,
int flags, int fd, __off_t offset)
FUNC_MOCK_RUN_DEFAULT {
char *str_map_sync = os_getenv("IS_PMEM");
const int ms = MAP_SYNC | MAP_SHARED_VALIDATE;
int map_sync_try = ((flags & ms) == ms) ? 1 : 0;
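	/*
	 * With IS_PMEM set, accept MAP_SYNC requests by downgrading them to
	 * a plain MAP_SHARED mapping; otherwise reject them with EINVAL to
	 * emulate a file system without MAP_SYNC support.
	 */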
if (str_map_sync && atoi(str_map_sync) == 1) {
if (map_sync_try) {
flags &= ~ms;
flags |= MAP_SHARED;
return _FUNC_REAL(mmap)(addr, len, prot, flags,
fd, offset);
}
} else if (map_sync_try) {
errno = EINVAL;
return MAP_FAILED;
}
return _FUNC_REAL(mmap)(addr, len, prot, flags, fd, offset);
}
FUNC_MOCK_END
/*
* open -- open mock
*/
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
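	/*
	 * Redirect accesses under /sys/bus/nd/devices to the path given in
	 * the BUS_DEVICE_PATH environment variable; region entries are
	 * passed through to the real open().
	 */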
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(open)(path, flags, mode);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(open)(mock_path, flags, mode);
}
FUNC_MOCK_END
struct fs {
FTS *ft;
struct fs_entry entry;
};
/*
* fs_new -- creates fs traversal instance
*/
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(fs_new)(path);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(fs_new)(mock_path);
}
FUNC_MOCK_END
/*
* os_stat -- os_stat mock to handle sysfs path
*/
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
if (!is_bus_device_path ||
(is_bus_device_path && strstr(path, "region")))
return _FUNC_REAL(os_stat)(path, buf);
const char *mock_path = os_getenv("BUS_DEVICE_PATH");
return _FUNC_REAL(os_stat)(mock_path, buf);
}
FUNC_MOCK_END
| 2,302 | 23.5 | 63 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_granularity/mocks_dax_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* mocks_dax_windows.c -- mocked function required to control
* FILE_DAX_VOLUME value reported by the OS APIs
*/
#include "unittest.h"
FUNC_MOCK_DLLIMPORT(GetVolumeInformationByHandleW, BOOL,
HANDLE hFile,
LPWSTR lpVolumeNameBuffer,
DWORD nVolumeNameSize,
LPDWORD lpVolumeSerialNumber,
LPDWORD lpMaximumComponentLength,
LPDWORD lpFileSystemFlags,
LPWSTR lpFileSystemNameBuffer,
DWORD nFileSystemNameSize)
FUNC_MOCK_RUN_DEFAULT {
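	/* report a DAX volume if and only if IS_PMEM is set */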
size_t is_pmem = atoi(os_getenv("IS_PMEM"));
if (is_pmem)
*lpFileSystemFlags = FILE_DAX_VOLUME;
else
*lpFileSystemFlags = 0;
return TRUE;
}
FUNC_MOCK_END
| 688 | 22.758621 | 61 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_granularity/mocks_dax_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* mocks_dax_windows.h -- redefinitions of GetVolumeInformationByHandleW
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem2
* files, when compiled for the purpose of pmem2_granularity test.
* It would replace default implementation with mocked functions defined
* in mocks_windows.c
*
* This WRAP_REAL define could also be passed as a preprocessor definition.
*/
#ifndef MOCKS_WINDOWS_H
#define MOCKS_WINDOWS_H 1
#include <windows.h>
#ifndef WRAP_REAL
#define GetVolumeInformationByHandleW __wrap_GetVolumeInformationByHandleW
BOOL
__wrap_GetVolumeInformationByHandleW(HANDLE hFile, LPWSTR lpVolumeNameBuffer,
DWORD nVolumeNameSize, LPDWORD lpVolumeSerialNumber,
LPDWORD lpMaximumComponentLength, LPDWORD lpFileSystemFlags,
LPWSTR lpFileSystemNameBuffer, DWORD nFileSystemNameSize);
#endif
#endif
| 956 | 28.90625 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_fip/rpmem_fip_test.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_fip_test.c -- tests for rpmem_fip and rpmemd_fip modules
*/
#include <netdb.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip_common.h"
#include "rpmem_fip_oob.h"
#include "rpmemd_fip.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "rpmem_fip.h"
#include "os.h"
#define SIZE_PER_LANE 64
#define COUNT_PER_LANE 32
#define NLANES 1024
#define SOCK_NLANES 32
#define NTHREADS 32
#define TOTAL_PER_LANE (SIZE_PER_LANE * COUNT_PER_LANE)
#define POOL_SIZE (NLANES * TOTAL_PER_LANE)
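/* local and remote pool buffers used by the test cases */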
static uint8_t lpool[POOL_SIZE];
static uint8_t rpool[POOL_SIZE];
TEST_CASE_DECLARE(client_init);
TEST_CASE_DECLARE(server_init);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(server_connect);
TEST_CASE_DECLARE(server_process);
TEST_CASE_DECLARE(client_flush);
TEST_CASE_DECLARE(client_flush_mt);
TEST_CASE_DECLARE(client_persist);
TEST_CASE_DECLARE(client_persist_mt);
TEST_CASE_DECLARE(client_read);
TEST_CASE_DECLARE(client_wq_size);
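/*
 * fip_client -- fabric provider parameters used by the client side
 */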
struct fip_client {
enum rpmem_provider provider;
unsigned max_wq_size;
unsigned nlanes;
};
#define FIP_CLIENT_DEFAULT {RPMEM_PROV_UNKNOWN, 0, NLANES}
/*
* get_persist_method -- parse persist method
*/
static enum rpmem_persist_method
get_persist_method(const char *pm)
{
if (strcmp(pm, "GPSPM") == 0)
return RPMEM_PM_GPSPM;
else if (strcmp(pm, "APM") == 0)
return RPMEM_PM_APM;
else
UT_FATAL("unknown method");
}
/*
* get_provider -- get provider for given target
*/
static void
get_provider(const char *target, const char *prov_name,
struct fip_client *client)
{
struct rpmem_fip_probe probe;
int ret;
int any = 0;
if (strcmp(prov_name, "any") == 0)
any = 1;
ret = rpmem_fip_probe_get(target, &probe);
UT_ASSERTeq(ret, 0);
UT_ASSERT(rpmem_fip_probe_any(probe));
if (any) {
/* return verbs in first place */
if (rpmem_fip_probe(probe,
RPMEM_PROV_LIBFABRIC_VERBS))
client->provider = RPMEM_PROV_LIBFABRIC_VERBS;
else if (rpmem_fip_probe(probe,
RPMEM_PROV_LIBFABRIC_SOCKETS))
client->provider = RPMEM_PROV_LIBFABRIC_SOCKETS;
else
UT_ASSERT(0);
} else {
client->provider = rpmem_provider_from_str(prov_name);
UT_ASSERTne(client->provider, RPMEM_PROV_UNKNOWN);
UT_ASSERT(rpmem_fip_probe(probe, client->provider));
}
/*
* Decrease number of lanes for socket provider because
* the test may be too long.
*/
if (client->provider == RPMEM_PROV_LIBFABRIC_SOCKETS)
client->nlanes = min(client->nlanes, SOCK_NLANES);
client->max_wq_size = probe.max_wq_size[client->provider];
}
/*
* set_pool_data -- set pools data to well known values
*/
static void
set_pool_data(uint8_t *pool, int inverse)
{
for (unsigned l = 0; l < NLANES; l++) {
for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
size_t offset = l * TOTAL_PER_LANE + i * SIZE_PER_LANE;
unsigned val = i + l;
if (inverse)
val = ~val;
memset(&pool[offset], (int)val, SIZE_PER_LANE);
}
}
}
/*
* flush_arg -- arguments for client persist and flush / drain threads
*/
struct flush_arg {
struct rpmem_fip *fip;
unsigned lane;
};
typedef void *(*flush_fn)(void *arg);
/*
* client_flush_thread -- thread callback for flush / drain operation
*/
static void *
client_flush_thread(void *arg)
{
struct flush_arg *args = arg;
int ret;
	/* flush with len == 0 should always succeed */
ret = rpmem_fip_flush(args->fip, args->lane * TOTAL_PER_LANE,
0, args->lane, RPMEM_FLUSH_WRITE);
UT_ASSERTeq(ret, 0);
for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE;
unsigned val = args->lane + i;
memset(&lpool[offset], (int)val, SIZE_PER_LANE);
ret = rpmem_fip_flush(args->fip, offset,
SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE);
UT_ASSERTeq(ret, 0);
}
ret = rpmem_fip_drain(args->fip, args->lane);
UT_ASSERTeq(ret, 0);
return NULL;
}
/*
* client_persist_thread -- thread callback for persist operation
*/
static void *
client_persist_thread(void *arg)
{
struct flush_arg *args = arg;
int ret;
/* persist with len == 0 should always succeed */
ret = rpmem_fip_persist(args->fip, args->lane * TOTAL_PER_LANE,
0, args->lane, RPMEM_FLUSH_WRITE);
UT_ASSERTeq(ret, 0);
for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE;
unsigned val = args->lane + i;
memset(&lpool[offset], (int)val, SIZE_PER_LANE);
ret = rpmem_fip_persist(args->fip, offset,
SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE);
UT_ASSERTeq(ret, 0);
}
return NULL;
}
/*
* client_init -- test case for client initialization
*/
int
client_init(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_init %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
/*
* Tune the maximum number of lanes according to environment.
*/
rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
client_close_begin(client);
client_close_end(client);
rpmem_fip_fini(fip);
rpmem_target_free(info);
return 3;
}
/*
* server_init -- test case for server initialization
*/
int
server_init(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
int ret;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
server_close_begin();
server_close_end();
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* client_connect -- test case for establishing connection - client side
*/
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_connect %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
rpmem_target_free(info);
return 3;
}
/*
* server_connect -- test case for establishing connection - server side
*/
int
server_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
int ret;
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
ret = rpmemd_fip_accept(fip, -1);
UT_ASSERTeq(ret, 0);
server_close_begin();
server_close_end();
ret = rpmemd_fip_wait_close(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_close(fip);
UT_ASSERTeq(ret, 0);
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* server_process -- test case for processing data on server side
*/
int
server_process(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
set_pool_data(rpool, 1);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
int ret;
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
ret = rpmemd_fip_accept(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_process_start(fip);
server_close_begin();
ret = rpmemd_fip_process_stop(fip);
UT_ASSERTeq(ret, 0);
server_close_end();
ret = rpmemd_fip_wait_close(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_close(fip);
UT_ASSERTeq(ret, 0);
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* flush_common -- common part for single-threaded persist and flush / drain
* test cases
*/
static void
flush_common(char *target, char *prov_name, char *persist_method,
flush_fn flush_func)
{
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret;
set_pool_data(lpool, 1);
set_pool_data(rpool, 1);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
struct flush_arg arg = {
.fip = fip,
.lane = 0,
};
flush_func(&arg);
ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
}
/*
* flush_common_mt -- common part for multi-threaded persist and flush / drain
* test cases
*/
static int
flush_common_mt(char *target, char *prov_name, char *persist_method,
flush_fn flush_thread_func)
{
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
set_pool_data(lpool, 1);
set_pool_data(rpool, 1);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
os_thread_t *flush_thread = MALLOC(resp.nlanes * sizeof(os_thread_t));
struct flush_arg *args = MALLOC(resp.nlanes * sizeof(struct flush_arg));
for (unsigned i = 0; i < fip_client.nlanes; i++) {
args[i].fip = fip;
args[i].lane = i;
THREAD_CREATE(&flush_thread[i], NULL,
flush_thread_func, &args[i]);
}
for (unsigned i = 0; i < fip_client.nlanes; i++)
THREAD_JOIN(&flush_thread[i], NULL);
ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
FREE(flush_thread);
FREE(args);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
return 3;
}
/*
* client_flush -- test case for single-threaded flush / drain operation
*/
int
client_flush(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
flush_common(target, prov_name, persist_method, client_flush_thread);
return 3;
}
/*
* client_flush_mt -- test case for multi-threaded flush / drain operation
*/
int
client_flush_mt(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
flush_common_mt(target, prov_name, persist_method, client_flush_thread);
return 3;
}
/*
* client_persist -- test case for single-threaded persist operation
*/
int
client_persist(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
flush_common(target, prov_name, persist_method, client_persist_thread);
return 3;
}
/*
* client_persist_mt -- test case for multi-threaded persist operation
*/
int
client_persist_mt(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
flush_common_mt(target, prov_name, persist_method,
client_persist_thread);
return 3;
}
/*
* client_read -- test case for read operation
*/
int
client_read(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
set_pool_data(lpool, 0);
set_pool_data(rpool, 1);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
/* read with len == 0 should always succeed */
ret = rpmem_fip_read(fip, lpool, 0, 0, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
return 3;
}
#define LT_MAX_WQ_SIZE "LT_MAX_WQ_SIZE" /* < max_wq_size */
#define EQ_MAX_WQ_SIZE "EQ_MAX_WQ_SIZE" /* == max_wq_size */
#define GT_MAX_WQ_SIZE "GT_MAX_WQ_SIZE" /* > max_wq_size */
/*
* client_wq_size -- test case for WQ size adjustment
*/
int
client_wq_size(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>"
"<wq_size>", tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
char *wq_size_env_str = argv[3];
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
struct fip_client fip_client = FIP_CLIENT_DEFAULT;
get_provider(info->node, prov_name, &fip_client);
rpmem_util_get_env_max_nlanes(&fip_client.nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, fip_client.nlanes, fip_client.provider,
&resp);
struct rpmem_fip_attr attr = {
.provider = fip_client.provider,
.max_wq_size = fip_client.max_wq_size,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
/* check RPMEM_WORK_QUEUE_SIZE env processing */
unsigned wq_size_default = Rpmem_wq_size;
if (strcmp(wq_size_env_str, LT_MAX_WQ_SIZE) == 0) {
Rpmem_wq_size = fip_client.max_wq_size - 1;
} else if (strcmp(wq_size_env_str, EQ_MAX_WQ_SIZE) == 0) {
Rpmem_wq_size = fip_client.max_wq_size;
} else if (strcmp(wq_size_env_str, GT_MAX_WQ_SIZE) == 0) {
Rpmem_wq_size = fip_client.max_wq_size + 1;
} else {
long wq_size_env = STRTOL(wq_size_env_str, NULL, 10);
rpmem_util_get_env_wq_size(&Rpmem_wq_size);
if (wq_size_env > 0) {
if (wq_size_env < UINT_MAX)
UT_ASSERT(Rpmem_wq_size == wq_size_env);
else
UT_ASSERT(Rpmem_wq_size == UINT_MAX);
} else
UT_ASSERT(Rpmem_wq_size == wq_size_default);
}
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr,
&fip_client.nlanes);
UT_ASSERTne(fip, NULL);
size_t req_wq_size = rpmem_fip_wq_size(
resp.persist_method, RPMEM_FIP_NODE_CLIENT);
size_t eff_wq_size = rpmem_fip_get_wq_size(fip);
/* max supported meets minimal requirements */
UT_ASSERT(fip_client.max_wq_size >= req_wq_size);
/* calculated meets minimal requirements */
UT_ASSERT(eff_wq_size >= req_wq_size);
/* calculated is supported */
UT_ASSERT(eff_wq_size <= fip_client.max_wq_size);
/* if forced by env meets minimal requirements */
if (Rpmem_wq_size > req_wq_size) {
/* and it is supported */
if (Rpmem_wq_size <= fip_client.max_wq_size) {
/* calculated is >= to forced */
UT_ASSERT(eff_wq_size >= Rpmem_wq_size);
} else {
/* calculated is clipped to max supported */
UT_ASSERT(eff_wq_size == fip_client.max_wq_size);
}
}
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
rpmem_target_free(info);
return 4;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(client_init),
TEST_CASE(server_init),
TEST_CASE(client_connect),
TEST_CASE(server_connect),
TEST_CASE(client_flush),
TEST_CASE(client_flush_mt),
TEST_CASE(client_persist),
TEST_CASE(client_persist_mt),
TEST_CASE(server_process),
TEST_CASE(client_read),
TEST_CASE(client_wq_size)
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
/* workaround for left-opened files by libfabric */
rpmem_fip_probe_get("localhost", NULL);
START(argc, argv, "rpmem_obc");
common_init("rpmem_fip",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmem_util_cmds_init();
rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
rpmemd_log_level = rpmemd_log_level_from_str(
os_getenv("RPMEMD_LOG_LEVEL"));
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
common_fini();
rpmemd_log_close();
rpmem_util_cmds_fini();
DONE(NULL);
}
| 22,586 | 21.97762 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/rpmem_fip/rpmem_fip_oob.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
 * rpmem_fip_oob.h -- simple oob connection implementation for exchanging
* required RDMA related data
*/
#include <stdint.h>
#include <netinet/in.h>
typedef struct rpmem_ssh client_t;
client_t *client_exchange(struct rpmem_target_info *info,
unsigned nlanes,
enum rpmem_provider provider,
struct rpmem_resp_attr *resp);
void client_close_begin(client_t *c);
void client_close_end(client_t *c);
void server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
char **addr);
void server_exchange_end(struct rpmem_resp_attr resp);
void server_close_begin(void);
void server_close_end(void);
void set_rpmem_cmd(const char *fmt, ...);
| 743 | 24.655172 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_source/pmem2_source.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_source.c -- pmem2_source unittests
*/
#include "fault_injection.h"
#include "libpmem2.h"
#include "unittest.h"
#include "ut_pmem2_utils.h"
#include "ut_pmem2_config.h"
#include "source.h"
#include "out.h"
/*
 * verify_fd -- verify the fd or handle value stored in the source
*/
static void
verify_fd(struct pmem2_source *src, int fd)
{
#ifdef _WIN32
UT_ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
UT_ASSERTeq(src->value.handle, fd != INVALID_FD ?
(HANDLE)_get_osfhandle(fd) : INVALID_HANDLE_VALUE);
#else
UT_ASSERTeq(src->type, PMEM2_SOURCE_FD);
UT_ASSERTeq(src->value.fd, fd);
#endif
}
/*
* test_set_rw_fd - test setting O_RDWR fd
*/
static int
test_set_rw_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_rw_fd <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_source *src;
int ret = pmem2_source_from_fd(&src, fd);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(src, NULL);
verify_fd(src, fd);
ret = pmem2_source_delete(&src);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src, NULL);
CLOSE(fd);
return 1;
}
/*
* test_set_ro_fd - test setting O_RDONLY fd
*/
static int
test_set_ro_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_ro_fd <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDONLY);
struct pmem2_source *src;
int ret = pmem2_source_from_fd(&src, fd);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(src, NULL);
verify_fd(src, fd);
ret = pmem2_source_delete(&src);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src, NULL);
CLOSE(fd);
return 1;
}
/*
* test_set_invalid_fd - test setting invalid fd
*/
static int
test_set_invalid_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_invalid_fd <file>");
char *file = argv[0];
/* open and close the file to get invalid fd */
int fd = OPEN(file, O_WRONLY);
CLOSE(fd);
ut_suppress_crt_assert();
struct pmem2_source *src;
int ret = pmem2_source_from_fd(&src, fd);
ut_unsuppress_crt_assert();
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
UT_ASSERTeq(src, NULL);
return 1;
}
/*
* test_set_wronly_fd - test setting wronly fd
*/
static int
test_set_wronly_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_wronly_fd <file>");
char *file = argv[0];
int fd = OPEN(file, O_WRONLY);
struct pmem2_source *src;
int ret = pmem2_source_from_fd(&src, fd);
#ifdef _WIN32
/* windows doesn't validate open flags */
UT_PMEM2_EXPECT_RETURN(ret, 0);
verify_fd(src, fd);
ret = pmem2_source_delete(&src);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src, NULL);
#else
UT_ASSERTeq(src, NULL);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
#endif
CLOSE(fd);
return 1;
}
/*
* test_alloc_src_enomem - test pmem2_source allocation with error injection
*/
static int
test_alloc_src_enomem(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_alloc_src_enomem <file>");
char *file = argv[0];
struct pmem2_source *src;
if (!core_fault_injection_enabled()) {
return 1;
}
int fd = OPEN(file, O_RDWR);
core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc");
int ret = pmem2_source_from_fd(&src, fd);
UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM);
UT_ASSERTeq(src, NULL);
CLOSE(fd);
return 1;
}
/*
* test_delete_null_config - test pmem2_source_delete on NULL config
*/
static int
test_delete_null_config(const struct test_case *tc, int argc,
char *argv[])
{
struct pmem2_source *src = NULL;
/* should not crash */
int ret = pmem2_source_delete(&src);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src, NULL);
return 0;
}
#ifdef _WIN32
/*
* test_set_handle - test setting valid handle
*/
static int
test_set_handle(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_handle <file>");
char *file = argv[0];
HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE,
0, NULL, OPEN_ALWAYS, 0, NULL);
UT_ASSERTne(h, INVALID_HANDLE_VALUE);
struct pmem2_source *src;
int ret = pmem2_source_from_handle(&src, h);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src->value.handle, h);
CloseHandle(h);
	ret = pmem2_source_delete(&src);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(src, NULL);
return 1;
}
/*
 * test_set_null_handle - test setting INVALID_HANDLE_VALUE
*/
static int
test_set_null_handle(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_source *src;
int ret = pmem2_source_from_handle(&src, INVALID_HANDLE_VALUE);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
UT_ASSERTeq(src, NULL);
return 0;
}
/*
* test_set_invalid_handle - test setting invalid handle
*/
static int
test_set_invalid_handle(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_invalid_handle <file>");
char *file = argv[0];
struct pmem2_source *src;
HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE,
0, NULL, OPEN_ALWAYS, 0, NULL);
UT_ASSERTne(h, INVALID_HANDLE_VALUE);
CloseHandle(h);
int ret = pmem2_source_from_handle(&src, h);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
return 1;
}
/*
* test_set_directory_handle - test setting a directory handle
*/
static int
test_set_directory_handle(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_directory_handle <file>");
char *file = argv[0];
struct pmem2_source *src;
HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE,
0, NULL, OPEN_ALWAYS, FILE_FLAG_BACKUP_SEMANTICS, NULL);
UT_ASSERTne(h, INVALID_HANDLE_VALUE);
int ret = pmem2_source_from_handle(&src, h);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE);
UT_ASSERTeq(src, NULL);
CloseHandle(h);
return 1;
}
/*
 * test_set_mutex_handle - test setting a mutex handle
*/
static int
test_set_mutex_handle(const struct test_case *tc, int argc, char *argv[])
{
struct pmem2_source *src;
HANDLE h = CreateMutex(NULL, FALSE, NULL);
UT_ASSERTne(h, INVALID_HANDLE_VALUE);
int ret = pmem2_source_from_handle(&src, h);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE);
UT_ASSERTeq(src, NULL);
CloseHandle(h);
return 0;
}
#else
/*
 * test_set_directory_fd - test setting directory's fd
*/
static int
test_set_directory_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_set_directory_fd <file>");
char *file = argv[0];
struct pmem2_source *src;
int fd = OPEN(file, O_RDONLY);
int ret = pmem2_source_from_fd(&src, fd);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE);
CLOSE(fd);
return 1;
}
#endif
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_set_rw_fd),
TEST_CASE(test_set_ro_fd),
TEST_CASE(test_set_invalid_fd),
TEST_CASE(test_set_wronly_fd),
TEST_CASE(test_alloc_src_enomem),
TEST_CASE(test_delete_null_config),
#ifdef _WIN32
TEST_CASE(test_set_handle),
TEST_CASE(test_set_null_handle),
TEST_CASE(test_set_invalid_handle),
TEST_CASE(test_set_directory_handle),
TEST_CASE(test_set_mutex_handle),
#else
TEST_CASE(test_set_directory_fd),
#endif
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
START(argc, argv, "pmem2_source");
util_init();
out_init("pmem2_source", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
| 7,608 | 20.433803 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/tools/ddmap/ddmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* ddmap.c -- simple app for reading and writing data from/to a regular file or
 * dax device using mmap instead of file I/O API
*/
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include "common.h"
#include "output.h"
#include "mmap.h"
#include "file.h"
#include "util.h"
#include "os.h"
/*
* ddmap_context -- context and arguments
*/
struct ddmap_context {
char *file_in; /* input file name */
char *file_out; /* output file name */
char *str; /* string data to write */
size_t offset_in; /* offset from beginning of input file for */
/* read/write operations expressed in blocks */
size_t offset_out; /* offset from beginning of output file for */
/* read/write operations expressed in blocks */
size_t bytes; /* size of blocks to write at the time */
size_t count; /* number of blocks to read/write */
int checksum; /* compute checksum */
int runlen; /* print bytes as runlen/char sequence */
};
/*
* the default context, with all fields initialized to zero or NULL
*/
static struct ddmap_context ddmap_default;
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage: ddmap [option] ...\n");
printf("Valid options:\n");
printf("-i FILE - read from FILE\n");
printf("-o FILE - write to FILE\n");
printf("-d STRING - STRING to be written\n");
printf("-s N - skip N blocks at start of input\n");
printf("-q N - skip N blocks at start of output\n");
printf("-b N - read/write N bytes at a time\n");
printf("-n N - copy N input blocks\n");
printf("-c - compute checksum\n");
printf("-r - print file content as runlen/char pairs\n");
printf("-h - print this usage info\n");
}
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"input-file", required_argument, NULL, 'i'},
{"output-file", required_argument, NULL, 'o'},
{"string", required_argument, NULL, 'd'},
{"offset-in", required_argument, NULL, 's'},
{"offset-out", required_argument, NULL, 'q'},
{"block-size", required_argument, NULL, 'b'},
{"count", required_argument, NULL, 'n'},
{"checksum", no_argument, NULL, 'c'},
{"runlen", no_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* ddmap_print_char -- (internal) print single char
*
* Printable ASCII characters are printed normally,
* NUL character is printed as a little circle (the degree symbol),
* non-printable ASCII characters are printed as centered dots.
*/
static void
ddmap_print_char(char c)
{
if (c == '\0')
/* print the degree symbol for NUL */
printf("\u00B0");
else if (c >= ' ' && c <= '~')
/* print printable ASCII character */
printf("%c", c);
else
/* print centered dot for non-printable character */
printf("\u00B7");
}
/*
* ddmap_print_runlen -- (internal) print file content as length/char pairs
*
* For each sequence of chars of the same value (could be just 1 byte)
* print length of the sequence and the char value.
*/
static void
ddmap_print_runlen(char *addr, size_t len)
{
char c = '\0';
ssize_t cnt = 0;
for (size_t i = 0; i < len; i++) {
if (i > 0 && c != addr[i] && cnt != 0) {
printf("%zd ", cnt);
ddmap_print_char(c);
printf("\n");
cnt = 0;
}
c = addr[i];
cnt++;
}
if (cnt) {
printf("%zd ", cnt);
ddmap_print_char(c);
printf("\n");
}
}
/*
* ddmap_print_bytes -- (internal) print array of bytes
*/
static void
ddmap_print_bytes(const char *data, size_t len)
{
for (size_t i = 0; i < len; ++i) {
ddmap_print_char(data[i]);
}
printf("\n");
}
/*
* ddmap_read -- (internal) read a string from the file at the offset and
* print it to stdout
*/
static int
ddmap_read(const char *path, size_t offset_in, size_t bytes, size_t count,
int runlen)
{
size_t len = bytes * count;
os_off_t offset = (os_off_t)(bytes * offset_in);
char *read_buff = Zalloc(len + 1);
if (read_buff == NULL) {
outv_err("Zalloc(%zu) failed\n", len + 1);
return -1;
}
ssize_t read_len = util_file_pread(path, read_buff, len, offset);
if (read_len < 0) {
outv_err("pread failed");
Free(read_buff);
return -1;
} else if ((size_t)read_len < len) {
outv(1, "read less bytes than requested: %zd vs. %zu\n",
read_len, len);
}
if (runlen)
ddmap_print_runlen(read_buff, (size_t)read_len);
else
ddmap_print_bytes(read_buff, (size_t)read_len);
Free(read_buff);
return 0;
}
/*
* ddmap_zero -- (internal) zero a range of data in the file
*/
static int
ddmap_zero(const char *path, size_t offset, size_t len)
{
void *addr;
ssize_t filesize = util_file_get_size(path);
if (filesize < 0) {
outv_err("invalid file size");
return -1;
}
if (offset + len > (size_t)filesize)
len = (size_t)filesize - offset;
addr = util_file_map_whole(path);
if (addr == NULL) {
outv_err("map failed");
return -1;
}
memset((char *)addr + offset, 0, len);
util_unmap(addr, (size_t)filesize);
return 0;
}
/*
* ddmap_write_data -- (internal) write data to a file
*/
static int
ddmap_write_data(const char *path, const char *data,
os_off_t offset, size_t len)
{
if (util_file_pwrite(path, data, len, offset) < 0) {
outv_err("pwrite for dax device failed: path %s,"
" len %zu, offset %zd", path, len, offset);
return -1;
}
return 0;
}
/*
* ddmap_write_from_file -- (internal) write data from file to dax device or
* file
*/
static int
ddmap_write_from_file(const char *path_in, const char *path_out,
size_t offset_in, size_t offset_out, size_t bytes,
size_t count)
{
char *src, *tmp_src;
os_off_t offset;
ssize_t file_in_size = util_file_get_size(path_in);
size_t data_left, len;
util_init();
src = util_file_map_whole(path_in);
src += (os_off_t)(offset_in * bytes);
offset = (os_off_t)(offset_out * bytes);
data_left = (size_t)file_in_size;
tmp_src = src;
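	/*
	 * Copy 'count' blocks of 'bytes' each; when the input data is
	 * exhausted, wrap around to its beginning and keep copying.
	 */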
do {
len = MIN(data_left, bytes);
ddmap_write_data(path_out, tmp_src, offset, len);
tmp_src += len;
data_left -= len;
if (data_left == 0) {
data_left = (size_t)file_in_size;
tmp_src = src;
}
offset += (os_off_t)len;
count--;
} while (count > 0);
util_unmap(src, (size_t)file_in_size);
return 0;
}
/*
* ddmap_write -- (internal) write the string to the file
*/
static int
ddmap_write(const char *path, const char *str, size_t offset_in, size_t bytes,
size_t count)
{
/* calculate how many characters from the string are to be written */
size_t length;
size_t str_len = (str != NULL) ? strlen(str) + 1 : 0;
os_off_t offset = (os_off_t)(bytes * offset_in);
size_t len = bytes * count;
if (len == 0)
length = str_len;
else
		length = MIN(len, str_len);
/* write the string */
if (length > 0) {
if (ddmap_write_data(path, str, offset, length))
return -1;
}
/* zero the rest of requested range */
if (length < len) {
if (ddmap_zero(path, (size_t)offset + length, len - length))
return -1;
}
return 0;
}
/*
* ddmap_checksum -- (internal) compute checksum of a slice of an input file
*/
static int
ddmap_checksum(const char *path, size_t bytes, size_t count, size_t offset_in)
{
char *src;
uint64_t checksum;
ssize_t filesize = util_file_get_size(path);
os_off_t offset = (os_off_t)(bytes * offset_in);
size_t len = bytes * count;
if ((size_t)filesize < len + (size_t)offset) {
outv_err("offset with length exceed file size");
return -1;
}
util_init();
src = util_file_map_whole(path);
util_checksum(src + offset, len, &checksum, 1, 0);
util_unmap(src, (size_t)filesize);
printf("%" PRIu64 "\n", checksum);
return 0;
}
/*
* parse_args -- (internal) parse command line arguments
*/
static int
parse_args(struct ddmap_context *ctx, int argc, char *argv[])
{
int opt;
char *endptr;
size_t offset;
size_t count;
size_t bytes;
while ((opt = getopt_long(argc, argv, "i:o:d:s:q:b:n:crhv",
long_options, NULL)) != -1) {
switch (opt) {
case 'i':
ctx->file_in = optarg;
break;
case 'o':
ctx->file_out = optarg;
break;
case 'd':
ctx->str = optarg;
if (ctx->count == 0)
ctx->count = strlen(ctx->str);
if (ctx->bytes == 0)
ctx->bytes = 1;
break;
case 's':
errno = 0;
offset = strtoul(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid input offset",
optarg);
return -1;
}
ctx->offset_in = offset;
break;
case 'q':
errno = 0;
offset = strtoul(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid output offset",
optarg);
return -1;
}
ctx->offset_out = offset;
break;
case 'b':
errno = 0;
bytes = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid block size", optarg);
return -1;
}
ctx->bytes = bytes;
break;
case 'n':
errno = 0;
count = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid count", optarg);
return -1;
}
ctx->count = count;
break;
case 'c':
ctx->checksum = 1;
break;
case 'r':
ctx->runlen = 1;
break;
case 'h':
print_usage();
exit(EXIT_SUCCESS);
case 'v':
out_set_vlevel(1);
break;
default:
print_usage();
exit(EXIT_FAILURE);
}
}
return 0;
}
/*
* validate_args -- (internal) validate arguments
*/
static int
validate_args(struct ddmap_context *ctx)
{
if ((ctx->file_in == NULL) && (ctx->file_out == NULL)) {
outv_err("an input file and/or an output file must be "
"provided");
return -1;
} else if (ctx->file_out == NULL) {
if (ctx->bytes == 0) {
outv_err("number of bytes to read has to be provided");
return -1;
}
} else if (ctx->file_in == NULL) {
/* ddmap_write requirements */
if (ctx->str == NULL && (ctx->count * ctx->bytes) == 0) {
outv_err("when writing, 'data' or 'count' and 'bytes' "
"have to be provided");
return -1;
}
} else {
/* scenarios other than ddmap_write requirement */
if ((ctx->bytes * ctx->count) == 0) {
outv_err("number of bytes and count must be provided");
return -1;
}
}
return 0;
}
/*
* do_ddmap -- (internal) perform ddmap
*/
static int
do_ddmap(struct ddmap_context *ctx)
{
if ((ctx->file_in != NULL) && (ctx->file_out != NULL)) {
if (ddmap_write_from_file(ctx->file_in, ctx->file_out,
ctx->offset_in, ctx->offset_out, ctx->bytes,
ctx->count))
return -1;
return 0;
}
if ((ctx->checksum == 1) && (ctx->file_in != NULL)) {
if (ddmap_checksum(ctx->file_in, ctx->bytes, ctx->count,
ctx->offset_in))
return -1;
return 0;
}
if (ctx->file_in != NULL) {
if (ddmap_read(ctx->file_in, ctx->offset_in, ctx->bytes,
ctx->count, ctx->runlen))
return -1;
} else { /* ctx->file_out != NULL */
if (ddmap_write(ctx->file_out, ctx->str, ctx->offset_in,
ctx->bytes, ctx->count))
return -1;
}
return 0;
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
util_suppress_errmsg();
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
outv_err("Error during arguments conversion\n");
return 1;
}
}
#endif
int ret = 0;
struct ddmap_context ctx = ddmap_default;
if ((ret = parse_args(&ctx, argc, argv)))
goto out;
if ((ret = validate_args(&ctx)))
goto out;
if ((ret = do_ddmap(&ctx))) {
outv_err("failed to perform ddmap\n");
if (errno)
outv_err("errno: %s\n", strerror(errno));
ret = -1;
goto out;
}
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 11,872 | 22.280392 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/tools/dllview/dllview.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* dllview.c -- a simple utility displaying the list of symbols exported by DLL
*
* usage: dllview filename
*/
#include <windows.h>
#include <stdio.h>
#include <winnt.h>
#include <imagehlp.h>
#include "util.h"
int
main(int argc, char *argv[])
{
util_suppress_errmsg();
if (argc < 2) {
fprintf(stderr, "usage: %s dllname\n", argv[0]);
exit(1);
}
const char *dllname = argv[1];
LOADED_IMAGE img;
if (MapAndLoad(dllname, NULL, &img, 1, 1) == FALSE) {
fprintf(stderr, "cannot load DLL image\n");
exit(2);
}
IMAGE_EXPORT_DIRECTORY *dir;
ULONG dirsize;
dir = (IMAGE_EXPORT_DIRECTORY *)ImageDirectoryEntryToData(
img.MappedAddress, 0 /* mapped as image */,
IMAGE_DIRECTORY_ENTRY_EXPORT, &dirsize);
if (dir == NULL) {
fprintf(stderr, "cannot read image directory\n");
UnMapAndLoad(&img);
exit(3);
}
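	/* resolve the RVA of the export name table and print each symbol */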
DWORD *rva;
rva = (DWORD *)ImageRvaToVa(img.FileHeader, img.MappedAddress,
dir->AddressOfNames, NULL);
for (DWORD i = 0; i < dir->NumberOfNames; i++) {
char *name = (char *)ImageRvaToVa(img.FileHeader,
img.MappedAddress, rva[i], NULL);
printf("%s\n", name);
}
UnMapAndLoad(&img);
return 0;
}
| 1,233 | 20.649123 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/tools/cmpmap/cmpmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* cmpmap -- a tool for comparing files using mmap
*/
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "file.h"
#include "fcntl.h"
#include "mmap.h"
#include "os.h"
#include "util.h"
#define CMPMAP_ZERO (1<<0)
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))
/* arguments */
static char *File1 = NULL; /* file1 name */
static char *File2 = NULL; /* file2 name */
static size_t Length = 0; /* number of bytes to read */
static os_off_t Offset = 0; /* offset from beginning of file */
static int Opts = 0; /* options flag */
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage: cmpmap [options] file1 [file2]\n");
printf("Valid options:\n");
printf("-l, --length=N - compare up to N bytes\n");
printf("-o, --offset=N - skip N bytes at start of the files\n");
printf("-z, --zero - compare bytes of the file1 to NUL\n");
printf("-h, --help - print this usage info\n");
}
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"length", required_argument, NULL, 'l'},
{"offset", required_argument, NULL, 'o'},
{"zero", no_argument, NULL, 'z'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* parse_args -- (internal) parse command line arguments
*/
static int
parse_args(int argc, char *argv[])
{
int opt;
char *endptr;
os_off_t off;
ssize_t len;
while ((opt = getopt_long(argc, argv, "l:o:zh",
long_options, NULL)) != -1) {
switch (opt) {
case 'l':
errno = 0;
len = strtoll(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno || len < 0) {
fprintf(stderr, "'%s' -- invalid length",
optarg);
return -1;
}
Length = (size_t)len;
break;
case 'o':
errno = 0;
off = strtol(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno || off < 0) {
fprintf(stderr, "'%s' -- invalid offset",
optarg);
return -1;
}
Offset = off;
break;
case 'z':
Opts |= CMPMAP_ZERO;
break;
case 'h':
print_usage();
return 0;
default:
print_usage();
return -1;
}
}
if (optind < argc) {
File1 = argv[optind];
if (optind + 1 < argc)
File2 = argv[optind + 1];
} else {
print_usage();
return -1;
}
return 0;
}
/*
* validate_args -- (internal) validate arguments
*/
static int
validate_args(void)
{
if (File1 == NULL) {
fprintf(stderr, "no file provided");
return -1;
} else if (File2 == NULL && Length == 0) {
fprintf(stderr, "length of the file has to be provided");
return -1;
}
return 0;
}
/*
* do_cmpmap -- (internal) perform cmpmap
*/
static int
do_cmpmap(void)
{
int ret = 0;
int fd1;
int fd2;
size_t size1;
size_t size2;
/* open the first file */
if ((fd1 = os_open(File1, O_RDONLY)) < 0) {
fprintf(stderr, "opening %s failed, errno %d\n", File1, errno);
return -1;
}
ssize_t size_tmp = util_fd_get_size(fd1);
if (size_tmp < 0) {
fprintf(stderr, "getting size of %s failed, errno %d\n", File1,
errno);
ret = -1;
goto out_close1;
}
size1 = (size_t)size_tmp;
int flag = MAP_SHARED;
if (Opts & CMPMAP_ZERO) {
/* when checking if bytes are zeroed */
fd2 = -1;
size2 = (size_t)Offset + Length;
flag |= MAP_ANONYMOUS;
} else if (File2 != NULL) {
/* when comparing two files */
/* open the second file */
if ((fd2 = os_open(File2, O_RDONLY)) < 0) {
fprintf(stderr, "opening %s failed, errno %d\n",
File2, errno);
ret = -1;
goto out_close1;
}
size_tmp = util_fd_get_size(fd2);
if (size_tmp < 0) {
fprintf(stderr, "getting size of %s failed, errno %d\n",
File2, errno);
ret = -1;
goto out_close2;
}
size2 = (size_t)size_tmp;
/* basic check */
size_t min_size = (size1 < size2) ? size1 : size2;
if ((size_t)Offset + Length > min_size) {
if (size1 != size2) {
fprintf(stdout, "%s %s differ in size: %zu"
" %zu\n", File1, File2, size1, size2);
ret = -1;
goto out_close2;
} else {
Length = min_size - (size_t)Offset;
}
}
} else {
assert(0);
}
/* initialize utils */
util_init();
/* map the first file */
void *addr1;
if ((addr1 = util_map(fd1, 0, size1, MAP_SHARED,
1, 0, NULL)) == MAP_FAILED) {
fprintf(stderr, "mmap failed, file %s, length %zu, offset 0,"
" errno %d\n", File1, size1, errno);
ret = -1;
goto out_close2;
}
/* map the second file, or do anonymous mapping to get zeroed bytes */
void *addr2;
if ((addr2 = util_map(fd2, 0, size2, flag, 1, 0, NULL)) == MAP_FAILED) {
fprintf(stderr, "mmap failed, file %s, length %zu, errno %d\n",
File2 ? File2 : "(anonymous)", size2, errno);
ret = -1;
goto out_unmap1;
}
/* compare bytes of memory */
if ((ret = memcmp(ADDR_SUM(addr1, Offset), ADDR_SUM(addr2, Offset),
Length))) {
if (Opts & CMPMAP_ZERO)
fprintf(stderr, "%s is not zeroed\n", File1);
else
fprintf(stderr, "%s %s differ\n", File1, File2);
ret = -1;
}
munmap(addr2, size2);
out_unmap1:
munmap(addr1, size1);
out_close2:
if (File2 != NULL)
(void) os_close(fd2);
out_close1:
(void) os_close(fd1);
return ret;
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
util_suppress_errmsg();
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
fprintf(stderr, "Error during arguments conversion\n");
return 1;
}
}
#endif
int ret = EXIT_FAILURE;
if (parse_args(argc, argv))
goto end;
if (validate_args())
goto end;
if (do_cmpmap())
goto end;
ret = EXIT_SUCCESS;
end:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
exit(ret);
}
| 5,918 | 20.291367 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/tools/ctrld/signals_linux.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* signals_linux.h - Signal definitions for Linux
*/
#ifndef _SIGNALS_LINUX_H
#define _SIGNALS_LINUX_H 1
#define SIGNAL_2_STR(sig) [sig] = #sig
static const char *signal2str[] = {
SIGNAL_2_STR(SIGHUP), /* 1 */
SIGNAL_2_STR(SIGINT), /* 2 */
SIGNAL_2_STR(SIGQUIT), /* 3 */
SIGNAL_2_STR(SIGILL), /* 4 */
SIGNAL_2_STR(SIGTRAP), /* 5 */
SIGNAL_2_STR(SIGABRT), /* 6 */
SIGNAL_2_STR(SIGBUS), /* 7 */
SIGNAL_2_STR(SIGFPE), /* 8 */
SIGNAL_2_STR(SIGKILL), /* 9 */
SIGNAL_2_STR(SIGUSR1), /* 10 */
SIGNAL_2_STR(SIGSEGV), /* 11 */
SIGNAL_2_STR(SIGUSR2), /* 12 */
SIGNAL_2_STR(SIGPIPE), /* 13 */
SIGNAL_2_STR(SIGALRM), /* 14 */
SIGNAL_2_STR(SIGTERM), /* 15 */
SIGNAL_2_STR(SIGSTKFLT), /* 16 */
SIGNAL_2_STR(SIGCHLD), /* 17 */
SIGNAL_2_STR(SIGCONT), /* 18 */
SIGNAL_2_STR(SIGSTOP), /* 19 */
SIGNAL_2_STR(SIGTSTP), /* 20 */
SIGNAL_2_STR(SIGTTIN), /* 21 */
SIGNAL_2_STR(SIGTTOU), /* 22 */
SIGNAL_2_STR(SIGURG), /* 23 */
SIGNAL_2_STR(SIGXCPU), /* 24 */
SIGNAL_2_STR(SIGXFSZ), /* 25 */
SIGNAL_2_STR(SIGVTALRM), /* 26 */
SIGNAL_2_STR(SIGPROF), /* 27 */
SIGNAL_2_STR(SIGWINCH), /* 28 */
SIGNAL_2_STR(SIGPOLL), /* 29 */
SIGNAL_2_STR(SIGPWR), /* 30 */
SIGNAL_2_STR(SIGSYS) /* 31 */
};
#define SIGNALMAX SIGSYS
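/*
 * Example of mapping a signal number to its name (illustrative only):
 *
 *	if (sig > 0 && sig <= SIGNALMAX && signal2str[sig] != NULL)
 *		printf("%s\n", signal2str[sig]);
 */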
#endif
| 1,322 | 27.148936 | 49 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/tools/ctrld/signals_freebsd.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
 * signals_freebsd.h - Signal definitions for FreeBSD
*/
#ifndef _SIGNALS_FBSD_H
#define _SIGNALS_FBSD_H 1
#define SIGNAL_2_STR(sig) [sig] = #sig
static const char *signal2str[] = {
SIGNAL_2_STR(SIGHUP), /* 1 */
SIGNAL_2_STR(SIGINT), /* 2 */
SIGNAL_2_STR(SIGQUIT), /* 3 */
SIGNAL_2_STR(SIGILL), /* 4 */
SIGNAL_2_STR(SIGTRAP), /* 5 */
SIGNAL_2_STR(SIGABRT), /* 6 */
SIGNAL_2_STR(SIGEMT), /* 7 */
SIGNAL_2_STR(SIGFPE), /* 8 */
SIGNAL_2_STR(SIGKILL), /* 9 */
SIGNAL_2_STR(SIGBUS), /* 10 */
SIGNAL_2_STR(SIGSEGV), /* 11 */
SIGNAL_2_STR(SIGSYS), /* 12 */
SIGNAL_2_STR(SIGPIPE), /* 13 */
SIGNAL_2_STR(SIGALRM), /* 14 */
SIGNAL_2_STR(SIGTERM), /* 15 */
SIGNAL_2_STR(SIGURG), /* 16 */
SIGNAL_2_STR(SIGSTOP), /* 17 */
SIGNAL_2_STR(SIGTSTP), /* 18 */
SIGNAL_2_STR(SIGCONT), /* 19 */
SIGNAL_2_STR(SIGCHLD), /* 20 */
SIGNAL_2_STR(SIGTTIN), /* 21 */
SIGNAL_2_STR(SIGTTOU), /* 22 */
SIGNAL_2_STR(SIGIO), /* 23 */
SIGNAL_2_STR(SIGXCPU), /* 24 */
SIGNAL_2_STR(SIGXFSZ), /* 25 */
SIGNAL_2_STR(SIGVTALRM), /* 26 */
SIGNAL_2_STR(SIGPROF), /* 27 */
SIGNAL_2_STR(SIGWINCH), /* 28 */
SIGNAL_2_STR(SIGINFO), /* 29 */
SIGNAL_2_STR(SIGUSR1), /* 30 */
SIGNAL_2_STR(SIGUSR2), /* 31 */
SIGNAL_2_STR(SIGTHR), /* 32 */
SIGNAL_2_STR(SIGLIBRT) /* 33 */
};
#define SIGNALMAX SIGLIBRT
#endif
| 1,386 | 26.74 | 50 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_locks/obj_locks.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* obj_locks.c -- unit test for PMEMmutex, PMEMrwlock and PMEMcond
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "obj_locks"
#define NUM_THREADS 16
#define MAX_FUNC 5
TOID_DECLARE(struct locks, 0);
struct locks {
PMEMobjpool *pop;
PMEMmutex mtx;
PMEMrwlock rwlk;
PMEMcond cond;
int data;
};
struct thread_args {
os_thread_t t;
TOID(struct locks) lock;
int t_id;
};
typedef void *(*fn_lock)(void *arg);
static struct thread_args threads[NUM_THREADS];
/*
* do_mutex_lock -- lock and unlock the mutex
*/
static void *
do_mutex_lock(void *arg)
{
struct thread_args *t = (struct thread_args *)arg;
struct locks *lock = D_RW(t->lock);
pmemobj_mutex_lock(lock->pop, &lock->mtx);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_mutex_unlock(lock->pop, &lock->mtx);
return NULL;
}
/*
* do_rwlock_wrlock -- lock and unlock the write rwlock
*/
static void *
do_rwlock_wrlock(void *arg)
{
struct thread_args *t = (struct thread_args *)arg;
struct locks *lock = D_RW(t->lock);
pmemobj_rwlock_wrlock(lock->pop, &lock->rwlk);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_rwlock_unlock(lock->pop, &lock->rwlk);
return NULL;
}
/*
* do_rwlock_rdlock -- lock and unlock the read rwlock
*/
static void *
do_rwlock_rdlock(void *arg)
{
struct thread_args *t = (struct thread_args *)arg;
struct locks *lock = D_RW(t->lock);
pmemobj_rwlock_rdlock(lock->pop, &lock->rwlk);
pmemobj_rwlock_unlock(lock->pop, &lock->rwlk);
return NULL;
}
/*
 * do_cond_signal -- block a thread on a condition variable
 * and wake it up with a signal
*/
static void *
do_cond_signal(void *arg)
{
struct thread_args *t = (struct thread_args *)arg;
struct locks *lock = D_RW(t->lock);
if (t->t_id == 0) {
pmemobj_mutex_lock(lock->pop, &lock->mtx);
while (lock->data < (NUM_THREADS - 1))
pmemobj_cond_wait(lock->pop, &lock->cond,
&lock->mtx);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_mutex_unlock(lock->pop, &lock->mtx);
} else {
pmemobj_mutex_lock(lock->pop, &lock->mtx);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_cond_signal(lock->pop, &lock->cond);
pmemobj_mutex_unlock(lock->pop, &lock->mtx);
}
return NULL;
}
/*
 * do_cond_broadcast -- block threads on a condition variable and wake
 * them up with a broadcast
*/
static void *
do_cond_broadcast(void *arg)
{
struct thread_args *t = (struct thread_args *)arg;
struct locks *lock = D_RW(t->lock);
if (t->t_id < (NUM_THREADS / 2)) {
pmemobj_mutex_lock(lock->pop, &lock->mtx);
while (lock->data < (NUM_THREADS / 2))
pmemobj_cond_wait(lock->pop, &lock->cond,
&lock->mtx);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_mutex_unlock(lock->pop, &lock->mtx);
} else {
pmemobj_mutex_lock(lock->pop, &lock->mtx);
lock->data++;
pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
pmemobj_cond_broadcast(lock->pop, &lock->cond);
pmemobj_mutex_unlock(lock->pop, &lock->mtx);
}
return NULL;
}
static fn_lock do_lock[MAX_FUNC] = {do_mutex_lock, do_rwlock_wrlock,
do_rwlock_rdlock, do_cond_signal,
do_cond_broadcast};
/*
* do_lock_init -- initialize all types of locks
*/
static void
do_lock_init(struct locks *lock)
{
pmemobj_mutex_zero(lock->pop, &lock->mtx);
pmemobj_rwlock_zero(lock->pop, &lock->rwlk);
pmemobj_cond_zero(lock->pop, &lock->cond);
}
/*
* do_lock_mt -- perform multithread lock operations
*/
static void
do_lock_mt(TOID(struct locks) lock, unsigned f_num)
{
D_RW(lock)->data = 0;
for (int i = 0; i < NUM_THREADS; ++i) {
threads[i].lock = lock;
threads[i].t_id = i;
THREAD_CREATE(&threads[i].t, NULL, do_lock[f_num],
&threads[i]);
}
for (int i = 0; i < NUM_THREADS; ++i)
THREAD_JOIN(&threads[i].t, NULL);
/*
	 * If all threads executed the locking function properly, the data
	 * field should have been incremented exactly once by every thread
	 * (or left untouched in the read-lock case).
*/
UT_ASSERT((D_RO(lock)->data == NUM_THREADS) ||
(D_RO(lock)->data == 0));
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_locks");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
TOID(struct locks) lock;
POBJ_ALLOC(pop, &lock, struct locks, sizeof(struct locks), NULL, NULL);
D_RW(lock)->pop = pop;
do_lock_init(D_RW(lock));
for (unsigned i = 0; i < MAX_FUNC; i++)
do_lock_mt(lock, i);
POBJ_FREE(&lock);
pmemobj_close(pop);
DONE(NULL);
}
| 4,821 | 22.99005 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/libpmempool_feature/libpmempool_feature.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* libpmempool_feature -- pmempool_feature_(enable|disable|query) test
*
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include "libpmempool.h"
#include "pool_hdr.h"
#include "unittest.h"
#define EMPTY_FLAGS 0
/*
* print_usage -- print usage of program
*/
static void
print_usage(const char *name)
{
UT_OUT("usage: %s <pool_path> (e|d|q) <feature-name>", name);
UT_OUT("feature-name: SINGLEHDR, CKSUM_2K, SHUTDOWN_STATE");
}
/*
* str2pmempool_feature -- convert feature name to pmempool_feature enum
*/
static enum pmempool_feature
str2pmempool_feature(const char *app, const char *str)
{
uint32_t fval = util_str2pmempool_feature(str);
if (fval == UINT32_MAX) {
print_usage(app);
UT_FATAL("unknown feature: %s", str);
}
return (enum pmempool_feature)fval;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "libpmempool_feature");
if (argc < 4) {
print_usage(argv[0]);
UT_FATAL("insufficient number of arguments: %d", argc - 1);
}
const char *path = argv[1];
char cmd = argv[2][0];
enum pmempool_feature feature = str2pmempool_feature(argv[0], argv[3]);
int ret;
switch (cmd) {
case 'e':
return pmempool_feature_enable(path, feature, EMPTY_FLAGS);
case 'd':
return pmempool_feature_disable(path, feature, EMPTY_FLAGS);
case 'q':
ret = pmempool_feature_query(path, feature, EMPTY_FLAGS);
if (ret < 0)
return 1;
UT_OUT("query %s result is %d", argv[3], ret);
return 0;
default:
print_usage(argv[0]);
UT_FATAL("unknown command: %c", cmd);
}
DONE(NULL);
}
| 1,622 | 20.077922 | 72 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_flow/obj_tx_flow.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_flow.c -- unit test for transaction flow
*/
#include "unittest.h"
#include "obj.h"
#define LAYOUT_NAME "direct"
#define TEST_VALUE_A 5
#define TEST_VALUE_B 10
#define TEST_VALUE_C 15
#define OPS_NUM 9
TOID_DECLARE(struct test_obj, 1);
struct test_obj {
int a;
int b;
int c;
};
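/*
 * do_tx_macro_commit -- commit a transaction using the TX_BEGIN macros and
 * verify that the TX_ONCOMMIT and TX_FINALLY blocks are executed
 */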
static void
do_tx_macro_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
TX_BEGIN(pop) {
D_RW(*obj)->a = TEST_VALUE_A;
} TX_ONCOMMIT {
UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
D_RW(*obj)->b = TEST_VALUE_B;
} TX_ONABORT { /* not called */
D_RW(*obj)->a = TEST_VALUE_B;
} TX_FINALLY {
UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
D_RW(*obj)->c = TEST_VALUE_C;
} TX_END
}
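/*
 * do_tx_macro_abort -- abort a transaction using the TX_BEGIN macros and
 * verify that the snapshotted fields are rolled back
 */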
static void
do_tx_macro_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
D_RW(*obj)->a = TEST_VALUE_A;
D_RW(*obj)->b = TEST_VALUE_B;
TX_BEGIN(pop) {
TX_ADD(*obj);
D_RW(*obj)->a = TEST_VALUE_B;
pmemobj_tx_abort(EINVAL);
D_RW(*obj)->b = TEST_VALUE_A;
} TX_ONCOMMIT { /* not called */
D_RW(*obj)->a = TEST_VALUE_B;
} TX_ONABORT {
UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
D_RW(*obj)->b = TEST_VALUE_B;
} TX_FINALLY {
UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B);
D_RW(*obj)->c = TEST_VALUE_C;
} TX_END
}
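/*
 * do_tx_macro_commit_nested -- commit a nested transaction using the macros
 */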
static void
do_tx_macro_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
TX_BEGIN(pop) {
TX_BEGIN(pop) {
D_RW(*obj)->a = TEST_VALUE_A;
} TX_ONCOMMIT {
UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A);
D_RW(*obj)->b = TEST_VALUE_B;
} TX_END
} TX_ONCOMMIT {
D_RW(*obj)->c = TEST_VALUE_C;
} TX_END
}
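/*
 * do_tx_macro_abort_nested -- abort an inner transaction and verify that the
 * outer transaction is aborted as well
 */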
static void
do_tx_macro_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
volatile int a = 0;
volatile int b = 0;
volatile int c = 0;
D_RW(*obj)->a = TEST_VALUE_A;
D_RW(*obj)->b = TEST_VALUE_B;
TX_BEGIN(pop) {
TX_ADD(*obj);
D_RW(*obj)->a = TEST_VALUE_B;
a = TEST_VALUE_C;
TX_BEGIN(pop) {
D_RW(*obj)->b = TEST_VALUE_C;
a = TEST_VALUE_A;
pmemobj_tx_abort(EINVAL);
a = TEST_VALUE_B;
} TX_ONCOMMIT { /* not called */
a = TEST_VALUE_C;
} TX_ONABORT {
UT_ASSERT(a == TEST_VALUE_A);
b = TEST_VALUE_B;
} TX_FINALLY {
UT_ASSERT(b == TEST_VALUE_B);
c = TEST_VALUE_C;
} TX_END
a = TEST_VALUE_B;
} TX_ONCOMMIT { /* not called */
UT_ASSERT(a == TEST_VALUE_A);
c = TEST_VALUE_C;
} TX_ONABORT {
UT_ASSERT(a == TEST_VALUE_A);
UT_ASSERT(b == TEST_VALUE_B);
UT_ASSERT(c == TEST_VALUE_C);
b = TEST_VALUE_A;
} TX_FINALLY {
UT_ASSERT(b == TEST_VALUE_A);
D_RW(*obj)->c = TEST_VALUE_C;
a = TEST_VALUE_B;
} TX_END
UT_ASSERT(a == TEST_VALUE_B);
}
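/*
 * do_tx_macro_abort_nested_begin -- verify that a failed nested TX_BEGIN
 * (invalid pool pointer) aborts the outer transaction
 */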
static void
do_tx_macro_abort_nested_begin(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
errno = 0;
TX_BEGIN(pop) {
D_RW(*obj)->a = TEST_VALUE_A;
D_RW(*obj)->b = TEST_VALUE_B;
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TX_BEGIN((PMEMobjpool *)(uintptr_t)7) {
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(errno == EINVAL);
} TX_ONABORT {
D_RW(*obj)->c = TEST_VALUE_C;
} TX_ONCOMMIT { /* not called */
D_RW(*obj)->a = TEST_VALUE_B;
} TX_END
}
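/*
 * do_tx_commit -- commit a transaction using the function API
 */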
static void
do_tx_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
D_RW(*obj)->a = TEST_VALUE_A;
TX_ADD(*obj);
D_RW(*obj)->b = TEST_VALUE_B;
pmemobj_tx_commit();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
D_RW(*obj)->c = TEST_VALUE_C;
pmemobj_tx_end();
}
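/*
 * do_tx_commit_nested -- commit a nested transaction using the function API
 */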
static void
do_tx_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
TX_ADD(*obj);
D_RW(*obj)->a = TEST_VALUE_A;
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
TX_ADD(*obj);
D_RW(*obj)->b = TEST_VALUE_B;
pmemobj_tx_commit();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
pmemobj_tx_end();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
pmemobj_tx_commit();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
D_RW(*obj)->c = TEST_VALUE_C;
pmemobj_tx_end();
}
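/*
 * do_tx_abort -- abort a transaction using the function API
 */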
static void
do_tx_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
D_RW(*obj)->a = TEST_VALUE_A;
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
D_RW(*obj)->b = TEST_VALUE_B;
TX_ADD(*obj);
D_RW(*obj)->a = 0;
pmemobj_tx_abort(EINVAL);
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
D_RW(*obj)->c = TEST_VALUE_C;
pmemobj_tx_end();
}
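/*
 * do_tx_abort_nested -- abort a nested transaction using the function API
 */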
static void
do_tx_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj)
{
D_RW(*obj)->a = TEST_VALUE_A;
D_RW(*obj)->b = TEST_VALUE_B;
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
TX_ADD(*obj);
D_RW(*obj)->a = 0;
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
TX_ADD(*obj);
D_RW(*obj)->b = 0;
pmemobj_tx_abort(EINVAL);
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
pmemobj_tx_end();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
D_RW(*obj)->c = TEST_VALUE_C;
pmemobj_tx_end();
}
typedef void (*fn_op)(PMEMobjpool *pop, TOID(struct test_obj) *obj);
static fn_op tx_op[OPS_NUM] = {do_tx_macro_commit, do_tx_macro_abort,
do_tx_macro_commit_nested, do_tx_macro_abort_nested,
do_tx_macro_abort_nested_begin, do_tx_commit,
do_tx_commit_nested, do_tx_abort, do_tx_abort_nested};
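/*
 * do_tx_process -- walk a transaction through all stages using
 * pmemobj_tx_process
 */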
static void
do_tx_process(PMEMobjpool *pop)
{
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
pmemobj_tx_end();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
}
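/*
 * do_tx_process_nested -- use pmemobj_tx_process to advance a nested
 * transaction and an aborted outer transaction through all stages
 */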
static void
do_tx_process_nested(PMEMobjpool *pop)
{
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
pmemobj_tx_end();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK);
pmemobj_tx_abort(EINVAL);
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY);
pmemobj_tx_process();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
pmemobj_tx_end();
UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE);
}
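/*
 * do_fault_injection -- inject an allocation failure into pmemobj_tx_begin
 * and verify it fails with ENOMEM
 */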
static void
do_fault_injection(PMEMobjpool *pop)
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "pmemobj_tx_begin");
int ret = pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_flow");
if (argc != 3)
		UT_FATAL("usage: %s [t|f] file", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[2], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
TOID(struct test_obj) obj;
POBJ_ZNEW(pop, &obj, struct test_obj);
for (int i = 0; i < OPS_NUM; i++) {
D_RW(obj)->a = 0;
D_RW(obj)->b = 0;
D_RW(obj)->c = 0;
tx_op[i](pop, &obj);
UT_ASSERT(D_RO(obj)->a == TEST_VALUE_A);
UT_ASSERT(D_RO(obj)->b == TEST_VALUE_B);
UT_ASSERT(D_RO(obj)->c == TEST_VALUE_C);
}
switch (argv[1][0]) {
case 't':
do_tx_process(pop);
do_tx_process_nested(pop);
break;
case 'f':
do_fault_injection(pop);
break;
default:
UT_FATAL("usage: %s [t|f]", argv[0]);
}
pmemobj_close(pop);
DONE(NULL);
}
| 7,445 | 23.574257 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_pool_hdr/util_pool_hdr.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* util_pool_hdr.c -- unit test for pool_hdr layout and default values
*
* This test should be modified after every layout change. It's here to prevent
* any accidental layout changes.
*/
#include "util.h"
#include "unittest.h"
#include "set.h"
#include "pool_hdr.h"
#define POOL_HDR_SIG_LEN_V1 (8)
#define POOL_HDR_UNUSED_LEN_V1 (1904)
#define POOL_HDR_UNUSED2_LEN_V1 (1976)
#define POOL_HDR_2K_CHECKPOINT (2048UL)
#define FEATURES_T_SIZE_V1 (12)
#define ARCH_FLAGS_SIZE_V1 (16)
#define ARCH_FLAGS_RESERVED_LEN_V1 (4)
#define SHUTDOWN_STATE_SIZE_V1 (64)
#define SHUTDOWN_STATE_RESERVED_LEN_V1 (39)
/*
* test_layout -- test pool_hdr layout
*/
static void
test_layout(void)
{
ASSERT_ALIGNED_BEGIN(struct pool_hdr);
ASSERT_ALIGNED_FIELD(struct pool_hdr, signature);
ASSERT_FIELD_SIZE(signature, POOL_HDR_SIG_LEN_V1);
ASSERT_ALIGNED_FIELD(struct pool_hdr, major);
ASSERT_ALIGNED_FIELD(struct pool_hdr, features);
ASSERT_ALIGNED_FIELD(struct pool_hdr, poolset_uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_part_uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, next_part_uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_repl_uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, next_repl_uuid);
ASSERT_ALIGNED_FIELD(struct pool_hdr, crtime);
ASSERT_ALIGNED_FIELD(struct pool_hdr, arch_flags);
ASSERT_ALIGNED_FIELD(struct pool_hdr, unused);
ASSERT_FIELD_SIZE(unused, POOL_HDR_UNUSED_LEN_V1);
ASSERT_OFFSET_CHECKPOINT(struct pool_hdr, POOL_HDR_2K_CHECKPOINT);
ASSERT_ALIGNED_FIELD(struct pool_hdr, unused2);
ASSERT_FIELD_SIZE(unused2, POOL_HDR_UNUSED2_LEN_V1);
ASSERT_ALIGNED_FIELD(struct pool_hdr, sds);
ASSERT_ALIGNED_FIELD(struct pool_hdr, checksum);
#if PMEM_PAGESIZE > 4096
ASSERT_ALIGNED_FIELD(struct pool_hdr, align_pad);
#endif
ASSERT_ALIGNED_CHECK(struct pool_hdr);
ASSERT_ALIGNED_BEGIN(features_t);
ASSERT_ALIGNED_FIELD(features_t, compat);
ASSERT_ALIGNED_FIELD(features_t, incompat);
ASSERT_ALIGNED_FIELD(features_t, ro_compat);
ASSERT_ALIGNED_CHECK(features_t);
UT_COMPILE_ERROR_ON(sizeof(features_t) != FEATURES_T_SIZE_V1);
ASSERT_ALIGNED_BEGIN(struct arch_flags);
ASSERT_ALIGNED_FIELD(struct arch_flags, alignment_desc);
ASSERT_ALIGNED_FIELD(struct arch_flags, machine_class);
ASSERT_ALIGNED_FIELD(struct arch_flags, data);
ASSERT_ALIGNED_FIELD(struct arch_flags, reserved);
ASSERT_FIELD_SIZE(reserved, ARCH_FLAGS_RESERVED_LEN_V1);
ASSERT_ALIGNED_FIELD(struct arch_flags, machine);
ASSERT_ALIGNED_CHECK(struct arch_flags);
UT_COMPILE_ERROR_ON(sizeof(struct arch_flags) != ARCH_FLAGS_SIZE_V1);
ASSERT_ALIGNED_BEGIN(struct shutdown_state);
ASSERT_ALIGNED_FIELD(struct shutdown_state, usc);
ASSERT_ALIGNED_FIELD(struct shutdown_state, uuid);
ASSERT_ALIGNED_FIELD(struct shutdown_state, dirty);
ASSERT_ALIGNED_FIELD(struct shutdown_state, reserved);
ASSERT_FIELD_SIZE(reserved, SHUTDOWN_STATE_RESERVED_LEN_V1);
ASSERT_ALIGNED_FIELD(struct shutdown_state, checksum);
ASSERT_ALIGNED_CHECK(struct shutdown_state);
UT_COMPILE_ERROR_ON(sizeof(struct shutdown_state) !=
SHUTDOWN_STATE_SIZE_V1);
}
/* incompat features - final values */
#define POOL_FEAT_SINGLEHDR_FINAL 0x0001U
#define POOL_FEAT_CKSUM_2K_FINAL 0x0002U
#define POOL_FEAT_SDS_FINAL 0x0004U
/* incompat features effective values */
#if defined(_WIN32) || NDCTL_ENABLED
#ifdef SDS_ENABLED
#define POOL_E_FEAT_SDS_FINAL POOL_FEAT_SDS_FINAL
#else
#define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */
#endif
#else
/*
* shutdown state support on Linux requires root access on kernel < 4.20 with
* ndctl < 63 so it is disabled by default
*/
#define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */
#endif
#define POOL_FEAT_INCOMPAT_DEFAULT_V1 \
(POOL_FEAT_CKSUM_2K_FINAL | POOL_E_FEAT_SDS_FINAL)
#ifdef _WIN32
#define SDS_AT_CREATE_EXPECTED 1
#else
#define SDS_AT_CREATE_EXPECTED 0
#endif
/*
* test_default_values -- test default values
*/
static void
test_default_values(void)
{
UT_COMPILE_ERROR_ON(POOL_FEAT_SINGLEHDR != POOL_FEAT_SINGLEHDR_FINAL);
UT_COMPILE_ERROR_ON(POOL_FEAT_CKSUM_2K != POOL_FEAT_CKSUM_2K_FINAL);
UT_COMPILE_ERROR_ON(POOL_FEAT_SDS != POOL_FEAT_SDS_FINAL);
UT_COMPILE_ERROR_ON(SDS_at_create != SDS_AT_CREATE_EXPECTED);
UT_COMPILE_ERROR_ON(POOL_FEAT_INCOMPAT_DEFAULT !=
POOL_FEAT_INCOMPAT_DEFAULT_V1);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_pool_hdr");
test_layout();
test_default_values();
DONE(NULL);
}
| 4,508 | 30.531469 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_map_proc/util_map_proc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* util_map_proc.c -- unit test for util_map() /proc parsing
*
* usage: util_map_proc maps_file len [len]...
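 *
 * example invocation (illustrative): util_map_proc /proc/self/maps 4096 2097152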
*/
#define _GNU_SOURCE
#include <dlfcn.h>
#include "unittest.h"
#include "util.h"
#include "mmap.h"
#define GIGABYTE ((uintptr_t)1 << 30)
#define TERABYTE ((uintptr_t)1 << 40)
int
main(int argc, char *argv[])
{
START(argc, argv, "util_map_proc");
util_init();
util_mmap_init();
if (argc < 3)
UT_FATAL("usage: %s maps_file len [len]...", argv[0]);
Mmap_mapfile = argv[1];
UT_OUT("redirecting " OS_MAPFILE " to %s", Mmap_mapfile);
for (int arg = 2; arg < argc; arg++) {
size_t len = (size_t)strtoull(argv[arg], NULL, 0);
size_t align = 2 * MEGABYTE;
if (len >= 2 * GIGABYTE)
align = GIGABYTE;
void *h1 =
util_map_hint_unused((void *)TERABYTE, len, GIGABYTE);
void *h2 = util_map_hint(len, 0);
if (h1 != MAP_FAILED && h1 != NULL)
UT_ASSERTeq((uintptr_t)h1 & (GIGABYTE - 1), 0);
if (h2 != MAP_FAILED && h2 != NULL)
UT_ASSERTeq((uintptr_t)h2 & (align - 1), 0);
if (h1 == NULL) /* XXX portability */
UT_OUT("len %zu: (nil) %p", len, h2);
else if (h2 == NULL)
UT_OUT("len %zu: %p (nil)", len, h1);
else
UT_OUT("len %zu: %p %p", len, h1, h2);
}
util_mmap_fini();
DONE(NULL);
}
| 1,335 | 21.644068 | 60 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/win_lists/win_lists.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_lists.c -- test list routines used in windows implementation
*/
#include "unittest.h"
#include "queue.h"
typedef struct TEST_LIST_NODE {
PMDK_LIST_ENTRY(TEST_LIST_NODE) ListEntry;
int dummy;
} *PTEST_LIST_NODE;
PMDK_LIST_HEAD(TestList, TEST_LIST_NODE);
static void
dump_list(struct TestList *head)
{
PTEST_LIST_NODE pNode = NULL;
pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head);
while (pNode != NULL) {
UT_OUT("Node value: %d", pNode->dummy);
pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry);
}
}
static int
get_list_count(struct TestList *head)
{
PTEST_LIST_NODE pNode = NULL;
int listCount = 0;
pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head);
while (pNode != NULL) {
listCount++;
pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry);
}
return listCount;
}
/*
* test_list - Do some basic list manipulations and output to log for
* script comparison. Only testing the macros we use.
*/
static void
test_list(void)
{
PTEST_LIST_NODE pNode = NULL;
struct TestList head = PMDK_LIST_HEAD_INITIALIZER(head);
PMDK_LIST_INIT(&head);
UT_ASSERT_rt(PMDK_LIST_EMPTY(&head));
pNode = MALLOC(sizeof(struct TEST_LIST_NODE));
pNode->dummy = 0;
PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry);
UT_ASSERTeq_rt(1, get_list_count(&head));
dump_list(&head);
/* Remove one node */
PMDK_LIST_REMOVE(pNode, ListEntry);
UT_ASSERTeq_rt(0, get_list_count(&head));
dump_list(&head);
free(pNode);
/* Add a bunch of nodes */
for (int i = 1; i < 10; i++) {
pNode = MALLOC(sizeof(struct TEST_LIST_NODE));
pNode->dummy = i;
PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry);
}
UT_ASSERTeq_rt(9, get_list_count(&head));
dump_list(&head);
/* Remove all of them */
while (!PMDK_LIST_EMPTY(&head)) {
pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(&head);
PMDK_LIST_REMOVE(pNode, ListEntry);
free(pNode);
}
UT_ASSERTeq_rt(0, get_list_count(&head));
dump_list(&head);
}
typedef struct TEST_SORTEDQ_NODE {
PMDK_SORTEDQ_ENTRY(TEST_SORTEDQ_NODE) queue_link;
int dummy;
} TEST_SORTEDQ_NODE, *PTEST_SORTEDQ_NODE;
PMDK_SORTEDQ_HEAD(TEST_SORTEDQ, TEST_SORTEDQ_NODE);
static int
sortedq_node_comparer(TEST_SORTEDQ_NODE *a, TEST_SORTEDQ_NODE *b)
{
return a->dummy - b->dummy;
}
struct TEST_DATA_SORTEDQ {
int count;
int data[10];
};
/*
* test_sortedq - Do some basic operations on SORTEDQ and make sure that the
* queue is sorted for different input sequences.
*/
static void
test_sortedq(void)
{
PTEST_SORTEDQ_NODE node = NULL;
struct TEST_SORTEDQ head = PMDK_SORTEDQ_HEAD_INITIALIZER(head);
struct TEST_DATA_SORTEDQ test_data[] = {
{5, {5, 7, 9, 100, 101}},
{7, {1, 2, 3, 4, 5, 6, 7}},
{5, {100, 90, 80, 70, 40}},
{6, {10, 9, 8, 7, 6, 5}},
{5, {23, 13, 27, 4, 15}},
{5, {2, 2, 2, 2, 2}}
};
PMDK_SORTEDQ_INIT(&head);
UT_ASSERT_rt(PMDK_SORTEDQ_EMPTY(&head));
for (int i = 0; i < _countof(test_data); i++) {
for (int j = 0; j < test_data[i].count; j++) {
node = MALLOC(sizeof(TEST_SORTEDQ_NODE));
node->dummy = test_data[i].data[j];
PMDK_SORTEDQ_INSERT(&head, node, queue_link,
TEST_SORTEDQ_NODE, sortedq_node_comparer);
}
int prev = MININT;
int num_entries = 0;
PMDK_SORTEDQ_FOREACH(node, &head, queue_link) {
			UT_ASSERT(prev <= node->dummy);
			prev = node->dummy;
			num_entries++;
}
UT_ASSERT(num_entries == test_data[i].count);
while (!PMDK_SORTEDQ_EMPTY(&head)) {
node = PMDK_SORTEDQ_FIRST(&head);
PMDK_SORTEDQ_REMOVE(&head, node, queue_link);
FREE(node);
}
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "win_lists - testing %s",
(argc > 1) ? argv[1] : "list");
if (argc == 1 || (stricmp(argv[1], "list") == 0))
test_list();
if (argc > 1 && (stricmp(argv[1], "sortedq") == 0))
test_sortedq();
DONE(NULL);
}
| 5,431 | 27 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_pool/obj_pool.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_pool.c -- unit test for pmemobj_create() and pmemobj_open()
* Also tests pmemobj_(set/get)_user_data().
*
* usage: obj_pool op path layout [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
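/*
 * example invocation (illustrative, poolsize in MB, mode in octal):
 *	obj_pool c /pmem/testfile test_layout 20 0600
 */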
#include "unittest.h"
#include "../libpmemobj/obj.h"
#define MB ((size_t)1 << 20)
#define USER_DATA_V (void *) 123456789ULL
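/*
 * pool_create -- create a pool, exercise the user data get/set functions
 * and verify the pool consistency with pmemobj_check
 */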
static void
pool_create(const char *path, const char *layout, size_t poolsize,
unsigned mode)
{
PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode);
if (pop == NULL)
UT_OUT("!%s: pmemobj_create: %s", path, pmemobj_errormsg());
else {
/* Test pmemobj_(get/set)_user data */
UT_ASSERTeq(NULL, pmemobj_get_user_data(pop));
pmemobj_set_user_data(pop, USER_DATA_V);
UT_ASSERTeq(USER_DATA_V, pmemobj_get_user_data(pop));
os_stat_t stbuf;
STAT(path, &stbuf);
UT_OUT("%s: file size %zu mode 0%o",
path, stbuf.st_size,
stbuf.st_mode & 0777);
pmemobj_close(pop);
int result = pmemobj_check(path, layout);
if (result < 0)
UT_OUT("!%s: pmemobj_check", path);
else if (result == 0)
UT_OUT("%s: pmemobj_check: not consistent", path);
}
}
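/*
 * pool_open -- open a pool and verify that its user data is initially NULL
 */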
static void
pool_open(const char *path, const char *layout)
{
PMEMobjpool *pop = pmemobj_open(path, layout);
if (pop == NULL)
UT_OUT("!%s: pmemobj_open: %s", path, pmemobj_errormsg());
else {
UT_OUT("%s: pmemobj_open: Success", path);
UT_ASSERTeq(NULL, pmemobj_get_user_data(pop));
pmemobj_close(pop);
}
}
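/*
 * test_fault_injection -- inject an allocation fault into pmemobj_create
 * and verify it fails with ENOMEM
 */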
static void
test_fault_injection(const char *path, const char *layout, size_t poolsize,
unsigned mode)
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "tx_params_new");
PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode);
UT_ASSERTeq(pop, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pool");
if (argc < 4)
UT_FATAL("usage: %s op path layout [poolsize mode]", argv[0]);
char *layout = NULL;
size_t poolsize;
unsigned mode;
if (strcmp(argv[3], "EMPTY") == 0)
layout = "";
else if (strcmp(argv[3], "NULL") != 0)
layout = argv[3];
switch (argv[1][0]) {
case 'c':
poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */
mode = strtoul(argv[5], NULL, 8);
pool_create(argv[2], layout, poolsize, mode);
break;
case 'o':
pool_open(argv[2], layout);
break;
case 'f':
os_setenv("PMEMOBJ_CONF", "invalid-query", 1);
pool_open(argv[2], layout);
os_unsetenv("PMEMOBJ_CONF");
pool_open(argv[2], layout);
break;
case 't':
poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */
mode = strtoul(argv[5], NULL, 8);
test_fault_injection(argv[2], layout, poolsize, mode);
break;
default:
UT_FATAL("unknown operation");
}
DONE(NULL);
}
| 2,905 | 21.527132 | 75 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memset/pmem2_memset.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * pmem2_memset.c -- unit test for doing a memset
 *
 * usage: pmem2_memset file offset length
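 *
 * example invocation (illustrative): pmem2_memset testfile 0 8192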
*/
#include "unittest.h"
#include "file.h"
#include "ut_pmem2.h"
#include "memset_common.h"
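/*
 * do_memset_variants -- do_memset wrapper that runs the test with all
 * supported flag combinations
 */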
static void
do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off,
size_t bytes, persist_fn p, memset_fn fn)
{
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memset(fd, dest, file_name, dest_off, bytes,
fn, Flags[i], p);
if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH)
p(dest, bytes);
}
}
int
main(int argc, char *argv[])
{
int fd;
char *dest;
struct pmem2_config *cfg;
struct pmem2_source *src;
struct pmem2_map *map;
if (argc != 4)
UT_FATAL("usage: %s file offset length", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem2_memset %s %s %s %savx %savx512f",
argv[2], argv[3],
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
PMEM2_CONFIG_NEW(&cfg);
PMEM2_SOURCE_FROM_FD(&src, fd);
PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_CONFIG_DELETE(&cfg);
dest = pmem2_map_get_address(map);
if (dest == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
size_t dest_off = strtoul(argv[2], NULL, 0);
size_t bytes = strtoul(argv[3], NULL, 0);
pmem2_persist_fn persist = pmem2_get_persist_fn(map);
pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
do_memset_variants(fd, dest, argv[1], dest_off, bytes,
persist, memset_fn);
ret = pmem2_unmap(&map);
UT_ASSERTeq(ret, 0);
CLOSE(fd);
DONE(NULL);
}
| 1,810 | 21.6375 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memset/memset_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* memset_common.c -- common part for tests doing a persistent memset
*/
#include "unittest.h"
#include "memset_common.h"
/*
* do_memset - worker function for memset
*/
void
do_memset(int fd, char *dest, const char *file_name, size_t dest_off,
size_t bytes, memset_fn fn, unsigned flags,
persist_fn persist)
{
char *buf = MALLOC(bytes);
char *dest1;
char *ret;
memset(dest, 0, bytes);
persist(dest, bytes);
dest1 = MALLOC(bytes);
memset(dest1, 0, bytes);
/*
	 * This buffer is used to verify that the result of a regular
	 * (non-persistent) memset matches the outcome of the persistent
	 * memset. The persistent memset will match the file contents, but
	 * may not hold the correct or expected value.
*/
memset(dest1 + dest_off, 0x5A, bytes / 4);
memset(dest1 + dest_off + (bytes / 4), 0x46, bytes / 4);
/* Test the corner cases */
ret = fn(dest + dest_off, 0x5A, 0, flags);
UT_ASSERTeq(ret, dest + dest_off);
UT_ASSERTeq(*(char *)(dest + dest_off), 0);
/*
* Do the actual memset with persistence.
*/
ret = fn(dest + dest_off, 0x5A, bytes / 4, flags);
UT_ASSERTeq(ret, dest + dest_off);
ret = fn(dest + dest_off + (bytes / 4), 0x46, bytes / 4, flags);
UT_ASSERTeq(ret, dest + dest_off + (bytes / 4));
if (memcmp(dest, dest1, bytes / 2))
UT_FATAL("%s: first %zu bytes do not match",
file_name, bytes / 2);
LSEEK(fd, 0, SEEK_SET);
if (READ(fd, buf, bytes / 2) == bytes / 2) {
if (memcmp(buf, dest, bytes / 2))
UT_FATAL("%s: first %zu bytes do not match",
file_name, bytes / 2);
}
FREE(dest1);
FREE(buf);
}
unsigned Flags[] = {
0,
PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_NONTEMPORAL,
PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
PMEM_F_MEM_WC,
PMEM_F_MEM_WB,
PMEM_F_MEM_NOFLUSH,
/* all possible flags */
PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
| 2,043 | 24.55 | 69 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem2_memset/memset_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* memset_common.h -- header file for common memset utilities
*/
#ifndef MEMSET_COMMON_H
#define MEMSET_COMMON_H 1
#include "unittest.h"
#include "file.h"
extern unsigned Flags[10];
typedef void *(*memset_fn)(void *pmemdest, int c, size_t len, unsigned flags);
typedef void (*persist_fn)(const void *ptr, size_t len);
void
do_memset(int fd, char *dest, const char *file_name, size_t dest_off,
size_t bytes, memset_fn fn, unsigned flags, persist_fn p);
#endif
| 552 | 22.041667 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_pmalloc_basic/obj_pmalloc_basic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_pmalloc_basic.c -- unit test for pmalloc interface
*/
#include <stdint.h>
#include "heap.h"
#include "obj.h"
#include "pmalloc.h"
#include "unittest.h"
#include "valgrind_internal.h"
#include "set.h"
#define MOCK_POOL_SIZE (PMEMOBJ_MIN_POOL * 3)
#define TEST_MEGA_ALLOC_SIZE (10 * 1024 * 1024)
#define TEST_HUGE_ALLOC_SIZE (4 * 255 * 1024)
#define TEST_SMALL_ALLOC_SIZE (1000)
#define TEST_MEDIUM_ALLOC_SIZE (1024 * 200)
#define TEST_TINY_ALLOC_SIZE (64)
#define TEST_RUNS 2
#define MAX_MALLOC_FREE_LOOP 1000
#define MALLOC_FREE_SIZE 8000
#define PAD_SIZE (PMEM_PAGESIZE - LANE_TOTAL_SIZE)
struct mock_pop {
PMEMobjpool p;
char lanes[LANE_TOTAL_SIZE];
char padding[PAD_SIZE]; /* to page boundary */
uint64_t ptr;
};
static struct mock_pop *addr;
static PMEMobjpool *mock_pop;
/*
* drain_empty -- (internal) empty function for drain on non-pmem memory
*/
static void
drain_empty(void)
{
/* do nothing */
}
/*
* obj_persist -- pmemobj version of pmem_persist w/o replication
*/
static int
obj_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
pop->persist_local(addr, len);
return 0;
}
/*
* obj_flush -- pmemobj version of pmem_flush w/o replication
*/
static int
obj_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = ctx;
pop->flush_local(addr, len);
return 0;
}
/*
* obj_drain -- pmemobj version of pmem_drain w/o replication
*/
static void
obj_drain(void *ctx)
{
PMEMobjpool *pop = ctx;
pop->drain_local();
}
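/*
 * obj_msync_nofail -- pmem_msync wrapper that fails the test on error
 */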
static void
obj_msync_nofail(const void *addr, size_t size)
{
if (pmem_msync(addr, size))
UT_FATAL("!pmem_msync");
}
/*
* obj_memcpy -- pmemobj version of memcpy w/o replication
*/
static void *
obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags)
{
pmem_memcpy(dest, src, len, flags);
return dest;
}
static void *
obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
pmem_memset(ptr, c, sz, flags);
return ptr;
}
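/*
 * test_oom_allocs -- allocate objects of the given size until OOM, then free
 * them all; returns the number of successful allocations
 */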
static size_t
test_oom_allocs(size_t size)
{
uint64_t max_allocs = MOCK_POOL_SIZE / size;
uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs));
size_t count = 0;
for (;;) {
if (pmalloc(mock_pop, &addr->ptr, size, 0, 0)) {
break;
}
UT_ASSERT(addr->ptr != 0);
allocs[count++] = addr->ptr;
}
for (int i = 0; i < count; ++i) {
addr->ptr = allocs[i];
pfree(mock_pop, &addr->ptr);
UT_ASSERT(addr->ptr == 0);
}
UT_ASSERT(count != 0);
FREE(allocs);
return count;
}
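/*
 * test_oom_resrv -- reserve objects until OOM, publish the reservations in
 * batches, then free them all
 */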
static size_t
test_oom_resrv(size_t size)
{
uint64_t max_allocs = MOCK_POOL_SIZE / size;
uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs));
struct pobj_action *resvs = CALLOC(max_allocs, sizeof(*resvs));
size_t count = 0;
for (;;) {
if (palloc_reserve(&mock_pop->heap, size,
NULL, NULL, 0, 0, 0, 0,
&resvs[count]) != 0)
break;
allocs[count] = resvs[count].heap.offset;
UT_ASSERT(allocs[count] != 0);
count++;
}
for (size_t i = 0; i < count; ) {
size_t nresv = MIN(count - i, 10);
struct operation_context *ctx =
pmalloc_operation_hold(mock_pop);
palloc_publish(&mock_pop->heap, &resvs[i], nresv, ctx);
pmalloc_operation_release(mock_pop);
i += nresv;
}
for (int i = 0; i < count; ++i) {
addr->ptr = allocs[i];
pfree(mock_pop, &addr->ptr);
UT_ASSERT(addr->ptr == 0);
}
UT_ASSERT(count != 0);
FREE(allocs);
FREE(resvs);
return count;
}
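/*
 * test_malloc_free_loop -- allocate and free a single object in a tight loop
 */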
static void
test_malloc_free_loop(size_t size)
{
int err;
for (int i = 0; i < MAX_MALLOC_FREE_LOOP; ++i) {
err = pmalloc(mock_pop, &addr->ptr, size, 0, 0);
UT_ASSERTeq(err, 0);
pfree(mock_pop, &addr->ptr);
}
}
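/*
 * test_realloc -- reallocate an object and verify its usable size
 */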
static void
test_realloc(size_t org, size_t dest)
{
int err;
struct palloc_heap *heap = &mock_pop->heap;
err = pmalloc(mock_pop, &addr->ptr, org, 0, 0);
UT_ASSERTeq(err, 0);
UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= org);
err = prealloc(mock_pop, &addr->ptr, dest, 0, 0);
UT_ASSERTeq(err, 0);
UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= dest);
pfree(mock_pop, &addr->ptr);
}
#define PMALLOC_EXTRA 20
#define PALLOC_FLAG (1 << 15)
#define FIRST_SIZE 1 /* use the first allocation class */
#define FIRST_USIZE 112 /* the usable size is 128 - 16 */
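/*
 * test_pmalloc_extras -- verify that the extra field and flags are stored
 * with the allocation
 */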
static void
test_pmalloc_extras(PMEMobjpool *pop)
{
uint64_t val;
int ret = pmalloc(pop, &val, FIRST_SIZE, PMALLOC_EXTRA, PALLOC_FLAG);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(palloc_extra(&pop->heap, val), PMALLOC_EXTRA);
UT_ASSERT((palloc_flags(&pop->heap, val) & PALLOC_FLAG) == PALLOC_FLAG);
UT_ASSERT(palloc_usable_size(&pop->heap, val) == FIRST_USIZE);
pfree(pop, &val);
}
#define PMALLOC_ELEMENTS 20
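/*
 * test_pmalloc_first_next -- verify object iteration using palloc_first and
 * palloc_next
 */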
static void
test_pmalloc_first_next(PMEMobjpool *pop)
{
uint64_t vals[PMALLOC_ELEMENTS];
for (unsigned i = 0; i < PMALLOC_ELEMENTS; ++i) {
int ret = pmalloc(pop, &vals[i], FIRST_SIZE, i, i);
UT_ASSERTeq(ret, 0);
}
uint64_t off = palloc_first(&pop->heap);
UT_ASSERTne(off, 0);
int nvalues = 0;
do {
UT_ASSERTeq(vals[nvalues], off);
UT_ASSERTeq(palloc_extra(&pop->heap, off), nvalues);
UT_ASSERTeq(palloc_flags(&pop->heap, off), nvalues);
UT_ASSERT(palloc_usable_size(&pop->heap, off) == FIRST_USIZE);
nvalues ++;
} while ((off = palloc_next(&pop->heap, off)) != 0);
UT_ASSERTeq(nvalues, PMALLOC_ELEMENTS);
for (int i = 0; i < PMALLOC_ELEMENTS; ++i)
pfree(pop, &vals[i]);
}
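/*
 * test_mock_pool_allocs -- set up a mock pool in anonymous memory and run
 * the allocation tests on it
 */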
static void
test_mock_pool_allocs(void)
{
addr = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE, Ut_mmap_align);
mock_pop = &addr->p;
mock_pop->addr = addr;
mock_pop->rdonly = 0;
mock_pop->is_pmem = 0;
mock_pop->heap_offset = offsetof(struct mock_pop, ptr);
UT_ASSERTeq(mock_pop->heap_offset % Ut_pagesize, 0);
mock_pop->nlanes = 1;
mock_pop->lanes_offset = sizeof(PMEMobjpool);
mock_pop->is_master_replica = 1;
mock_pop->persist_local = obj_msync_nofail;
mock_pop->flush_local = obj_msync_nofail;
mock_pop->drain_local = drain_empty;
mock_pop->p_ops.persist = obj_persist;
mock_pop->p_ops.flush = obj_flush;
mock_pop->p_ops.drain = obj_drain;
mock_pop->p_ops.memcpy = obj_memcpy;
mock_pop->p_ops.memset = obj_memset;
mock_pop->p_ops.base = mock_pop;
mock_pop->set = MALLOC(sizeof(*(mock_pop->set)));
mock_pop->set->options = 0;
mock_pop->set->directory_based = 0;
void *heap_start = (char *)mock_pop + mock_pop->heap_offset;
uint64_t heap_size = MOCK_POOL_SIZE - mock_pop->heap_offset;
struct stats *s = stats_new(mock_pop);
UT_ASSERTne(s, NULL);
heap_init(heap_start, heap_size, &mock_pop->heap_size,
&mock_pop->p_ops);
heap_boot(&mock_pop->heap, heap_start, heap_size, &mock_pop->heap_size,
mock_pop, &mock_pop->p_ops, s, mock_pop->set);
heap_buckets_init(&mock_pop->heap);
/* initialize runtime lanes structure */
mock_pop->lanes_desc.runtime_nlanes = (unsigned)mock_pop->nlanes;
lane_boot(mock_pop);
UT_ASSERTne(mock_pop->heap.rt, NULL);
test_pmalloc_extras(mock_pop);
test_pmalloc_first_next(mock_pop);
test_malloc_free_loop(MALLOC_FREE_SIZE);
size_t medium_resv = test_oom_resrv(TEST_MEDIUM_ALLOC_SIZE);
/*
* Allocating till OOM and freeing the objects in a loop for different
* buckets covers basically all code paths except error cases.
*/
size_t medium0 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE);
size_t mega0 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE);
size_t huge0 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
size_t small0 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE);
size_t tiny0 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
size_t huge1 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE);
size_t small1 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE);
size_t mega1 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE);
size_t tiny1 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
size_t medium1 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE);
UT_ASSERTeq(mega0, mega1);
UT_ASSERTeq(huge0, huge1);
UT_ASSERTeq(small0, small1);
UT_ASSERTeq(tiny0, tiny1);
UT_ASSERTeq(medium0, medium1);
UT_ASSERTeq(medium0, medium_resv);
/* realloc to the same size shouldn't affect anything */
for (size_t i = 0; i < tiny1; ++i)
test_realloc(TEST_TINY_ALLOC_SIZE, TEST_TINY_ALLOC_SIZE);
size_t tiny2 = test_oom_allocs(TEST_TINY_ALLOC_SIZE);
UT_ASSERTeq(tiny1, tiny2);
test_realloc(TEST_SMALL_ALLOC_SIZE, TEST_MEDIUM_ALLOC_SIZE);
test_realloc(TEST_HUGE_ALLOC_SIZE, TEST_MEGA_ALLOC_SIZE);
stats_delete(mock_pop, s);
lane_cleanup(mock_pop);
heap_cleanup(&mock_pop->heap);
FREE(mock_pop->set);
MUNMAP_ANON_ALIGNED(addr, MOCK_POOL_SIZE);
}
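/*
 * test_spec_compliance -- verify that the maximum allocation size matches
 * the public PMEMOBJ_MAX_ALLOC_SIZE value
 */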
static void
test_spec_compliance(void)
{
uint64_t max_alloc = MAX_MEMORY_BLOCK_SIZE -
sizeof(struct allocation_header_legacy);
UT_ASSERTeq(max_alloc, PMEMOBJ_MAX_ALLOC_SIZE);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pmalloc_basic");
for (int i = 0; i < TEST_RUNS; ++i)
test_mock_pool_allocs();
test_spec_compliance();
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 8,962 | 23.15903 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/win_common/win_common.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_common.c -- test common POSIX or Linux API that were implemented
* for Windows by our library.
*/
#include "unittest.h"
/*
* test_setunsetenv - test the setenv and unsetenv APIs
*/
static void
test_setunsetenv(void)
{
os_unsetenv("TEST_SETUNSETENV_ONE");
/* set a new variable without overwriting - expect the new value */
UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE",
"test_setunsetenv_one", 0) == 0);
UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"),
"test_setunsetenv_one") == 0);
/* set an existing variable without overwriting - expect old value */
UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE",
"test_setunsetenv_two", 0) == 0);
UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"),
"test_setunsetenv_one") == 0);
/* set an existing variable with overwriting - expect the new value */
UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE",
"test_setunsetenv_two", 1) == 0);
UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"),
"test_setunsetenv_two") == 0);
/* unset our test value - expect it to be empty */
UT_ASSERT(os_unsetenv("TEST_SETUNSETENV_ONE") == 0);
UT_ASSERT(os_getenv("TEST_SETUNSETENV_ONE") == NULL);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "win_common - testing %s",
(argc > 1) ? argv[1] : "setunsetenv");
if (argc == 1 || (stricmp(argv[1], "setunsetenv") == 0))
test_setunsetenv();
DONE(NULL);
}
| 3,080 | 35.678571 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_realloc/obj_realloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_realloc.c -- unit test for pmemobj_realloc and pmemobj_zrealloc
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "heap.h"
#include "alloc_class.h"
#include "obj.h"
#include "util.h"
#define MAX_ALLOC_MUL 8
#define MAX_ALLOC_CLASS 5
POBJ_LAYOUT_BEGIN(realloc);
POBJ_LAYOUT_ROOT(realloc, struct root);
POBJ_LAYOUT_TOID(realloc, struct object);
POBJ_LAYOUT_END(realloc);
struct object {
size_t value;
char data[];
};
struct root {
TOID(struct object) obj;
char data[CHUNKSIZE - sizeof(TOID(struct object))];
};
static struct alloc_class_collection *alloc_classes;
/*
* test_alloc -- test allocation using realloc
*/
static void
test_alloc(PMEMobjpool *pop, size_t size)
{
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size,
TOID_TYPE_NUM(struct object));
UT_ASSERTeq(ret, 0);
UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
UT_ASSERT(pmemobj_alloc_usable_size(D_RO(root)->obj.oid) >= size);
}
/*
* test_free -- test free using realloc
*/
static void
test_free(PMEMobjpool *pop)
{
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, 0,
TOID_TYPE_NUM(struct object));
UT_ASSERTeq(ret, 0);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
/*
* test_huge_size -- test zrealloc with size greater than pool size
*/
static void
test_huge_size(PMEMobjpool *pop)
{
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
int ret;
ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid,
PMEMOBJ_MAX_ALLOC_SIZE, TOID_TYPE_NUM(struct object));
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX,
TOID_TYPE_NUM(struct object));
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX - 1,
TOID_TYPE_NUM(struct object));
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
/* test zrealloc passing PMEMoid that points to OID_NULL value */
static void
test_null_oid(PMEMobjpool *pop)
{
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
int ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, 1024,
TOID_TYPE_NUM(struct object));
UT_ASSERTeq(ret, 0);
UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
pmemobj_free(&D_RW(root)->obj.oid);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
static int check_integrity = 1;
/*
* fill_buffer -- fill buffer with random data and return its checksum
*/
static uint16_t
fill_buffer(unsigned char *buf, size_t size)
{
for (size_t i = 0; i < size; ++i)
buf[i] = rand() % 255;
pmem_persist(buf, size);
return ut_checksum(buf, size);
}
/*
* test_realloc -- test single reallocation
*/
static void
test_realloc(PMEMobjpool *pop, size_t size_from, size_t size_to,
uint64_t type_from, uint64_t type_to, int zrealloc)
{
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
int ret;
if (zrealloc)
ret = pmemobj_zalloc(pop, &D_RW(root)->obj.oid,
size_from, type_from);
else
ret = pmemobj_alloc(pop, &D_RW(root)->obj.oid,
size_from, type_from, NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
size_t usable_size_from =
pmemobj_alloc_usable_size(D_RO(root)->obj.oid);
UT_ASSERT(usable_size_from >= size_from);
size_t check_size;
uint16_t checksum;
if (zrealloc) {
UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj),
size_from));
} else if (check_integrity) {
check_size = size_to >= usable_size_from ?
usable_size_from : size_to;
checksum = fill_buffer((unsigned char *)D_RW(D_RW(root)->obj),
check_size);
}
if (zrealloc) {
ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid,
size_to, type_to);
} else {
ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid,
size_to, type_to);
}
UT_ASSERTeq(ret, 0);
UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj));
size_t usable_size_to =
pmemobj_alloc_usable_size(D_RO(root)->obj.oid);
UT_ASSERT(usable_size_to >= size_to);
if (size_to < size_from) {
UT_ASSERT(usable_size_to <= usable_size_from);
}
if (zrealloc) {
UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_to));
} else if (check_integrity) {
uint16_t checksum2 = ut_checksum(
(uint8_t *)D_RW(D_RW(root)->obj), check_size);
if (checksum2 != checksum)
UT_ASSERTinfo(0, "memory corruption");
}
pmemobj_free(&D_RW(root)->obj.oid);
UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj));
}
/*
* test_realloc_sizes -- test reallocations from/to specified sizes
*/
static void
test_realloc_sizes(PMEMobjpool *pop, uint64_t type_from,
uint64_t type_to, int zrealloc, unsigned size_diff)
{
for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = alloc_class_by_id(alloc_classes, i);
if (c == NULL)
continue;
size_t header_size = header_type_to_size[c->header_type];
size_t size_from = c->unit_size - header_size - size_diff;
for (unsigned j = 2; j <= MAX_ALLOC_MUL; j++) {
size_t inc_size_to = c->unit_size * j - header_size;
test_realloc(pop, size_from, inc_size_to,
type_from, type_to, zrealloc);
size_t dec_size_to = c->unit_size / j;
if (dec_size_to <= header_size)
dec_size_to = header_size;
else
dec_size_to -= header_size;
test_realloc(pop, size_from, dec_size_to,
type_from, type_to, zrealloc);
for (int k = 0; k < MAX_ALLOC_CLASS; k++) {
struct alloc_class *ck = alloc_class_by_id(
alloc_classes, k);
				if (ck == NULL)
					continue;
				size_t header_sizek =
					header_type_to_size[ck->header_type];
size_t prev_size = ck->unit_size - header_sizek;
test_realloc(pop, size_from, prev_size,
type_from, type_to, zrealloc);
}
}
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_realloc");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(realloc) != 1);
if (argc < 2)
UT_FATAL("usage: %s file [check_integrity]", argv[0]);
PMEMobjpool *pop = pmemobj_open(argv[1], POBJ_LAYOUT_NAME(realloc));
if (!pop)
UT_FATAL("!pmemobj_open");
if (argc >= 3)
check_integrity = atoi(argv[2]);
alloc_classes = alloc_class_collection_new();
/* test huge size alloc */
test_huge_size(pop);
/* test alloc and free */
test_alloc(pop, 16);
test_free(pop);
/* test zrealloc passing PMEMoid that points to OID_NULL value */
test_null_oid(pop);
/* test realloc without changing type number */
test_realloc_sizes(pop, 0, 0, 0, 0);
/* test realloc with changing type number */
test_realloc_sizes(pop, 0, 1, 0, 0);
/* test zrealloc without changing type number... */
test_realloc_sizes(pop, 0, 0, 1, 8);
test_realloc_sizes(pop, 0, 0, 1, 0);
/* test zrealloc with changing type number... */
test_realloc_sizes(pop, 0, 1, 1, 8);
test_realloc_sizes(pop, 0, 1, 1, 0);
/* test realloc with type number equal to range of long long int */
test_realloc_sizes(pop, 0, UINT64_MAX, 0, 0);
test_realloc_sizes(pop, 0, UINT64_MAX - 1, 0, 0);
/* test zrealloc with type number equal to range of long long int */
test_realloc_sizes(pop, 0, UINT64_MAX, 1, 0);
test_realloc_sizes(pop, 0, (UINT64_MAX - 1), 1, 0);
alloc_class_collection_delete(alloc_classes);
pmemobj_close(pop);
DONE(NULL);
}
#ifdef _MSC_VER
extern "C" {
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
}
#endif
| 7,788 | 24.371336 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_deep_persist/mocks_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* mocks_posix.c -- redefinitions of open/write functions (Posix implementation)
*/
#include "util.h"
#include "os.h"
#include "unittest.h"
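/*
 * The mocks below use 999 as a fake file descriptor standing in for the
 * deep_flush sysfs file whenever the real one is not accessible.
 */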
/*
* open -- open mock because of Dev DAX without deep_flush
* sysfs file, eg. DAX on emulated pmem
*/
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
if (strstr(path, "/sys/bus/nd/devices/region") &&
strstr(path, "/deep_flush")) {
UT_OUT("mocked open, path %s", path);
if (os_access(path, R_OK))
return 999;
}
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END
/*
* write -- write mock
*/
FUNC_MOCK(write, int, int fd, const void *buffer, size_t count)
FUNC_MOCK_RUN_DEFAULT {
if (fd == 999) {
UT_OUT("mocked write, path %d", fd);
return 1;
}
return _FUNC_REAL(write)(fd, buffer, count);
}
FUNC_MOCK_END
/*
* read -- read mock
*/
FUNC_MOCK(read, size_t, int fd, void *buffer, size_t nbyte)
FUNC_MOCK_RUN_DEFAULT {
if (fd == 999) {
char pattern[2] = {'1', '\n'};
memcpy(buffer, pattern, sizeof(pattern));
UT_OUT("mocked read, fd %d", fd);
return sizeof(pattern);
}
return _FUNC_REAL(read)(fd, buffer, nbyte);
}
FUNC_MOCK_END
| 1,326 | 20.754098 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_tx_free/obj_tx_free.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_free.c -- unit test for pmemobj_tx_free
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_free"
#define OBJ_SIZE (200 * 1024)
enum type_number {
TYPE_FREE_NO_TX,
TYPE_FREE_WRONG_UUID,
TYPE_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_FREE_COMMIT_NESTED1,
TYPE_FREE_COMMIT_NESTED2,
TYPE_FREE_ABORT_NESTED1,
TYPE_FREE_ABORT_NESTED2,
TYPE_FREE_ABORT_AFTER_NESTED1,
TYPE_FREE_ABORT_AFTER_NESTED2,
TYPE_FREE_OOM,
TYPE_FREE_ALLOC,
TYPE_FREE_AFTER_ABORT,
TYPE_FREE_MANY_TIMES,
};
TOID_DECLARE(struct object, 0);
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
/*
* do_tx_alloc -- do tx allocation with specified type number
*/
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_alloc(sizeof(struct object), type_num);
} TX_END
return ret;
}
/*
* do_tx_free_wrong_uuid -- try to free object with invalid uuid
*/
static void
do_tx_free_wrong_uuid(PMEMobjpool *pop)
{
volatile int ret = 0;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID);
oid.pool_uuid_lo = ~oid.pool_uuid_lo;
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
ret = -1;
} TX_END
UT_ASSERTeq(ret, -1);
/* POBJ_XFREE_NO_ABORT flag is set */
TX_BEGIN(pop) {
ret = pmemobj_tx_xfree(oid, POBJ_XFREE_NO_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_wrong_uuid_abort_on_failure -- try to free object with
* invalid uuid in a transaction where pmemobj_tx_set_failure_behavior
* was called.
*/
static void
do_tx_free_wrong_uuid_abort_on_failure(PMEMobjpool *pop)
{
volatile int ret = 0;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID);
oid.pool_uuid_lo = ~oid.pool_uuid_lo;
/* pmemobj_tx_set_failure_behavior is called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_free(oid);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
/* pmemobj_tx_set_failure_behavior is called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_xfree(oid, 0);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
/* pmemobj_tx_set_failure_behavior is called in outer tx */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TX_BEGIN(pop) {
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_free(oid);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
ret = pmemobj_tx_free(oid);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
/* pmemobj_tx_set_failure_behavior is called in neighbour tx */
TX_BEGIN(pop) {
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_free(oid);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
TX_BEGIN(pop) {
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
/* pmemobj_tx_set_failure_behavior is called in neighbour tx */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_ABORT);
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
TX_BEGIN(pop) {
UT_ASSERTeq(pmemobj_tx_get_failure_behavior(),
POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_free(oid);
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
} TX_ONCOMMIT {
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_null_oid -- call pmemobj_tx_free with OID_NULL
*/
static void
do_tx_free_null_oid(PMEMobjpool *pop)
{
volatile int ret = 0;
TX_BEGIN(pop) {
ret = pmemobj_tx_free(OID_NULL);
} TX_ONABORT {
ret = -1;
} TX_END
UT_ASSERTeq(ret, 0);
}
/*
* do_tx_free_commit -- do the basic transactional deallocation of object
*/
static void
do_tx_free_commit(PMEMobjpool *pop)
{
int ret;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_COMMIT);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort -- abort deallocation of object
*/
static void
do_tx_free_abort(PMEMobjpool *pop)
{
int ret;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_ABORT);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
 * do_tx_free_commit_nested -- do deallocation in nested transaction
*/
static void
do_tx_free_commit_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED1));
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED2));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort_nested -- abort deallocation in nested transaction
*/
static void
do_tx_free_abort_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort_after_nested -- abort transaction after nested
* pmemobj_tx_free
*/
static void
do_tx_free_abort_after_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
TYPE_FREE_ABORT_AFTER_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
TYPE_FREE_ABORT_AFTER_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_alloc_abort -- free object allocated in the same transaction
* and abort transaction
*/
static void
do_tx_free_alloc_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(
sizeof(struct object), TYPE_FREE_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_free(obj.oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_alloc_commit -- free object allocated in the same transaction
* and commit transaction
*/
static void
do_tx_free_alloc_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(
sizeof(struct object), TYPE_FREE_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_free(obj.oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
 * do_tx_free_abort_free -- allocate a new object, perform a transactional
 * free in an aborted transaction and then actually free the object.
*
* This can expose any issues with not properly handled free undo log.
*/
static void
do_tx_free_abort_free(PMEMobjpool *pop)
{
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_AFTER_ABORT);
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_free_many_times -- free enough objects to trigger vector array alloc
*/
static void
do_tx_free_many_times(PMEMobjpool *pop)
{
#define TX_FREE_COUNT ((1 << 3) + 1)
PMEMoid oids[TX_FREE_COUNT];
for (int i = 0; i < TX_FREE_COUNT; ++i)
oids[i] = do_tx_alloc(pop, TYPE_FREE_MANY_TIMES);
TX_BEGIN(pop) {
for (int i = 0; i < TX_FREE_COUNT; ++i)
pmemobj_tx_free(oids[i]);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
#undef TX_FREE_COUNT
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_free");
util_init();
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_free_wrong_uuid(pop);
VALGRIND_WRITE_STATS;
do_tx_free_wrong_uuid_abort_on_failure(pop);
VALGRIND_WRITE_STATS;
do_tx_free_null_oid(pop);
VALGRIND_WRITE_STATS;
do_tx_free_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_free_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_free_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_free(pop);
VALGRIND_WRITE_STATS;
do_tx_free_many_times(pop);
VALGRIND_WRITE_STATS;
pmemobj_close(pop);
DONE(NULL);
}
| 11,423 | 21.356164 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_uuid_generate/util_uuid_generate.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* util_uuid_generate.c -- unit test for generating a uuid
*
* usage: util_uuid_generate [string] [valid|invalid]
*/
#include "unittest.h"
#include "uuid.h"
#include <unistd.h>
#include <string.h>
int
main(int argc, char *argv[])
{
START(argc, argv, "util_uuid_generate");
uuid_t uuid;
uuid_t uuid1;
int ret;
char conv_uu[POOL_HDR_UUID_STR_LEN];
char uu[POOL_HDR_UUID_STR_LEN];
/*
* No string passed in. Generate uuid.
*/
if (argc == 1) {
/* generate a UUID string */
ret = ut_get_uuid_str(uu);
UT_ASSERTeq(ret, 0);
/*
* Convert the string to a uuid, convert generated
* uuid back to a string and compare strings.
*/
ret = util_uuid_from_string(uu, (struct uuid *)&uuid);
UT_ASSERTeq(ret, 0);
ret = util_uuid_to_string(uuid, conv_uu);
UT_ASSERTeq(ret, 0);
UT_ASSERT(strncmp(uu, conv_uu, POOL_HDR_UUID_STR_LEN) == 0);
/*
* Generate uuid from util_uuid_generate and translate to
* string then back to uuid to verify they match.
*/
memset(uuid, 0, sizeof(uuid_t));
memset(uu, 0, POOL_HDR_UUID_STR_LEN);
memset(conv_uu, 0, POOL_HDR_UUID_STR_LEN);
ret = util_uuid_generate(uuid);
UT_ASSERTeq(ret, 0);
ret = util_uuid_to_string(uuid, uu);
UT_ASSERTeq(ret, 0);
ret = util_uuid_from_string(uu, (struct uuid *)&uuid1);
UT_ASSERTeq(ret, 0);
UT_ASSERT(memcmp(&uuid, &uuid1, sizeof(uuid_t)) == 0);
} else {
/*
* Caller passed in string.
*/
if (strcmp(argv[2], "valid") == 0) {
ret = util_uuid_from_string(argv[1],
(struct uuid *)&uuid);
UT_ASSERTeq(ret, 0);
ret = util_uuid_to_string(uuid, conv_uu);
UT_ASSERTeq(ret, 0);
} else {
ret = util_uuid_from_string(argv[1],
(struct uuid *)&uuid);
UT_ASSERT(ret < 0);
UT_OUT("util_uuid_generate: invalid uuid string");
}
}
DONE(NULL);
}
| 1,885 | 21.722892 | 62 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_pool_lookup/obj_pool_lookup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_pool_lookup.c -- unit test for pmemobj_pool and pmemobj_pool_of
*/
#include "unittest.h"
#define MAX_PATH_LEN 255
#define LAYOUT_NAME "pool_lookup"
#define ALLOC_SIZE 100
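/*
 * define_path -- construct the path of the i-th test file in the given
 * directory
 */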
static void
define_path(char *str, size_t size, const char *dir, unsigned i)
{
int ret = snprintf(str, size, "%s"OS_DIR_SEP_STR"testfile%d",
dir, i);
if (ret < 0 || ret >= size)
UT_FATAL("snprintf: %d", ret);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pool_lookup");
if (argc != 3)
UT_FATAL("usage: %s [directory] [# of pools]", argv[0]);
unsigned npools = ATOU(argv[2]);
const char *dir = argv[1];
int r;
/* check before pool creation */
PMEMoid some_oid = {2, 3};
UT_ASSERTeq(pmemobj_pool_by_ptr(&some_oid), NULL);
UT_ASSERTeq(pmemobj_pool_by_oid(some_oid), NULL);
PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *));
void **guard_after = MALLOC(npools * sizeof(void *));
size_t length = strlen(dir) + MAX_PATH_LEN;
char *path = MALLOC(length);
for (unsigned i = 0; i < npools; ++i) {
define_path(path, length, dir, i);
pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
/*
* Reserve a page after the pool for address checks, if it
* doesn't map precisely at that address - it's OK.
*/
guard_after[i] =
MMAP((char *)pops[i] + PMEMOBJ_MIN_POOL, Ut_pagesize,
PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
UT_ASSERTne(guard_after[i], NULL);
if (pops[i] == NULL)
UT_FATAL("!pmemobj_create");
}
PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid));
for (unsigned i = 0; i < npools; ++i) {
r = pmemobj_alloc(pops[i], &oids[i], ALLOC_SIZE, 1, NULL, NULL);
UT_ASSERTeq(r, 0);
}
PMEMoid invalid = {123, 321};
UT_ASSERTeq(pmemobj_pool_by_oid(OID_NULL), NULL);
UT_ASSERTeq(pmemobj_pool_by_oid(invalid), NULL);
for (unsigned i = 0; i < npools; ++i) {
UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]);
}
UT_ASSERTeq(pmemobj_pool_by_ptr(NULL), NULL);
UT_ASSERTeq(pmemobj_pool_by_ptr((void *)0xCBA), NULL);
void *valid_ptr = MALLOC(ALLOC_SIZE);
UT_ASSERTeq(pmemobj_pool_by_ptr(valid_ptr), NULL);
FREE(valid_ptr);
for (unsigned i = 0; i < npools; ++i) {
void *before_pool = (char *)pops[i] - 1;
void *after_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL + 1;
void *start_pool = (char *)pops[i];
void *end_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL - 1;
void *edge = (char *)pops[i] + PMEMOBJ_MIN_POOL;
void *middle = (char *)pops[i] + (PMEMOBJ_MIN_POOL / 2);
void *in_oid = (char *)pmemobj_direct(oids[i]) +
(ALLOC_SIZE / 2);
UT_ASSERTeq(pmemobj_pool_by_ptr(before_pool), NULL);
UT_ASSERTeq(pmemobj_pool_by_ptr(after_pool), NULL);
UT_ASSERTeq(pmemobj_pool_by_ptr(start_pool), pops[i]);
UT_ASSERTeq(pmemobj_pool_by_ptr(end_pool), pops[i]);
UT_ASSERTeq(pmemobj_pool_by_ptr(edge), NULL);
UT_ASSERTeq(pmemobj_pool_by_ptr(middle), pops[i]);
UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), pops[i]);
pmemobj_close(pops[i]);
UT_ASSERTeq(pmemobj_pool_by_ptr(middle), NULL);
UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), NULL);
MUNMAP(guard_after[i], Ut_pagesize);
}
for (unsigned i = 0; i < npools; ++i) {
UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), NULL);
define_path(path, length, dir, i);
pops[i] = pmemobj_open(path, LAYOUT_NAME);
UT_ASSERTne(pops[i], NULL);
UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]);
pmemobj_close(pops[i]);
}
FREE(path);
FREE(pops);
FREE(guard_after);
FREE(oids);
DONE(NULL);
}
| 3,576 | 26.305344 | 70 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_constructor/obj_constructor.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_constructor.c -- tests for constructor
*/
#include <stddef.h>
#include "unittest.h"
/*
* Command line toggle indicating use of a bigger node structure for querying
* pool size expressed in a number of possible allocations. A small node
* structure results in a great number of allocations impossible to replicate
* in assumed timeout. It is required by unit tests using remote replication to
* pass on Travis.
*/
#define USE_BIG_ALLOC "--big-alloc"
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(constr);
POBJ_LAYOUT_ROOT(constr, struct root);
POBJ_LAYOUT_TOID(constr, struct node);
POBJ_LAYOUT_TOID(constr, struct node_big);
POBJ_LAYOUT_END(constr);
struct root {
TOID(struct node) n;
POBJ_LIST_HEAD(head, struct node) list;
POBJ_LIST_HEAD(head_big, struct node_big) list_big;
};
struct node {
POBJ_LIST_ENTRY(struct node) next;
};
struct node_big {
POBJ_LIST_ENTRY(struct node_big) next;
int weight[2048];
};
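/*
 * root_constr_cancel -- constructor that cancels the root allocation by
 * returning non-zero
 */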
static int
root_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg)
{
return 1;
}
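/*
 * node_constr_cancel -- constructor that cancels the node allocation by
 * returning non-zero
 */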
static int
node_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg)
{
return 1;
}
struct foo {
int bar;
};
static struct foo *Canceled_ptr;
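/*
 * vg_test_save_ptr -- constructor that records the object's address in
 * Canceled_ptr and then cancels the allocation
 */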
static int
vg_test_save_ptr(PMEMobjpool *pop, void *ptr, void *arg)
{
Canceled_ptr = (struct foo *)ptr;
return 1;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_constructor");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(constr) != 2);
int big = (argc == 3 && strcmp(argv[2], USE_BIG_ALLOC) == 0);
size_t node_size;
size_t next_off;
if (big) {
node_size = sizeof(struct node_big);
next_off = offsetof(struct node_big, next);
} else if (argc == 2) {
node_size = sizeof(struct node);
next_off = offsetof(struct node, next);
} else {
UT_FATAL("usage: %s file-name [ %s ]", argv[0], USE_BIG_ALLOC);
}
const char *path = argv[1];
PMEMobjpool *pop = NULL;
int ret;
TOID(struct root) root;
TOID(struct node) node;
TOID(struct node_big) node_big;
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(constr),
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
errno = 0;
root.oid = pmemobj_root_construct(pop, sizeof(struct root),
root_constr_cancel, NULL);
UT_ASSERT(TOID_IS_NULL(root));
UT_ASSERTeq(errno, ECANCELED);
/*
* Allocate memory until OOM, so we can check later if the alloc
* cancellation didn't damage the heap in any way.
*/
int allocs = 0;
while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0)
allocs++;
UT_ASSERTne(allocs, 0);
PMEMoid oid;
PMEMoid next;
POBJ_FOREACH_SAFE(pop, oid, next)
pmemobj_free(&oid);
errno = 0;
ret = pmemobj_alloc(pop, NULL, node_size, 1, node_constr_cancel, NULL);
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(errno, ECANCELED);
/* the same number of allocations should be possible. */
while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0)
allocs--;
UT_ASSERT(allocs <= 0);
POBJ_FOREACH_SAFE(pop, oid, next)
pmemobj_free(&oid);
root.oid = pmemobj_root_construct(pop, sizeof(struct root),
NULL, NULL);
UT_ASSERT(!TOID_IS_NULL(root));
errno = 0;
if (big) {
node_big.oid = pmemobj_list_insert_new(pop, next_off,
&D_RW(root)->list_big, OID_NULL, 0, node_size,
1, node_constr_cancel, NULL);
UT_ASSERT(TOID_IS_NULL(node_big));
} else {
node.oid = pmemobj_list_insert_new(pop, next_off,
&D_RW(root)->list, OID_NULL, 0, node_size,
1, node_constr_cancel, NULL);
UT_ASSERT(TOID_IS_NULL(node));
}
UT_ASSERTeq(errno, ECANCELED);
pmemobj_alloc(pop, &oid, sizeof(struct foo), 1,
vg_test_save_ptr, NULL);
UT_ASSERTne(Canceled_ptr, NULL);
/* this should generate a valgrind memcheck warning */
Canceled_ptr->bar = 5;
pmemobj_persist(pop, &Canceled_ptr->bar, sizeof(Canceled_ptr->bar));
/*
* Allocate and cancel a huge object. It should return back to the
* heap and it should be possible to allocate it again.
*/
Canceled_ptr = NULL;
ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1,
vg_test_save_ptr, NULL);
UT_ASSERTne(Canceled_ptr, NULL);
void *first_ptr = Canceled_ptr;
Canceled_ptr = NULL;
ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1,
vg_test_save_ptr, NULL);
UT_ASSERTeq(first_ptr, Canceled_ptr);
pmemobj_close(pop);
DONE(NULL);
}
| 4,369 | 22.621622 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/unittest.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* unittest.h -- the mundane stuff shared by all unit tests
*
* we want unit tests to be very thorough and check absolutely everything
* in order to nail down the test case as precisely as possible and flag
* anything at all unexpected. as a result, most unit tests are 90% code
* checking stuff that isn't really interesting to what is being tested.
* to help address this, the macros defined here include all the boilerplate
* error checking which prints information and exits on unexpected errors.
*
* the result changes this code:
*
* if ((buf = malloc(size)) == NULL) {
* fprintf(stderr, "cannot allocate %d bytes for buf\n", size);
* exit(1);
* }
*
* into this code:
*
* buf = MALLOC(size);
*
* and the error message includes the calling context information (file:line).
* in general, using the all-caps version of a call means you're using the
* unittest.h version which does the most common checking for you. so
* calling VMEM_CREATE() instead of vmem_create() returns the same
* thing, but can never return an error since the unit test library checks for
 * it. for routines like vmem_delete() there is no corresponding
* VMEM_DELETE() because there's no error to check for.
*
* all unit tests should use the same initialization:
*
* START(argc, argv, "brief test description", ...);
*
* all unit tests should use these exit calls:
*
* DONE("message", ...);
* UT_FATAL("message", ...);
*
* uniform stderr and stdout messages:
*
* UT_OUT("message", ...);
* UT_ERR("message", ...);
*
* in all cases above, the message is printf-like, taking variable args.
* the message can be NULL. it can start with "!" in which case the "!" is
* skipped and the message gets the errno string appended to it, like this:
*
* if (somesyscall(..) < 0)
* UT_FATAL("!my message");
*/
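/*
 * a minimal test following these conventions might look like this
 * (illustrative sketch only):
 *
 *	int
 *	main(int argc, char *argv[])
 *	{
 *		START(argc, argv, "my_test");
 *		if (argc != 2)
 *			UT_FATAL("usage: %s file", argv[0]);
 *		int fd = OPEN(argv[1], O_RDWR);
 *		CLOSE(fd);
 *		DONE(NULL);
 *	}
 */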
#ifndef _UNITTEST_H
#define _UNITTEST_H 1
#include <libpmem.h>
#include <libpmem2.h>
#include <libpmemblk.h>
#include <libpmemlog.h>
#include <libpmemobj.h>
#include <libpmempool.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <setjmp.h>
#include <time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/file.h>
#ifndef __FreeBSD__
#include <sys/mount.h>
#endif
#include <fcntl.h>
#include <signal.h>
#include <errno.h>
#include <dirent.h>
/* XXX: move OS abstraction layer out of common */
#include "os.h"
#include "os_thread.h"
#include "util.h"
int ut_get_uuid_str(char *);
#define UT_MAX_ERR_MSG 128
#define UT_POOL_HDR_UUID_STR_LEN 37 /* uuid string length */
#define UT_POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid"
/* XXX - fix this temp hack dup'ing util_strerror when we get mock for win */
void ut_strerror(int errnum, char *buff, size_t bufflen);
/* XXX - eliminate duplicated definitions in unittest.h and util.h */
#ifdef _WIN32
static inline int ut_util_statW(const wchar_t *path,
os_stat_t *st_bufp) {
int retVal = _wstat64(path, st_bufp);
/* clear unused bits to avoid confusion */
st_bufp->st_mode &= 0600;
return retVal;
}
#endif
/*
* unit test support...
*/
void ut_start(const char *file, int line, const char *func,
int argc, char * const argv[], const char *fmt, ...)
__attribute__((format(printf, 6, 7)));
void ut_startW(const char *file, int line, const char *func,
int argc, wchar_t * const argv[], const char *fmt, ...)
__attribute__((format(printf, 6, 7)));
void NORETURN ut_done(const char *file, int line, const char *func,
const char *fmt, ...)
__attribute__((format(printf, 4, 5)));
void NORETURN ut_fatal(const char *file, int line, const char *func,
const char *fmt, ...)
__attribute__((format(printf, 4, 5)));
void NORETURN ut_end(const char *file, int line, const char *func,
int ret);
void ut_out(const char *file, int line, const char *func,
const char *fmt, ...)
__attribute__((format(printf, 4, 5)));
void ut_err(const char *file, int line, const char *func,
const char *fmt, ...)
__attribute__((format(printf, 4, 5)));
/* indicate the start of the test */
#ifndef _WIN32
#define START(argc, argv, ...)\
ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__)
#else
#define START(argc, argv, ...)\
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);\
for (int i = 0; i < argc; i++) {\
argv[i] = ut_toUTF8(wargv[i]);\
if (argv[i] == NULL) {\
for (i--; i >= 0; i--)\
free(argv[i]);\
UT_FATAL("Error during arguments conversion\n");\
}\
}\
ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__)
#endif
/* indicate the start of the test */
#define STARTW(argc, argv, ...)\
ut_startW(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__)
/* normal exit from test */
#ifndef _WIN32
#define DONE(...)\
ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__)
#else
#define DONE(...)\
for (int i = argc; i > 0; i--)\
free(argv[i - 1]);\
ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__)
#endif
#define DONEW(...)\
ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__)
#define END(ret, ...)\
ut_end(__FILE__, __LINE__, __func__, ret)
/* fatal error detected */
#define UT_FATAL(...)\
ut_fatal(__FILE__, __LINE__, __func__, __VA_ARGS__)
/* normal output */
#define UT_OUT(...)\
ut_out(__FILE__, __LINE__, __func__, __VA_ARGS__)
/* error output */
#define UT_ERR(...)\
ut_err(__FILE__, __LINE__, __func__, __VA_ARGS__)
/*
* assertions...
*/
/* assert a condition is true at runtime */
#define UT_ASSERT_rt(cnd)\
((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\
"assertion failure: %s", #cnd), 0)))
/* assertion with extra info printed if assertion fails at runtime */
#define UT_ASSERTinfo_rt(cnd, info) \
((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\
"assertion failure: %s (%s)", #cnd, info), 0)))
/* assert two integer values are equal at runtime */
#define UT_ASSERTeq_rt(lhs, rhs)\
((void)(((lhs) == (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0)))
/* assert two integer values are not equal at runtime */
#define UT_ASSERTne_rt(lhs, rhs)\
((void)(((lhs) != (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\
"assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\
(unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0)))
#if defined(__CHECKER__)
#define UT_COMPILE_ERROR_ON(cond)
#define UT_ASSERT_COMPILE_ERROR_ON(cond)
#elif defined(_MSC_VER)
#define UT_COMPILE_ERROR_ON(cond) C_ASSERT(!(cond))
/* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */
#define UT_ASSERT_COMPILE_ERROR_ON(cond) (void)(cond)
#else
#define UT_COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
#ifndef __cplusplus
#define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_COMPILE_ERROR_ON(cond)
#else /* __cplusplus */
/*
* XXX - workaround for https://github.com/pmem/issues/issues/189
*/
#define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_ASSERT_rt(!(cond))
#endif /* __cplusplus */
#endif /* _MSC_VER */
/* assert a condition is true */
#define UT_ASSERT(cnd)\
do {\
/*\
* Detect useless asserts on always true expression. Please use\
* UT_COMPILE_ERROR_ON(!cnd) or UT_ASSERT_rt(cnd) in such\
* cases.\
*/\
if (__builtin_constant_p(cnd))\
UT_ASSERT_COMPILE_ERROR_ON(cnd);\
UT_ASSERT_rt(cnd);\
} while (0)
/* assertion with extra info printed if assertion fails */
#define UT_ASSERTinfo(cnd, info) \
do {\
/* See comment in UT_ASSERT. */\
if (__builtin_constant_p(cnd))\
UT_ASSERT_COMPILE_ERROR_ON(cnd);\
UT_ASSERTinfo_rt(cnd, info);\
} while (0)
/* assert two integer values are equal */
#define UT_ASSERTeq(lhs, rhs)\
do {\
/* See comment in UT_ASSERT. */\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
UT_ASSERT_COMPILE_ERROR_ON((lhs) == (rhs));\
UT_ASSERTeq_rt(lhs, rhs);\
} while (0)
/* assert two integer values are not equal */
#define UT_ASSERTne(lhs, rhs)\
do {\
/* See comment in UT_ASSERT. */\
if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\
UT_ASSERT_COMPILE_ERROR_ON((lhs) != (rhs));\
UT_ASSERTne_rt(lhs, rhs);\
} while (0)
/* assert pointer fits in the range [start, start + size) */
#define UT_ASSERTrange(ptr, start, size)\
((void)(((uintptr_t)(ptr) >= (uintptr_t)(start) &&\
(uintptr_t)(ptr) < (uintptr_t)(start) + (uintptr_t)(size)) ||\
(ut_fatal(__FILE__, __LINE__, __func__,\
"assert failure: %s (%p) is outside range [%s (%p), %s (%p))", #ptr,\
(void *)(ptr), #start, (void *)(start), #start"+"#size,\
(void *)((uintptr_t)(start) + (uintptr_t)(size))), 0)))
/*
* memory allocation...
*/
void *ut_malloc(const char *file, int line, const char *func, size_t size);
void *ut_calloc(const char *file, int line, const char *func,
size_t nmemb, size_t size);
void ut_free(const char *file, int line, const char *func, void *ptr);
void ut_aligned_free(const char *file, int line, const char *func, void *ptr);
void *ut_realloc(const char *file, int line, const char *func,
void *ptr, size_t size);
char *ut_strdup(const char *file, int line, const char *func,
const char *str);
void *ut_pagealignmalloc(const char *file, int line, const char *func,
size_t size);
void *ut_memalign(const char *file, int line, const char *func,
size_t alignment, size_t size);
void *ut_mmap_anon_aligned(const char *file, int line, const char *func,
size_t alignment, size_t size);
int ut_munmap_anon_aligned(const char *file, int line, const char *func,
void *start, size_t size);
/* a malloc() that can't return NULL */
#define MALLOC(size)\
ut_malloc(__FILE__, __LINE__, __func__, size)
/* a calloc() that can't return NULL */
#define CALLOC(nmemb, size)\
ut_calloc(__FILE__, __LINE__, __func__, nmemb, size)
/* a malloc() of zeroed memory */
#define ZALLOC(size)\
ut_calloc(__FILE__, __LINE__, __func__, 1, size)
#define FREE(ptr)\
ut_free(__FILE__, __LINE__, __func__, ptr)
#define ALIGNED_FREE(ptr)\
ut_aligned_free(__FILE__, __LINE__, __func__, ptr)
/* a realloc() that can't return NULL */
#define REALLOC(ptr, size)\
ut_realloc(__FILE__, __LINE__, __func__, ptr, size)
/* a strdup() that can't return NULL */
#define STRDUP(str)\
ut_strdup(__FILE__, __LINE__, __func__, str)
/* a malloc() that only returns page aligned memory */
#define PAGEALIGNMALLOC(size)\
ut_pagealignmalloc(__FILE__, __LINE__, __func__, size)
/* a malloc() that returns memory with given alignment */
#define MEMALIGN(alignment, size)\
ut_memalign(__FILE__, __LINE__, __func__, alignment, size)
/*
* A mmap() that returns anonymous memory with given alignment and guard
* pages.
*/
#define MMAP_ANON_ALIGNED(size, alignment)\
ut_mmap_anon_aligned(__FILE__, __LINE__, __func__, alignment, size)
#define MUNMAP_ANON_ALIGNED(start, size)\
ut_munmap_anon_aligned(__FILE__, __LINE__, __func__, start, size)
/*
* file operations
*/
int ut_open(const char *file, int line, const char *func, const char *path,
int flags, ...);
int ut_wopen(const char *file, int line, const char *func, const wchar_t *path,
int flags, ...);
int ut_close(const char *file, int line, const char *func, int fd);
FILE *ut_fopen(const char *file, int line, const char *func, const char *path,
const char *mode);
int ut_fclose(const char *file, int line, const char *func, FILE *stream);
int ut_unlink(const char *file, int line, const char *func, const char *path);
size_t ut_write(const char *file, int line, const char *func, int fd,
const void *buf, size_t len);
size_t ut_read(const char *file, int line, const char *func, int fd,
void *buf, size_t len);
os_off_t ut_lseek(const char *file, int line, const char *func, int fd,
os_off_t offset, int whence);
int ut_posix_fallocate(const char *file, int line, const char *func, int fd,
os_off_t offset, os_off_t len);
int ut_stat(const char *file, int line, const char *func, const char *path,
os_stat_t *st_bufp);
int ut_statW(const char *file, int line, const char *func, const wchar_t *path,
os_stat_t *st_bufp);
int ut_fstat(const char *file, int line, const char *func, int fd,
os_stat_t *st_bufp);
void *ut_mmap(const char *file, int line, const char *func, void *addr,
size_t length, int prot, int flags, int fd, os_off_t offset);
int ut_munmap(const char *file, int line, const char *func, void *addr,
size_t length);
int ut_mprotect(const char *file, int line, const char *func, void *addr,
size_t len, int prot);
int ut_ftruncate(const char *file, int line, const char *func,
int fd, os_off_t length);
long long ut_strtoll(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
long ut_strtol(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
int ut_strtoi(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
unsigned long long ut_strtoull(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
unsigned long ut_strtoul(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
unsigned ut_strtou(const char *file, int line, const char *func,
const char *nptr, char **endptr, int base);
int ut_snprintf(const char *file, int line, const char *func,
char *str, size_t size, const char *format, ...);
/* an open() that can't return < 0 */
#define OPEN(path, ...)\
ut_open(__FILE__, __LINE__, __func__, path, __VA_ARGS__)
/* a _wopen() that can't return < 0 */
#define WOPEN(path, ...)\
ut_wopen(__FILE__, __LINE__, __func__, path, __VA_ARGS__)
/* a close() that can't return -1 */
#define CLOSE(fd)\
ut_close(__FILE__, __LINE__, __func__, fd)
/* an fopen() that can't return NULL */
#define FOPEN(path, mode)\
ut_fopen(__FILE__, __LINE__, __func__, path, mode)
/* a fclose() that can't return != 0 */
#define FCLOSE(stream)\
ut_fclose(__FILE__, __LINE__, __func__, stream)
/* an unlink() that can't return -1 */
#define UNLINK(path)\
ut_unlink(__FILE__, __LINE__, __func__, path)
/* a write() that can't return -1 */
#define WRITE(fd, buf, len)\
ut_write(__FILE__, __LINE__, __func__, fd, buf, len)
/* a read() that can't return -1 */
#define READ(fd, buf, len)\
ut_read(__FILE__, __LINE__, __func__, fd, buf, len)
/* a lseek() that can't return -1 */
#define LSEEK(fd, offset, whence)\
ut_lseek(__FILE__, __LINE__, __func__, fd, offset, whence)
#define POSIX_FALLOCATE(fd, off, len)\
ut_posix_fallocate(__FILE__, __LINE__, __func__, fd, off, len)
#define FSTAT(fd, st_bufp)\
ut_fstat(__FILE__, __LINE__, __func__, fd, st_bufp)
/* a mmap() that can't return MAP_FAILED */
#define MMAP(addr, len, prot, flags, fd, offset)\
ut_mmap(__FILE__, __LINE__, __func__, addr, len, prot, flags, fd, offset);
/* a munmap() that can't return -1 */
#define MUNMAP(addr, length)\
ut_munmap(__FILE__, __LINE__, __func__, addr, length);
/* a mprotect() that can't return -1 */
#define MPROTECT(addr, len, prot)\
ut_mprotect(__FILE__, __LINE__, __func__, addr, len, prot);
#define STAT(path, st_bufp)\
ut_stat(__FILE__, __LINE__, __func__, path, st_bufp)
#define STATW(path, st_bufp)\
ut_statW(__FILE__, __LINE__, __func__, path, st_bufp)
#define FTRUNCATE(fd, length)\
ut_ftruncate(__FILE__, __LINE__, __func__, fd, length)
#define ATOU(nptr) STRTOU(nptr, NULL, 10)
#define ATOUL(nptr) STRTOUL(nptr, NULL, 10)
#define ATOULL(nptr) STRTOULL(nptr, NULL, 10)
#define ATOI(nptr) STRTOI(nptr, NULL, 10)
#define ATOL(nptr) STRTOL(nptr, NULL, 10)
#define ATOLL(nptr) STRTOLL(nptr, NULL, 10)
#define STRTOULL(nptr, endptr, base)\
ut_strtoull(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define STRTOUL(nptr, endptr, base)\
ut_strtoul(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define STRTOL(nptr, endptr, base)\
ut_strtol(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define STRTOLL(nptr, endptr, base)\
ut_strtoll(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define STRTOU(nptr, endptr, base)\
ut_strtou(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define STRTOI(nptr, endptr, base)\
ut_strtoi(__FILE__, __LINE__, __func__, nptr, endptr, base)
#define SNPRINTF(str, size, format, ...) \
ut_snprintf(__FILE__, __LINE__, __func__, \
str, size, format, __VA_ARGS__)
#ifndef _WIN32
#define ut_jmp_buf_t sigjmp_buf
#define ut_siglongjmp(b) siglongjmp(b, 1)
#define ut_sigsetjmp(b) sigsetjmp(b, 1)
#else
#define ut_jmp_buf_t jmp_buf
#define ut_siglongjmp(b) longjmp(b, 1)
#define ut_sigsetjmp(b) setjmp(b)
#endif
void ut_suppress_errmsg(void);
void ut_unsuppress_errmsg(void);
void ut_suppress_crt_assert(void);
void ut_unsuppress_crt_assert(void);
/*
* signals...
*/
int ut_sigaction(const char *file, int line, const char *func,
int signum, struct sigaction *act, struct sigaction *oldact);
/* a sigaction() that can't return an error */
#define SIGACTION(signum, act, oldact)\
ut_sigaction(__FILE__, __LINE__, __func__, signum, act, oldact)
/*
* pthreads...
*/
int ut_thread_create(const char *file, int line, const char *func,
os_thread_t *__restrict thread,
const os_thread_attr_t *__restrict attr,
void *(*start_routine)(void *), void *__restrict arg);
int ut_thread_join(const char *file, int line, const char *func,
os_thread_t *thread, void **value_ptr);
/* a os_thread_create() that can't return an error */
#define THREAD_CREATE(thread, attr, start_routine, arg)\
ut_thread_create(__FILE__, __LINE__, __func__,\
thread, attr, start_routine, arg)
/* a os_thread_join() that can't return an error */
#define THREAD_JOIN(thread, value_ptr)\
ut_thread_join(__FILE__, __LINE__, __func__, thread, value_ptr)
/*
* processes...
*/
#ifdef _WIN32
intptr_t ut_spawnv(int argc, const char **argv, ...);
#endif
/*
* mocks...
*
* NOTE: On Linux, function mocking is implemented using wrapper functions.
* See "--wrap" option of the GNU linker.
* There is no such feature in VC++, so on Windows we do the mocking at
* compile time, by redefining symbol names:
* - all the references to <symbol> are replaced with <__wrap_symbol>
* in all the compilation units, except the one where the <symbol> is
* defined and the test source file
* - the original definition of <symbol> is replaced with <__real_symbol>
* - a wrapper function <__wrap_symbol> must be defined in the test program
* (it may still call the original function via <__real_symbol>)
 * Such a solution seems to be sufficient for the purpose of our tests, even
 * though it has some limitations. I.e. it does not work well with malloc/free,
* so to wrap the system memory allocator functions, we use the built-in
* feature of all the PMDK libraries, allowing to override default memory
* allocator with the custom one.
*/
#ifndef _WIN32
#define _FUNC_REAL_DECL(name, ret_type, ...)\
ret_type __real_##name(__VA_ARGS__) __attribute__((unused));
#else
#define _FUNC_REAL_DECL(name, ret_type, ...)\
ret_type name(__VA_ARGS__);
#endif
#ifndef _WIN32
#define _FUNC_REAL(name)\
__real_##name
#else
#define _FUNC_REAL(name)\
name
#endif
#define RCOUNTER(name)\
_rcounter##name
#define FUNC_MOCK_RCOUNTER_SET(name, val)\
RCOUNTER(name) = val;
#define FUNC_MOCK(name, ret_type, ...)\
_FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\
static unsigned RCOUNTER(name);\
ret_type __wrap_##name(__VA_ARGS__);\
ret_type __wrap_##name(__VA_ARGS__) {\
switch (util_fetch_and_add32(&RCOUNTER(name), 1)) {
#define FUNC_MOCK_DLLIMPORT(name, ret_type, ...)\
__declspec(dllimport) _FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\
static unsigned RCOUNTER(name);\
ret_type __wrap_##name(__VA_ARGS__);\
ret_type __wrap_##name(__VA_ARGS__) {\
switch (util_fetch_and_add32(&RCOUNTER(name), 1)) {
#define FUNC_MOCK_END\
}}
#define FUNC_MOCK_RUN(run)\
case run:
#define FUNC_MOCK_RUN_DEFAULT\
default:
#define FUNC_MOCK_RUN_RET(run, ret)\
case run: return (ret);
#define FUNC_MOCK_RUN_RET_DEFAULT_REAL(name, ...)\
default: return _FUNC_REAL(name)(__VA_ARGS__);
#define FUNC_MOCK_RUN_RET_DEFAULT(ret)\
default: return (ret);
#define FUNC_MOCK_RET_ALWAYS(name, ret_type, ret, ...)\
FUNC_MOCK(name, ret_type, __VA_ARGS__)\
FUNC_MOCK_RUN_RET_DEFAULT(ret);\
FUNC_MOCK_END
#define FUNC_MOCK_RET_ALWAYS_VOID(name, ...)\
FUNC_MOCK(name, void, __VA_ARGS__)\
default: return;\
FUNC_MOCK_END
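/*
 * example (sketch): wrap foo() so that the first call fails and all
 * subsequent calls fall through to the real implementation:
 *
 *	FUNC_MOCK(foo, int, int arg)
 *		FUNC_MOCK_RUN_RET(0, -1)
 *		FUNC_MOCK_RUN_RET_DEFAULT_REAL(foo, arg)
 *	FUNC_MOCK_END
 */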
extern unsigned long Ut_pagesize;
extern unsigned long long Ut_mmap_align;
extern os_mutex_t Sigactions_lock;
void ut_dump_backtrace(void);
void ut_sighandler(int);
void ut_register_sighandlers(void);
uint16_t ut_checksum(uint8_t *addr, size_t len);
char *ut_toUTF8(const wchar_t *wstr);
wchar_t *ut_toUTF16(const char *wstr);
struct test_case {
const char *name;
int (*func)(const struct test_case *tc, int argc, char *argv[]);
};
/*
* get_tc -- return test case of specified name
*/
static inline const struct test_case *
get_tc(const char *name, const struct test_case *test_cases, size_t ntests)
{
for (size_t i = 0; i < ntests; i++) {
if (strcmp(name, test_cases[i].name) == 0)
return &test_cases[i];
}
return NULL;
}
static inline void
TEST_CASE_PROCESS(int argc, char *argv[],
const struct test_case *test_cases, size_t ntests)
{
if (argc < 2)
UT_FATAL("usage: %s <test case> [<args>]", argv[0]);
for (int i = 1; i < argc; i++) {
char *str_test = argv[i];
const int args_off = i + 1;
const struct test_case *tc = get_tc(str_test,
test_cases, ntests);
if (!tc)
UT_FATAL("unknown test case -- '%s'", str_test);
int ret = tc->func(tc, argc - args_off, &argv[args_off]);
if (ret < 0)
UT_FATAL("test return value cannot be negative");
i += ret;
}
}
#define TEST_CASE_DECLARE(_name)\
int \
_name(const struct test_case *tc, int argc, char *argv[])
#define TEST_CASE(_name)\
{\
.name = #_name,\
.func = (_name),\
}
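/*
 * example (sketch) of the test case machinery:
 *
 *	TEST_CASE_DECLARE(test_foo);
 *	...
 *	static struct test_case test_cases[] = { TEST_CASE(test_foo) };
 *	...
 *	TEST_CASE_PROCESS(argc, argv, test_cases,
 *		sizeof(test_cases) / sizeof(test_cases[0]));
 */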
#define STR(x) #x
#define ASSERT_ALIGNED_BEGIN(type) do {\
size_t off = 0;\
const char *last = "(none)";\
type t;
#define ASSERT_ALIGNED_FIELD(type, field) do {\
if (offsetof(type, field) != off)\
UT_FATAL("%s: padding, missing field or fields not in order between "\
"'%s' and '%s' -- offset %lu, real offset %lu",\
STR(type), last, STR(field), off, offsetof(type, field));\
off += sizeof(t.field);\
last = STR(field);\
} while (0)
#define ASSERT_FIELD_SIZE(field, size) do {\
UT_COMPILE_ERROR_ON(size != sizeof(t.field));\
} while (0)
#define ASSERT_OFFSET_CHECKPOINT(type, checkpoint) do {\
if (off != checkpoint)\
UT_FATAL("%s: violated offset checkpoint -- "\
"checkpoint %lu, real offset %lu",\
STR(type), checkpoint, off);\
} while (0)
#define ASSERT_ALIGNED_CHECK(type)\
if (off != sizeof(type))\
UT_FATAL("%s: missing field or padding after '%s': "\
"sizeof(%s) = %lu, fields size = %lu",\
STR(type), last, STR(type), sizeof(type), off);\
} while (0)
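/*
 * example (sketch): verify that struct foo has no padding and that its
 * fields are declared in order:
 *
 *	ASSERT_ALIGNED_BEGIN(struct foo);
 *	ASSERT_ALIGNED_FIELD(struct foo, first);
 *	ASSERT_ALIGNED_FIELD(struct foo, second);
 *	ASSERT_ALIGNED_CHECK(struct foo);
 */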
/*
* AddressSanitizer
*/
#ifdef __clang__
#if __has_feature(address_sanitizer)
#define UT_DEFINE_ASAN_POISON
#endif
#else
#ifdef __SANITIZE_ADDRESS__
#define UT_DEFINE_ASAN_POISON
#endif
#endif
#ifdef UT_DEFINE_ASAN_POISON
void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#define ASAN_POISON_MEMORY_REGION(addr, size) \
__asan_poison_memory_region((addr), (size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
__asan_unpoison_memory_region((addr), (size))
#else
#define ASAN_POISON_MEMORY_REGION(addr, size) \
((void)(addr), (void)(size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
((void)(addr), (void)(size))
#endif
#ifdef __cplusplus
}
#endif
#endif /* unittest.h */
| 23,907 | 29.769627 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_fh.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* ut_fh.c -- implementation of OS-independent file handle / file descriptor
* interface
*/
/* for O_TMPFILE */
#define _GNU_SOURCE
#include <fcntl.h>
#include "ut_fh.h"
#include "unittest.h"
struct FHandle {
int fd;
#ifdef _WIN32
HANDLE h;
#endif
enum file_handle_type type;
};
#ifdef _WIN32
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
#endif
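/*
 * check_invalid_flags -- reject flag combinations that cannot be handled
 * consistently by both the fd and the handle implementation
 */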
static void
check_invalid_flags(const char *file, int line, const char *func, int flags)
{
if ((flags & FH_EXCL) && !(flags & FH_CREAT)) {
ut_fatal(file, line, func,
"FH_EXCL without FH_CREAT is meaningless");
}
if ((flags & FH_TRUNC) && (flags & FH_CREAT)) {
/* because Windows doesn't support both */
ut_fatal(file, line, func,
"FH_TRUNC with FH_CREAT is forbidden");
}
}
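/*
 * ut_fh_open_fd -- translate FH_* flags into open(2) flags and open the file
 * as a plain file descriptor
 */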
static int
ut_fh_open_fd(const char *file, int line, const char *func,
const char *path, int flags, mode_t mode)
{
int sflags = 0;
check_invalid_flags(file, line, func, flags);
if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) {
flags &= ~(FH_CREAT | FH_EXCL);
sflags |= O_CREAT | O_EXCL;
} else if (flags & FH_CREAT) {
flags &= ~FH_CREAT;
sflags |= O_CREAT;
/* Windows version doesn't support both O_TRUNC and O_CREAT */
} else if (flags & FH_TRUNC) {
flags &= ~FH_TRUNC;
sflags |= O_TRUNC;
}
int acc = flags & FH_ACCMODE;
/* Linux version does not have FH_EXEC equivalent */
if ((acc & FH_WRITE) && (acc & FH_READ))
sflags |= O_RDWR;
else if (acc & FH_WRITE)
sflags |= O_WRONLY;
else if (acc & FH_READ)
sflags |= O_RDONLY;
else
ut_fatal(file, line, func, "unknown access mode %d", acc);
flags &= ~FH_ACCMODE;
if (flags & FH_DIRECTORY) {
#ifdef _WIN32
ut_fatal(file, line, func,
"FH_DIRECTORY is not supported on Windows using FD interface");
#else
flags &= ~FH_DIRECTORY;
sflags |= O_DIRECTORY;
#endif
}
if (flags & FH_TMPFILE) {
#ifdef O_TMPFILE
flags &= ~FH_TMPFILE;
sflags |= O_TMPFILE;
#else
ut_fatal(file, line, func,
"FH_TMPFILE is not supported on this system for file descriptors");
#endif
}
if (flags)
ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags);
return ut_open(file, line, func, path, sflags, mode);
}
#ifdef _WIN32
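/*
 * ut_fh_open_handle -- translate FH_* flags into CreateFileW() parameters and
 * open the file as a Windows HANDLE
 */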
static HANDLE
ut_fh_open_handle(const char *file, int line, const char *func,
const char *path, int flags, mode_t mode)
{
DWORD dwDesiredAccess;
/* do not allow delete, read or write from another process */
DWORD dwShareMode = 0;
LPSECURITY_ATTRIBUTES lpSecurityAttributes = NULL;
DWORD dwCreationDisposition;
DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL;
HANDLE hTemplateFile = NULL;
/* XXX sometimes doesn't work, ERROR_ACCESS_DENIED on AppVeyor */
#if 0
/*
* FILE_FLAG_DELETE_ON_CLOSE needs a real file (FH_CREAT)
* If it already exists refuse to use it (FH_EXCL), because this means
* something weird is going on (either there's another process with
* the same file opened or FILE_FLAG_DELETE_ON_CLOSE didn't actually
* delete the file on close)
*/
if (flags & FH_TMPFILE)
flags |= FH_CREAT | FH_EXCL;
#else
if (flags & FH_TMPFILE)
ut_fatal(file, line, func,
"FH_TMPFILE is not supported for file handles");
#endif
check_invalid_flags(file, line, func, flags);
/* only write permission can be taken out on Windows */
if (!(mode & _S_IWRITE))
dwFlagsAndAttributes |= FILE_ATTRIBUTE_READONLY;
if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) {
flags &= ~(FH_CREAT | FH_EXCL);
dwCreationDisposition = CREATE_NEW;
} else if (flags & FH_CREAT) {
flags &= ~FH_CREAT;
dwCreationDisposition = OPEN_ALWAYS;
} else if (flags & FH_TRUNC) {
flags &= ~FH_TRUNC;
dwCreationDisposition = TRUNCATE_EXISTING;
} else {
dwCreationDisposition = OPEN_EXISTING;
}
int acc = flags & FH_ACCMODE;
dwDesiredAccess = 0;
if (acc & FH_READ) {
dwDesiredAccess |= GENERIC_READ;
acc &= ~FH_READ;
}
if (acc & FH_WRITE) {
dwDesiredAccess |= GENERIC_WRITE;
acc &= ~FH_WRITE;
}
if (acc & FH_EXEC) {
dwDesiredAccess |= GENERIC_EXECUTE;
acc &= ~FH_EXEC;
}
if (acc)
ut_fatal(file, line, func, "unknown access mode %d", acc);
flags &= ~FH_ACCMODE;
if (flags & FH_DIRECTORY) {
flags &= ~FH_DIRECTORY;
		/* FILE_FLAG_BACKUP_SEMANTICS is required to open a directory */
dwFlagsAndAttributes |= FILE_FLAG_BACKUP_SEMANTICS;
}
char *full_path = NULL;
if (flags & FH_TMPFILE) {
flags &= ~FH_TMPFILE;
dwFlagsAndAttributes |= FILE_FLAG_DELETE_ON_CLOSE;
/*
* FILE_FLAG_DELETE_ON_CLOSE needs a real file,
* not a directory
*/
full_path = MALLOC(strlen(path) + 1 +
strlen("UT_FH_TMPFILE") + 1);
sprintf(full_path, "%s\\UT_FH_TMPFILE", path);
path = full_path;
}
if (flags)
ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags);
wchar_t *wpath = util_toUTF16(path);
if (wpath == NULL)
ut_fatal(file, line, func, "conversion to utf16 failed");
HANDLE h = CreateFileW(wpath, dwDesiredAccess, dwShareMode,
lpSecurityAttributes, dwCreationDisposition,
dwFlagsAndAttributes, hTemplateFile);
util_free_UTF16(wpath);
if (h == INVALID_HANDLE_VALUE) {
ut_fatal(file, line, func, "opening file %s failed: %d", path,
GetLastError());
}
if (full_path)
free(full_path);
return h;
}
#endif
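/*
 * ut_fh_open -- open a file using the requested handle type; the variadic
 * mode argument is consumed only when FH_CREAT or FH_TMPFILE is passed
 */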
struct FHandle *
ut_fh_open(const char *file, int line, const char *func,
enum file_handle_type type, const char *path, int flags, ...)
{
struct FHandle *f = MALLOC(sizeof(*f));
mode_t mode = 0;
va_list ap;
va_start(ap, flags);
if ((flags & FH_CREAT) || (flags & FH_TMPFILE))
mode = va_arg(ap, mode_t);
va_end(ap);
f->type = type;
if (type == FH_FD) {
f->fd = ut_fh_open_fd(file, line, func, path, flags, mode);
} else if (type == FH_HANDLE) {
#ifdef _WIN32
f->h = ut_fh_open_handle(file, line, func, path, flags,
mode);
#else
ut_fatal(file, line, func,
"FH_HANDLE not supported on !Windows");
#endif
} else {
ut_fatal(file, line, func, "unknown type value %d", type);
}
return f;
}
void
ut_fh_truncate(const char *file, int line, const char *func,
struct FHandle *f, os_off_t length)
{
if (f->type == FH_FD) {
ut_ftruncate(file, line, func, f->fd, length);
} else if (f->type == FH_HANDLE) {
#ifdef _WIN32
LONG low = LODWORD(length);
LONG high = HIDWORD(length);
if (SetFilePointer(f->h, low, &high, FILE_BEGIN) ==
INVALID_SET_FILE_POINTER &&
GetLastError() != ERROR_SUCCESS) {
ut_fatal(file, line, func, "SetFilePointer failed: %d",
GetLastError());
}
if (SetEndOfFile(f->h) == 0) {
ut_fatal(file, line, func, "SetEndOfFile failed: %d",
GetLastError());
}
#else
ut_fatal(file, line, func,
"FH_HANDLE not supported on !Windows");
#endif
} else {
ut_fatal(file, line, func, "unknown type value %d", f->type);
}
}
void
ut_fh_close(const char *file, int line, const char *func, struct FHandle *f)
{
if (f->type == FH_FD) {
CLOSE(f->fd);
} else if (f->type == FH_HANDLE) {
#ifdef _WIN32
CloseHandle(f->h);
#else
ut_fatal(file, line, func,
"FH_HANDLE not supported on !Windows");
#endif
} else {
ut_fatal(file, line, func, "unknown type value %d", f->type);
}
memset(f, 0, sizeof(*f));
FREE(f);
}
int
ut_fh_get_fd(const char *file, int line, const char *func, struct FHandle *f)
{
if (f->type == FH_FD)
return f->fd;
ut_fatal(file, line, func,
"requested file descriptor on FHandle that doesn't contain it");
}
#ifdef _WIN32
HANDLE
ut_fh_get_handle(const char *file, int line, const char *func,
struct FHandle *f)
{
if (f->type == FH_HANDLE)
return f->h;
ut_fatal(file, line, func,
"requested file handle on FHandle that doesn't contain it");
}
#endif
enum file_handle_type
ut_fh_get_handle_type(struct FHandle *fh)
{
return fh->type;
}
| 7,734 | 22.158683 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_config.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* ut_pmem2_config.h -- utility helper functions for libpmem2 config tests
*/
#ifndef UT_PMEM2_CONFIG_H
#define UT_PMEM2_CONFIG_H 1
#include "ut_fh.h"
/* a pmem2_config_new() that can't return NULL */
#define PMEM2_CONFIG_NEW(cfg) \
ut_pmem2_config_new(__FILE__, __LINE__, __func__, cfg)
/* a pmem2_config_set_required_store_granularity() that cannot fail */
#define PMEM2_CONFIG_SET_GRANULARITY(cfg, g) \
ut_pmem2_config_set_required_store_granularity \
(__FILE__, __LINE__, __func__, cfg, g)
/* a pmem2_config_delete() that can't return an error */
#define PMEM2_CONFIG_DELETE(cfg) \
ut_pmem2_config_delete(__FILE__, __LINE__, __func__, cfg)
void ut_pmem2_config_new(const char *file, int line, const char *func,
struct pmem2_config **cfg);
void ut_pmem2_config_set_required_store_granularity(const char *file,
int line, const char *func, struct pmem2_config *cfg,
enum pmem2_granularity g);
void ut_pmem2_config_delete(const char *file, int line, const char *func,
struct pmem2_config **cfg);
#endif /* UT_PMEM2_CONFIG_H */
| 1,152 | 30.162162 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_alloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* ut_alloc.c -- unit test memory allocation routines
*/
#include "unittest.h"
/*
* ut_malloc -- a malloc that cannot return NULL
*/
void *
ut_malloc(const char *file, int line, const char *func, size_t size)
{
void *retval = malloc(size);
if (retval == NULL)
ut_fatal(file, line, func, "cannot malloc %zu bytes", size);
return retval;
}
/*
* ut_calloc -- a calloc that cannot return NULL
*/
void *
ut_calloc(const char *file, int line, const char *func,
size_t nmemb, size_t size)
{
void *retval = calloc(nmemb, size);
if (retval == NULL)
ut_fatal(file, line, func, "cannot calloc %zu bytes", size);
return retval;
}
/*
* ut_free -- wrapper for free
*
* technically we don't need to wrap free since there's no return to
* check. using this wrapper to add memory allocation tracking later.
*/
void
ut_free(const char *file, int line, const char *func, void *ptr)
{
free(ptr);
}
/*
* ut_aligned_free -- wrapper for aligned memory free
*/
void
ut_aligned_free(const char *file, int line, const char *func, void *ptr)
{
#ifndef _WIN32
free(ptr);
#else
_aligned_free(ptr);
#endif
}
/*
* ut_realloc -- a realloc that cannot return NULL
*/
void *
ut_realloc(const char *file, int line, const char *func,
void *ptr, size_t size)
{
void *retval = realloc(ptr, size);
if (retval == NULL)
ut_fatal(file, line, func, "cannot realloc %zu bytes", size);
return retval;
}
/*
* ut_strdup -- a strdup that cannot return NULL
*/
char *
ut_strdup(const char *file, int line, const char *func,
const char *str)
{
char *retval = strdup(str);
if (retval == NULL)
ut_fatal(file, line, func, "cannot strdup %zu bytes",
strlen(str));
return retval;
}
/*
 * ut_memalign -- like malloc but returns memory with the requested alignment
*/
void *
ut_memalign(const char *file, int line, const char *func, size_t alignment,
size_t size)
{
void *retval;
#ifndef _WIN32
if ((errno = posix_memalign(&retval, alignment, size)) != 0)
ut_fatal(file, line, func,
"!memalign %zu bytes (%zu alignment)", size, alignment);
#else
retval = _aligned_malloc(size, alignment);
if (!retval) {
ut_fatal(file, line, func,
"!memalign %zu bytes (%zu alignment)", size, alignment);
}
#endif
return retval;
}
/*
* ut_pagealignmalloc -- like malloc but page-aligned memory
*/
void *
ut_pagealignmalloc(const char *file, int line, const char *func,
size_t size)
{
return ut_memalign(file, line, func, (size_t)Ut_pagesize, size);
}
/*
* ut_mmap_anon_aligned -- mmaps anonymous memory with specified (power of two,
* multiple of page size) alignment and adds guard
* pages around it
*/
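/*
 * Informal sketch of the resulting layout (each guard is Ut_mmap_align
 * bytes and mapped PROT_NONE):
 *
 *	[ guard | `size' bytes starting at the requested alignment | guard ]
 *	         ^-- returned pointer
 */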
void *
ut_mmap_anon_aligned(const char *file, int line, const char *func,
size_t alignment, size_t size)
{
char *d, *d_aligned;
uintptr_t di, di_aligned;
size_t sz;
if (alignment == 0)
alignment = Ut_mmap_align;
/* alignment must be a multiple of page size */
if (alignment & (Ut_mmap_align - 1))
return NULL;
/* power of two */
if (alignment & (alignment - 1))
return NULL;
d = ut_mmap(file, line, func, NULL, size + 2 * alignment,
PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
di = (uintptr_t)d;
di_aligned = (di + alignment - 1) & ~(alignment - 1);
if (di == di_aligned)
di_aligned += alignment;
d_aligned = (void *)di_aligned;
sz = di_aligned - di;
if (sz - Ut_mmap_align)
ut_munmap(file, line, func, d, sz - Ut_mmap_align);
/* guard page before */
ut_mprotect(file, line, func,
d_aligned - Ut_mmap_align, Ut_mmap_align, PROT_NONE);
/* guard page after */
ut_mprotect(file, line, func,
d_aligned + size, Ut_mmap_align, PROT_NONE);
sz = di + size + 2 * alignment - (di_aligned + size) - Ut_mmap_align;
if (sz)
ut_munmap(file, line, func,
d_aligned + size + Ut_mmap_align, sz);
return d_aligned;
}
/*
* ut_munmap_anon_aligned -- unmaps anonymous memory allocated by
* ut_mmap_anon_aligned
*/
int
ut_munmap_anon_aligned(const char *file, int line, const char *func,
void *start, size_t size)
{
return ut_munmap(file, line, func, (char *)start - Ut_mmap_align,
size + 2 * Ut_mmap_align);
}
| 4,238 | 20.963731 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_utils.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* ut_pmem2_utils.c -- utility helper functions for libpmem2 tests
*/
#include "unittest.h"
#include "ut_pmem2_utils.h"
/*
 * ut_pmem2_expect_return -- verifies the error code and prints an appropriate
 * error message in case of error
*/
void ut_pmem2_expect_return(const char *file, int line, const char *func,
int value, int expected)
{
if (value != expected) {
ut_fatal(file, line, func,
"unexpected return code (got %d, expected: %d): %s",
value, expected,
(value == 0 ? "success" : pmem2_errormsg()));
}
}
| 608 | 23.36 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_utils.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* ut_pmem2_utils.h -- utility helper functions for libpmem2 tests
*/
#ifndef UT_PMEM2_UTILS_H
#define UT_PMEM2_UTILS_H 1
/* verifies the error code and prints an appropriate error message on error */
#define UT_PMEM2_EXPECT_RETURN(value, expected) \
ut_pmem2_expect_return(__FILE__, __LINE__, __func__, \
value, expected)
void ut_pmem2_expect_return(const char *file, int line, const char *func,
int value, int expected);
#endif /* UT_PMEM2_UTILS_H */
| 552 | 26.65 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_fh.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* ut_fh.h -- OS-independent file handle / file descriptor interface
*/
#ifndef UT_FH_H
#define UT_FH_H
#include "os.h"
struct FHandle;
enum file_handle_type { FH_FD, FH_HANDLE };
#define FH_ACCMODE (7)
#define FH_READ (1 << 0)
#define FH_WRITE (1 << 1)
#define FH_RDWR (FH_READ | FH_WRITE)
#define FH_EXEC (1 << 2)
#define FH_CREAT (1 << 3)
#define FH_EXCL (1 << 4)
#define FH_TRUNC (1 << 5)
/* needs directory, on Windows it creates publicly visible file */
#define FH_TMPFILE (1 << 6)
#define FH_DIRECTORY (1 << 7)
#define UT_FH_OPEN(type, path, flags, ...) \
ut_fh_open(__FILE__, __LINE__, __func__, type, path, \
flags, ##__VA_ARGS__)
#define UT_FH_TRUNCATE(fhandle, size) \
ut_fh_truncate(__FILE__, __LINE__, __func__, fhandle, size)
#define UT_FH_GET_FD(fhandle) \
ut_fh_get_fd(__FILE__, __LINE__, __func__, fhandle)
#ifdef _WIN32
#define UT_FH_GET_HANDLE(fhandle) \
ut_fh_get_handle(__FILE__, __LINE__, __func__, fhandle)
#endif
#define UT_FH_CLOSE(fhandle) \
ut_fh_close(__FILE__, __LINE__, __func__, fhandle)
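/*
 * Minimal usage sketch (illustrative only; "testfile" is a made-up path
 * assumed to already exist):
 *
 *	struct FHandle *fh = UT_FH_OPEN(FH_FD, "testfile", FH_RDWR);
 *	int fd = UT_FH_GET_FD(fh);
 *	... use fd ...
 *	UT_FH_CLOSE(fh);
 */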
struct FHandle *ut_fh_open(const char *file, int line, const char *func,
enum file_handle_type type, const char *path, int flags, ...);
void ut_fh_truncate(const char *file, int line, const char *func,
struct FHandle *f, os_off_t length);
void ut_fh_close(const char *file, int line, const char *func,
struct FHandle *f);
enum file_handle_type ut_fh_get_handle_type(struct FHandle *fh);
int ut_fh_get_fd(const char *file, int line, const char *func,
struct FHandle *f);
#ifdef _WIN32
HANDLE ut_fh_get_handle(const char *file, int line, const char *func,
struct FHandle *f);
#endif
#endif
| 1,761 | 24.536232 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_source.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * ut_pmem2_source.c -- utility helper functions for libpmem2 source tests
*/
#include <libpmem2.h>
#include "unittest.h"
#include "ut_pmem2_source.h"
#include "ut_pmem2_utils.h"
/*
* ut_pmem2_source_from_fd -- sets fd (cannot fail)
*/
void
ut_pmem2_source_from_fd(const char *file, int line, const char *func,
struct pmem2_source **src, int fd)
{
int ret = pmem2_source_from_fd(src, fd);
ut_pmem2_expect_return(file, line, func, ret, 0);
}
void
ut_pmem2_source_from_fh(const char *file, int line, const char *func,
struct pmem2_source **src, struct FHandle *f)
{
enum file_handle_type type = ut_fh_get_handle_type(f);
int ret;
if (type == FH_FD) {
int fd = ut_fh_get_fd(file, line, func, f);
#ifdef _WIN32
ret = pmem2_source_from_handle(src, (HANDLE)_get_osfhandle(fd));
#else
ret = pmem2_source_from_fd(src, fd);
#endif
} else if (type == FH_HANDLE) {
#ifdef _WIN32
HANDLE h = ut_fh_get_handle(file, line, func, f);
ret = pmem2_source_from_handle(src, h);
#else
ut_fatal(file, line, func,
"FH_HANDLE not supported on !Windows");
#endif
} else {
ut_fatal(file, line, func,
"unknown file handle type");
}
ut_pmem2_expect_return(file, line, func, ret, 0);
}
void
ut_pmem2_source_alignment(const char *file, int line, const char *func,
struct pmem2_source *src, size_t *al)
{
int ret = pmem2_source_alignment(src, al);
ut_pmem2_expect_return(file, line, func, ret, 0);
}
void
ut_pmem2_source_delete(const char *file, int line, const char *func,
struct pmem2_source **src)
{
int ret = pmem2_source_delete(src);
ut_pmem2_expect_return(file, line, func, ret, 0);
UT_ASSERTeq(*src, NULL);
}
void
ut_pmem2_source_size(const char *file, int line, const char *func,
struct pmem2_source *src, size_t *size)
{
int ret = pmem2_source_size(src, size);
ut_pmem2_expect_return(file, line, func, ret, 0);
}
| 1,929 | 24.064935 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_signal.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* ut_signal.c -- unit test signal operations
*/
#include "unittest.h"
#ifdef _WIN32
/*
* On Windows, Access Violation exception does not raise SIGSEGV signal.
* The trick is to catch the exception and... call the signal handler.
*/
/*
* Sigactions[] - allows registering more than one signal/exception handler
*/
static struct sigaction Sigactions[NSIG];
/*
* exception_handler -- called for unhandled exceptions
*/
static LONG CALLBACK
exception_handler(_In_ PEXCEPTION_POINTERS ExceptionInfo)
{
DWORD excode = ExceptionInfo->ExceptionRecord->ExceptionCode;
if (excode == EXCEPTION_ACCESS_VIOLATION)
Sigactions[SIGSEGV].sa_handler(SIGSEGV);
return EXCEPTION_CONTINUE_EXECUTION;
}
/*
* signal_handler_wrapper -- (internal) wrapper for user-defined signal handler
*
* Before the specified handler function is executed, signal disposition
* is reset to SIG_DFL. This wrapper allows to handle subsequent signals
* without the need to set the signal disposition again.
*/
static void
signal_handler_wrapper(int signum)
{
_crt_signal_t retval = signal(signum, signal_handler_wrapper);
if (retval == SIG_ERR)
UT_FATAL("!signal: %d", signum);
if (Sigactions[signum].sa_handler)
Sigactions[signum].sa_handler(signum);
else
UT_FATAL("handler for signal: %d is not defined", signum);
}
#endif
/*
* ut_sigaction -- a sigaction that cannot return < 0
*/
int
ut_sigaction(const char *file, int line, const char *func,
int signum, struct sigaction *act, struct sigaction *oldact)
{
#ifndef _WIN32
int retval = sigaction(signum, act, oldact);
if (retval != 0)
ut_fatal(file, line, func, "!sigaction: %s",
os_strsignal(signum));
return retval;
#else
UT_ASSERT(signum < NSIG);
os_mutex_lock(&Sigactions_lock);
if (oldact)
*oldact = Sigactions[signum];
if (act)
Sigactions[signum] = *act;
os_mutex_unlock(&Sigactions_lock);
if (signum == SIGABRT) {
ut_suppress_errmsg();
}
if (signum == SIGSEGV) {
AddVectoredExceptionHandler(0, exception_handler);
}
_crt_signal_t retval = signal(signum, signal_handler_wrapper);
if (retval == SIG_ERR)
ut_fatal(file, line, func, "!signal: %d", signum);
if (oldact != NULL)
oldact->sa_handler = retval;
return 0;
#endif
}
| 2,306 | 23.806452 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pthread.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* ut_pthread.c -- unit test wrappers for pthread routines
*/
#include "unittest.h"
/*
* ut_thread_create -- a os_thread_create that cannot return an error
*/
int
ut_thread_create(const char *file, int line, const char *func,
os_thread_t *__restrict thread,
const os_thread_attr_t *__restrict attr,
void *(*start_routine)(void *), void *__restrict arg)
{
if ((errno = os_thread_create(thread, attr, start_routine, arg)) != 0)
ut_fatal(file, line, func, "!os_thread_create");
return 0;
}
/*
* ut_thread_join -- a os_thread_join that cannot return an error
*/
int
ut_thread_join(const char *file, int line, const char *func,
os_thread_t *thread, void **value_ptr)
{
if ((errno = os_thread_join(thread, value_ptr)) != 0)
ut_fatal(file, line, func, "!os_thread_join");
return 0;
}
| 901 | 23.378378 | 71 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ut_pmem2_map.h -- utility helper functions for libpmem2 map tests
*/
#ifndef UT_PMEM2_MAP_H
#define UT_PMEM2_MAP_H 1
/* a pmem2_map() that can't return NULL */
#define PMEM2_MAP(cfg, src, map) \
ut_pmem2_map(__FILE__, __LINE__, __func__, cfg, src, map)
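/*
 * Illustrative usage sketch only (cfg and src are assumed to have been
 * prepared with the config/source helpers):
 *
 *	struct pmem2_map *map;
 *	PMEM2_MAP(cfg, src, &map);
 *	void *addr = pmem2_map_get_address(map);
 */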
void ut_pmem2_map(const char *file, int line, const char *func,
struct pmem2_config *cfg, struct pmem2_source *src,
struct pmem2_map **map);
#endif /* UT_PMEM2_MAP_H */
| 522 | 25.15 | 68 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_config.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
 * ut_pmem2_config.c -- utility helper functions for libpmem2 config tests
*/
#include <libpmem2.h>
#include "unittest.h"
#include "ut_pmem2_config.h"
#include "ut_pmem2_utils.h"
/*
* ut_pmem2_config_new -- allocates cfg (cannot fail)
*/
void
ut_pmem2_config_new(const char *file, int line, const char *func,
struct pmem2_config **cfg)
{
int ret = pmem2_config_new(cfg);
ut_pmem2_expect_return(file, line, func, ret, 0);
UT_ASSERTne(*cfg, NULL);
}
/*
 * ut_pmem2_config_set_required_store_granularity -- sets granularity
*/
void
ut_pmem2_config_set_required_store_granularity(const char *file, int line,
const char *func, struct pmem2_config *cfg, enum pmem2_granularity g)
{
int ret = pmem2_config_set_required_store_granularity(cfg, g);
ut_pmem2_expect_return(file, line, func, ret, 0);
}
/*
* ut_pmem2_config_delete -- deallocates cfg (cannot fail)
*/
void
ut_pmem2_config_delete(const char *file, int line, const char *func,
struct pmem2_config **cfg)
{
int ret = pmem2_config_delete(cfg);
ut_pmem2_expect_return(file, line, func, ret, 0);
UT_ASSERTeq(*cfg, NULL);
}
| 1,181 | 23.122449 | 74 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_setup_integration.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * ut_pmem2_setup_integration.c -- libpmem2 setup functions using public API
* (for integration tests)
*/
#include <libpmem2.h>
#include "ut_pmem2_config.h"
#include "ut_pmem2_setup_integration.h"
#include "ut_pmem2_source.h"
#include "unittest.h"
/*
* ut_pmem2_prepare_config_integration -- fill pmem2_config in minimal scope
*/
void
ut_pmem2_prepare_config_integration(const char *file, int line,
const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
int fd, enum pmem2_granularity granularity)
{
ut_pmem2_config_new(file, line, func, cfg);
ut_pmem2_config_set_required_store_granularity(file, line, func, *cfg,
granularity);
ut_pmem2_source_from_fd(file, line, func, src, fd);
}
| 804 | 26.758621 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_source.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ut_pmem2_source.h -- utility helper functions for libpmem2 source tests
*/
#ifndef UT_PMEM2_SOURCE_H
#define UT_PMEM2_SOURCE_H 1
#include "ut_fh.h"
/* a pmem2_source_from_fd() that can't fail */
#define PMEM2_SOURCE_FROM_FD(src, fd) \
ut_pmem2_source_from_fd(__FILE__, __LINE__, __func__, src, fd)
/* creates a pmem2_source from an FHandle; can't fail */
#define PMEM2_SOURCE_FROM_FH(src, fh) \
ut_pmem2_source_from_fh(__FILE__, __LINE__, __func__, src, fh)
/* a pmem2_source_alignment() that can't return an error */
#define PMEM2_SOURCE_ALIGNMENT(src, al) \
ut_pmem2_source_alignment(__FILE__, __LINE__, __func__, src, al)
/* a pmem2_source_delete() that can't return NULL */
#define PMEM2_SOURCE_DELETE(src) \
ut_pmem2_source_delete(__FILE__, __LINE__, __func__, src)
/* a pmem2_source_size() that can't fail */
#define PMEM2_SOURCE_SIZE(src, size) \
ut_pmem2_source_size(__FILE__, __LINE__, __func__, src, size)
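/*
 * Illustrative usage sketch only (fd is assumed to be an open file
 * descriptor provided by the test):
 *
 *	struct pmem2_source *src;
 *	PMEM2_SOURCE_FROM_FD(&src, fd);
 *	size_t size;
 *	PMEM2_SOURCE_SIZE(src, &size);
 *	PMEM2_SOURCE_DELETE(&src);
 */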
void ut_pmem2_source_from_fd(const char *file, int line, const char *func,
struct pmem2_source **src, int fd);
void ut_pmem2_source_from_fh(const char *file, int line, const char *func,
struct pmem2_source **src, struct FHandle *fhandle);
void ut_pmem2_source_alignment(const char *file, int line, const char *func,
struct pmem2_source *src, size_t *alignment);
void ut_pmem2_source_delete(const char *file, int line, const char *func,
struct pmem2_source **src);
void ut_pmem2_source_size(const char *file, int line, const char *func,
struct pmem2_source *src, size_t *size);
#endif /* UT_PMEM2_SOURCE_H */
| 1,667 | 33.040816 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_setup.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * ut_pmem2_setup.c -- libpmem2 setup functions using non-public API
* (only for unit tests)
*/
#include "../../libpmem2/config.h"
#include "ut_pmem2_source.h"
#include "ut_pmem2_setup.h"
#include "unittest.h"
/*
 * ut_pmem2_prepare_config -- fills pmem2_config; this function cannot set
 * a wrong value
*/
void
ut_pmem2_prepare_config(struct pmem2_config *cfg, struct pmem2_source **src,
struct FHandle **fh, enum file_handle_type fh_type, const char *file,
size_t length, size_t offset, int access)
{
pmem2_config_init(cfg);
cfg->offset = offset;
cfg->length = length;
cfg->requested_max_granularity = PMEM2_GRANULARITY_PAGE;
*fh = UT_FH_OPEN(fh_type, file, access);
PMEM2_SOURCE_FROM_FH(src, *fh);
}
| 805 | 25 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * ut_pmem2_map.c -- utility helper functions for libpmem2 map tests
*/
#include <libpmem2.h>
#include "unittest.h"
#include "ut_pmem2_map.h"
#include "ut_pmem2_utils.h"
/*
* ut_pmem2_map -- allocates map (cannot fail)
*/
void
ut_pmem2_map(const char *file, int line, const char *func,
struct pmem2_config *cfg, struct pmem2_source *src,
struct pmem2_map **map)
{
int ret = pmem2_map(cfg, src, map);
ut_pmem2_expect_return(file, line, func, ret, 0);
UT_ASSERTne(*map, NULL);
}
| 572 | 21.92 | 68 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/unittest/ut_pmem2_setup_integration.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ut_pmem2_setup_integration.h -- libpmem2 setup functions using public API
* (for integration tests)
*/
#ifndef UT_PMEM2_SETUP_INTEGRATION_H
#define UT_PMEM2_SETUP_INTEGRATION_H 1
#include "ut_fh.h"
/* a prepare_config() that can't set wrong value */
#define PMEM2_PREPARE_CONFIG_INTEGRATION(cfg, src, fd, g) \
ut_pmem2_prepare_config_integration( \
__FILE__, __LINE__, __func__, cfg, src, fd, g)
void ut_pmem2_prepare_config_integration(const char *file, int line,
const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
int fd, enum pmem2_granularity granularity);
#endif /* UT_PMEM2_SETUP_INTEGRATION_H */
| 728 | 29.375 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_movnt_align/pmem_movnt_align.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_movnt_align.c -- unit test for functions with non-temporal stores
*
 * usage: pmem_movnt_align [C|F|B|S] heavy=[0|1]
*
* C - pmem_memcpy_persist()
* B - pmem_memmove_persist() in backward direction
* F - pmem_memmove_persist() in forward direction
* S - pmem_memset_persist()
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem.h"
#include "unittest.h"
#include "movnt_align_common.h"
#define N_BYTES (Ut_pagesize * 2)
static int Heavy;
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_persist(pmemdest, src, len);
}
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_nodrain(pmemdest, src, len);
}
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_persist(pmemdest, src, len);
}
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_nodrain(pmemdest, src, len);
}
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
(void) flags;
return pmem_memset_persist(pmemdest, c, len);
}
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
(void) flags;
return pmem_memset_nodrain(pmemdest, c, len);
}
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
check_memmove(doff, soff, len, pmem_memmove_persist_wrapper, 0);
if (!Heavy)
return;
check_memmove(doff, soff, len, pmem_memmove_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memmove(doff, soff, len, pmem_memmove, Flags[i]);
}
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
check_memcpy(doff, soff, len, pmem_memcpy_persist_wrapper, 0);
if (!Heavy)
return;
check_memcpy(doff, soff, len, pmem_memcpy_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memcpy(doff, soff, len, pmem_memcpy, Flags[i]);
}
static void
check_memset_variants(size_t off, size_t len)
{
check_memset(off, len, pmem_memset_persist_wrapper, 0);
if (!Heavy)
return;
check_memset(off, len, pmem_memset_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memset(off, len, pmem_memset, Flags[i]);
}
int
main(int argc, char *argv[])
{
if (argc != 3)
UT_FATAL("usage: %s type heavy=[0|1]", argv[0]);
char type = argv[1][0];
Heavy = argv[2][0] == '1';
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_movnt_align %c %s %savx %savx512f", type,
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
size_t page_size = Ut_pagesize;
size_t s;
switch (type) {
case 'C': /* memcpy */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Src == NULL || Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memcpy with 0 size */
check_memcpy_variants(0, 0, 0);
/* check memcpy with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(0, 0, N_BYTES - s);
/* check memcpy with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, 0, N_BYTES - s);
/* check memcpy with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, N_BYTES);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
case 'B': /* memmove backward */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Dst = Src + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in backward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in backward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in backward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in backward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
break;
case 'F': /* memmove forward */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Src = Dst + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in forward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in forward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in forward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in forward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
break;
case 'S': /* memset */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memset with 0 size */
check_memset_variants(0, 0);
/* check memset with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(0, N_BYTES - s);
/* check memset with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - s);
/* check memset with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
default:
UT_FATAL("!wrong type of test");
break;
}
DONE(NULL);
}
| 6,229 | 23.92 | 78 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_memblock/obj_memblock.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_memblock.c -- unit test for memblock interface
*/
#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "unittest.h"
#include "heap.h"
#define NCHUNKS 10
static PMEMobjpool *pop;
FUNC_MOCK(operation_add_typed_entry, int, struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type en_type)
FUNC_MOCK_RUN_DEFAULT {
uint64_t *pval = ptr;
switch (type) {
case ULOG_OPERATION_SET:
*pval = value;
break;
case ULOG_OPERATION_AND:
*pval &= value;
break;
case ULOG_OPERATION_OR:
*pval |= value;
break;
default:
UT_ASSERT(0);
}
return 0;
}
FUNC_MOCK_END
FUNC_MOCK(operation_add_entry, int, struct operation_context *ctx, void *ptr,
uint64_t value, ulog_operation_type type)
FUNC_MOCK_RUN_DEFAULT {
/* just call the mock above - the entry type doesn't matter */
return operation_add_typed_entry(ctx, ptr, value, type,
LOG_TRANSIENT);
}
FUNC_MOCK_END
static void
test_detect(void)
{
struct memory_block mhuge_used = { .chunk_id = 0, 0, 0, 0 };
struct memory_block mhuge_free = { .chunk_id = 1, 0, 0, 0 };
struct memory_block mrun = { .chunk_id = 2, 0, 0, 0 };
struct heap_layout *layout = pop->heap.layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
layout->zone0.chunk_headers[2].size_idx = 1;
layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;
memblock_rebuild_state(&pop->heap, &mhuge_used);
memblock_rebuild_state(&pop->heap, &mhuge_free);
memblock_rebuild_state(&pop->heap, &mrun);
UT_ASSERTeq(mhuge_used.type, MEMORY_BLOCK_HUGE);
UT_ASSERTeq(mhuge_free.type, MEMORY_BLOCK_HUGE);
UT_ASSERTeq(mrun.type, MEMORY_BLOCK_RUN);
}
static void
test_block_size(void)
{
struct memory_block mhuge = { .chunk_id = 0, 0, 0, 0 };
struct memory_block mrun = { .chunk_id = 1, 0, 0, 0 };
struct palloc_heap *heap = &pop->heap;
struct heap_layout *layout = heap->layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_RUN;
struct chunk_run *run = (struct chunk_run *)
&layout->zone0.chunks[1];
run->hdr.block_size = 1234;
memblock_rebuild_state(&pop->heap, &mhuge);
memblock_rebuild_state(&pop->heap, &mrun);
UT_ASSERTne(mhuge.m_ops, NULL);
UT_ASSERTne(mrun.m_ops, NULL);
UT_ASSERTeq(mhuge.m_ops->block_size(&mhuge), CHUNKSIZE);
UT_ASSERTeq(mrun.m_ops->block_size(&mrun), 1234);
}
static void
test_prep_hdr(void)
{
struct memory_block mhuge_used = { .chunk_id = 0, 0, .size_idx = 1, 0 };
struct memory_block mhuge_free = { .chunk_id = 1, 0, .size_idx = 1, 0 };
struct memory_block mrun_used = { .chunk_id = 2, 0,
.size_idx = 4, .block_off = 0 };
struct memory_block mrun_free = { .chunk_id = 2, 0,
.size_idx = 4, .block_off = 4 };
struct memory_block mrun_large_used = { .chunk_id = 2, 0,
.size_idx = 64, .block_off = 64 };
struct memory_block mrun_large_free = { .chunk_id = 2, 0,
.size_idx = 64, .block_off = 128 };
struct palloc_heap *heap = &pop->heap;
struct heap_layout *layout = heap->layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
layout->zone0.chunk_headers[2].size_idx = 1;
layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;
struct chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[2];
run->hdr.block_size = 128;
uint64_t *bitmap = (uint64_t *)run->content;
bitmap[0] = 0b1111;
bitmap[1] = ~0ULL;
bitmap[2] = 0ULL;
memblock_rebuild_state(heap, &mhuge_used);
memblock_rebuild_state(heap, &mhuge_free);
memblock_rebuild_state(heap, &mrun_used);
memblock_rebuild_state(heap, &mrun_free);
memblock_rebuild_state(heap, &mrun_large_used);
memblock_rebuild_state(heap, &mrun_large_free);
UT_ASSERTne(mhuge_used.m_ops, NULL);
mhuge_used.m_ops->prep_hdr(&mhuge_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(layout->zone0.chunk_headers[0].type, CHUNK_TYPE_FREE);
mhuge_free.m_ops->prep_hdr(&mhuge_free, MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(layout->zone0.chunk_headers[1].type, CHUNK_TYPE_USED);
mrun_used.m_ops->prep_hdr(&mrun_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(bitmap[0], 0ULL);
mrun_free.m_ops->prep_hdr(&mrun_free, MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(bitmap[0], 0b11110000);
mrun_large_used.m_ops->prep_hdr(&mrun_large_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(bitmap[1], 0ULL);
mrun_large_free.m_ops->prep_hdr(&mrun_large_free,
MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(bitmap[2], ~0ULL);
}
static int
fake_persist(void *base, const void *addr, size_t size, unsigned flags)
{
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_memblock");
PMEMobjpool pool;
pop = &pool;
pop->heap.layout = ZALLOC(sizeof(struct heap_layout) +
NCHUNKS * sizeof(struct chunk));
pop->heap.p_ops.persist = fake_persist;
test_detect();
test_block_size();
test_prep_hdr();
FREE(pop->heap.layout);
DONE(NULL);
}
| 5,320 | 27.153439 | 77 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_memblock/mocks_windows.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of memops functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_memblock test.
* It would replace default implementation with mocked functions defined
* in obj_memblock.c.
*
* These defines could be also passed as preprocessor definitions.
*/
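/*
 * For reference, the mocked replacements live in obj_memblock.c and follow
 * the FUNC_MOCK pattern used throughout this test suite, e.g. (sketch
 * mirroring the definitions there):
 *
 *	FUNC_MOCK(operation_add_entry, int, struct operation_context *ctx,
 *		void *ptr, uint64_t value, ulog_operation_type type)
 *	FUNC_MOCK_RUN_DEFAULT {
 *		...
 *	}
 *	FUNC_MOCK_END
 */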
#ifndef WRAP_REAL
#define operation_add_typed_entry __wrap_operation_add_typed_entry
#define operation_add_entry __wrap_operation_add_entry
#endif
| 634 | 29.238095 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmreorder_flushes/pmreorder_flushes.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmreorder_flushes.c -- test for store reordering with flushes
* in different barriers
*
 * usage: pmreorder_flushes g|c file log_file
*
* g - write data in a specific manner - some flushes
* of the stores are made in different barriers,
* c - check data consistency - stores should be applied only
* after flush - no matter in which barrier the flush will happen
*
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define STORE_SIZE 64
static FILE *fp;
struct stores_fields {
char A[STORE_SIZE];
char B[STORE_SIZE];
char C[STORE_SIZE];
char D[STORE_SIZE];
char E[STORE_SIZE];
};
/*
* write_consistent -- (internal) write data in a specific order
*/
static void
write_consistent(struct stores_fields *sf)
{
/*
* STORE (A)
* STORE (B)
* STORE (C)
*
* FLUSH (A, B) (no flush C)
* FENCE
*/
pmem_memset(&sf->A, -1, sizeof(sf->A), PMEM_F_MEM_NODRAIN);
pmem_memset(&sf->B, 2, sizeof(sf->B), PMEM_F_MEM_NODRAIN);
pmem_memset(&sf->C, 3, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
pmem_drain();
/*
* STORE (A)
* STORE (D)
*
* FLUSH (D) (no flush A, still no flush C)
* FENCE
*/
pmem_memset(sf->A, 1, sizeof(sf->A), PMEM_F_MEM_NOFLUSH);
pmem_memset(sf->D, 4, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
pmem_drain();
/*
	 * There are two transitive (unflushed) stores now: A (not modified
	 * in this barrier) and C (which is modified again).
*
* STORE (D)
* STORE (C)
*
* FLUSH (D) (still no flush A and C)
* FENCE
*/
pmem_memset(sf->D, 5, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
pmem_memset(sf->C, 8, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
pmem_drain();
/*
	 * E is modified just to add an additional step to the log.
	 * The flushed values of A and B should still be -1 and 2;
	 * A's latest store and both stores to C remain unflushed.
*
* STORE (E)
* FLUSH (E)
* FENCE
*/
pmem_memset(sf->E, 6, sizeof(sf->E), PMEM_F_MEM_NODRAIN);
pmem_drain();
/*
* FLUSH (A, C)
* FENCE
*/
pmem_flush(sf->A, sizeof(sf->A));
pmem_flush(sf->C, sizeof(sf->C));
pmem_drain();
}
/*
* check_consistency -- (internal) check if stores are made in proper manner
*/
static int
check_consistency(struct stores_fields *sf)
{
fprintf(fp, "A=%d B=%d C=%d D=%d E=%d\n",
sf->A[0], sf->B[0], sf->C[0], sf->D[0], sf->E[0]);
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "pmreorder_flushes");
util_init();
if ((argc < 4) || (strchr("gc", argv[1][0]) == NULL) ||
argv[1][1] != '\0')
UT_FATAL("usage: %s g|c file log_file", argv[0]);
int fd = OPEN(argv[2], O_RDWR);
size_t size;
/* mmap and register in valgrind pmemcheck */
void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
UT_ASSERTne(map, NULL);
struct stores_fields *sf = map;
char opt = argv[1][0];
/* clear the struct to get a consistent start state for writing */
if (strchr("g", opt))
pmem_memset_persist(sf, 0, sizeof(*sf));
switch (opt) {
case 'g':
write_consistent(sf);
break;
case 'c':
fp = os_fopen(argv[3], "a");
if (fp == NULL)
UT_FATAL("!fopen");
int ret;
ret = check_consistency(sf);
fclose(fp);
return ret;
default:
UT_FATAL("Unrecognized option %c", opt);
}
CLOSE(fd);
DONE(NULL);
}
| 3,207 | 20.105263 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/libpmempool_api_win/libpmempool_test_win.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* libpmempool_test_win -- test of libpmempool.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
/*
* Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
* test libpmempool against various pmempool_check_args structure versions.
*/
struct pmempool_check_args_1_0 {
const wchar_t *path;
const wchar_t *backup_path;
enum pmempool_pool_type pool_type;
int flags;
};
/*
* check_pool -- check given pool
*/
static void
check_pool(struct pmempool_check_argsW *args, size_t args_size)
{
const char *status2str[] = {
[PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent",
[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent",
[PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired",
[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair",
[PMEMPOOL_CHECK_RESULT_ERROR] = "fatal",
};
PMEMpoolcheck *ppc = pmempool_check_initW(args, args_size);
if (!ppc) {
char buff[UT_MAX_ERR_MSG];
ut_strerror(errno, buff, UT_MAX_ERR_MSG);
UT_OUT("Error: %s", buff);
return;
}
struct pmempool_check_statusW *status = NULL;
while ((status = pmempool_checkW(ppc)) != NULL) {
char *msg = ut_toUTF8(status->str.msg);
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
UT_OUT("%s", msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
UT_OUT("%s", msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
UT_OUT("%s", msg);
status->str.answer = L"yes";
break;
default:
pmempool_check_end(ppc);
free(msg);
exit(EXIT_FAILURE);
}
free(msg);
}
enum pmempool_check_result ret = pmempool_check_end(ppc);
UT_OUT("status = %s", status2str[ret]);
}
/*
* print_usage -- print usage of program
*/
static void
print_usage(wchar_t *name)
{
UT_OUT("Usage: %S [-t <pool_type>] [-r <repair>] [-d <dry_run>] "
"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
"[-b <backup_path>] <pool_path>", name);
}
/*
 * set_flag -- parse the value and set the flag according to the obtained value
*/
static void
set_flag(const wchar_t *value, int *flags, int flag)
{
if (_wtoi(value) > 0)
*flags |= flag;
else
*flags &= ~flag;
}
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "libpmempool_test_win");
struct pmempool_check_args_1_0 args = {
.path = NULL,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_LOG,
.flags = PMEMPOOL_CHECK_FORMAT_STR |
PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
};
size_t args_size = sizeof(struct pmempool_check_args_1_0);
for (int i = 1; i < argc - 1; i += 2) {
wchar_t *optarg = argv[i + 1];
if (wcscmp(L"-t", argv[i]) == 0) {
if (wcscmp(optarg, L"blk") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
} else if (wcscmp(optarg, L"log") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
} else if (wcscmp(optarg, L"obj") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
} else if (wcscmp(optarg, L"btt") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
} else {
args.pool_type =
(uint32_t)wcstoul(optarg, NULL, 0);
}
} else if (wcscmp(L"-r", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
} else if (wcscmp(L"-d", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
} else if (wcscmp(L"-a", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
} else if (wcscmp(L"-y", argv[i]) == 0) {
set_flag(optarg, &args.flags,
PMEMPOOL_CHECK_ALWAYS_YES);
} else if (wcscmp(L"-s", argv[i]) == 0) {
args_size = wcstoul(optarg, NULL, 0);
} else if (wcscmp(L"-b", argv[i]) == 0) {
args.backup_path = optarg;
} else {
print_usage(argv[0]);
UT_FATAL("unknown option: %c", argv[i][1]);
}
}
args.path = argv[argc - 1];
check_pool((struct pmempool_check_argsW *)&args, args_size);
DONEW(NULL);
}
| 3,912 | 24.743421 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/pmem_is_pmem_windows/pmem_is_pmem_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem_windows.c -- Windows specific unit test for is_pmem_detect()
*
* usage: pmem_is_pmem_windows file [env]
*/
#include "unittest.h"
#include "pmem.h"
#include "queue.h"
#include "win_mmap.h"
#include "util.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
static int pmem_is_pmem_force = 0;
enum test_mmap_scenarios {
TEST_MMAP_SCENARIO_UNKNOWN,
TEST_MMAP_SCENARIO_BEGIN_HOLE,
TEST_MMAP_SCENARIO_END_HOLE,
TEST_MMAP_SCENARIO_MIDDLE_HOLE,
TEST_MMAP_SCENARIO_NO_HOLE
};
enum test_mmap_scenarios
get_mmap_scenarios(char *name)
{
if (stricmp(name, "nothing") == 0)
return TEST_MMAP_SCENARIO_NO_HOLE;
if (stricmp(name, "begin") == 0)
return TEST_MMAP_SCENARIO_BEGIN_HOLE;
if (stricmp(name, "end") == 0)
return TEST_MMAP_SCENARIO_END_HOLE;
if (stricmp(name, "middle") == 0)
return TEST_MMAP_SCENARIO_MIDDLE_HOLE;
return TEST_MMAP_SCENARIO_UNKNOWN;
}
/*
* mmap_file_mapping_comparer -- (internal) compares the two file mapping
* trackers
*/
static LONG_PTR
mmap_file_mapping_comparer(PFILE_MAPPING_TRACKER a, PFILE_MAPPING_TRACKER b)
{
return ((LONG_PTR)a->BaseAddress - (LONG_PTR)b->BaseAddress);
}
/*
* worker -- the work each thread performs
*/
static void *
worker(void *arg)
{
int *ret = (int *)arg;
/*
* We honor the force just to let the scenarios that require pmem fs
* work in the environment that forces pmem.
*
* NOTE: We can't use pmem_is_pmem instead of checking for the ENV
* variable explicitly, because we want to call is_pmem_detect that is
* defined in this test so that it will use the FileMappingQHead
* that's defined here. Because we are crafting the Q in the test.
*/
if (pmem_is_pmem_force)
*ret = 1;
else
*ret = is_pmem_detect(Addr, Size);
return NULL;
}
extern SRWLOCK FileMappingQLock;
extern struct FMLHead FileMappingQHead;
int
main(int argc, char *argv[])
{
HANDLE file_map;
SIZE_T chunk_length;
enum test_mmap_scenarios scenario;
int still_holey = 1;
int already_holey = 0;
START(argc, argv, "pmem_is_pmem_windows");
if (argc != 3)
UT_FATAL("usage: %s file {begin|end|middle|nothing}", argv[0]);
util_init(); /* to initialize Mmap_align */
char *str_pmem_is_pmem_force = os_getenv("PMEM_IS_PMEM_FORCE");
if (str_pmem_is_pmem_force && atoi(str_pmem_is_pmem_force) == 1)
pmem_is_pmem_force = 1;
scenario = get_mmap_scenarios(argv[2]);
UT_ASSERT(scenario != TEST_MMAP_SCENARIO_UNKNOWN);
int fd = OPEN(argv[1], O_RDWR);
os_stat_t stbuf;
FSTAT(fd, &stbuf);
Size = stbuf.st_size;
chunk_length = Mmap_align;
/*
* We don't support too small a file size.
*/
UT_ASSERT(Size / 8 > chunk_length);
file_map = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL,
PAGE_READONLY, 0, 0, NULL);
UT_ASSERT(file_map != NULL);
Addr = MapViewOfFile(file_map, FILE_MAP_READ, 0, 0, 0);
/*
	 * let's set up FileMappingQHead such that it appears to have a lot of
	 * DAX mappings created through our mmap. Here are our cases based
	 * on the input:
	 * - the entire region is mapped through our mmap
	 * - there is a region at the beginning that's not mapped through our
	 * mmap
	 * - there is a region at the end that's not mapped through our mmap
	 * - there is a region in the middle that's not mapped through our mmap
*/
for (size_t offset = 0;
offset < Size;
offset += chunk_length) {
void *base_address = (void *)((char *)Addr + offset);
switch (scenario) {
case TEST_MMAP_SCENARIO_BEGIN_HOLE:
if (still_holey &&
((offset == 0) || ((rand() % 2) == 0)) &&
(offset < (Size / 2)))
continue;
else
still_holey = 0;
break;
case TEST_MMAP_SCENARIO_END_HOLE:
if ((offset > (Size / 2)) &&
(already_holey || ((rand() % 2) == 0) ||
(offset >= (Size - chunk_length)))) {
already_holey = 1;
continue;
} else
UT_ASSERT(!already_holey);
break;
case TEST_MMAP_SCENARIO_MIDDLE_HOLE:
if ((((offset > (Size / 8)) && ((rand() % 2) == 0)) ||
(offset > (Size / 8) * 6)) &&
(offset < (Size / 8) * 7))
continue;
break;
}
PFILE_MAPPING_TRACKER mt =
MALLOC(sizeof(struct FILE_MAPPING_TRACKER));
mt->Flags = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED;
mt->FileHandle = (HANDLE)_get_osfhandle(fd);
mt->FileMappingHandle = file_map;
mt->BaseAddress = base_address;
mt->EndAddress = (void *)((char *)base_address + chunk_length);
mt->Access = FILE_MAP_READ;
mt->Offset = offset;
AcquireSRWLockExclusive(&FileMappingQLock);
PMDK_SORTEDQ_INSERT(&FileMappingQHead, mt, ListEntry,
FILE_MAPPING_TRACKER,
mmap_file_mapping_comparer);
ReleaseSRWLockExclusive(&FileMappingQLock);
}
CloseHandle(file_map);
CLOSE(fd);
os_thread_t threads[NTHREAD];
int ret[NTHREAD];
/* kick off NTHREAD threads */
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
/* wait for all the threads to complete */
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
/* verify that all the threads return the same value */
for (int i = 1; i < NTHREAD; i++)
UT_ASSERTeq(ret[0], ret[i]);
UT_OUT("%d", ret[0]);
DONE(NULL);
}
/*
* Since libpmem is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmem_init)
MSVC_DESTR(libpmem_fini)
| 6,946 | 27.239837 | 76 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_fragmentation2/obj_fragmentation2.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
 * obj_fragmentation2.c -- measures average heap external fragmentation
*
* This test is based on the workloads proposed in:
* Log-structured Memory for DRAM-based Storage
* by Stephen M. Rumble, Ankita Kejriwal, and John Ousterhout
*
* https://www.usenix.org/system/files/conference/fast14/fast14-paper_rumble.pdf
*/
#include <stdlib.h>
#include <math.h>
#include "rand.h"
#include "unittest.h"
#define LAYOUT_NAME "obj_fragmentation"
#define MEGABYTE (1ULL << 20)
#define GIGABYTE (1ULL << 30)
#define RRAND(max, min)\
((min) == (max) ? (min) : (rnd64() % ((max) - (min)) + (min)))
static PMEMoid *objects;
static size_t nobjects;
static size_t allocated_current;
#define MAX_OBJECTS (200ULL * 1000000)
#define ALLOC_TOTAL (5000ULL * MEGABYTE)
#define ALLOC_CURR (1000 * MEGABYTE)
#define FREES_P 200
#define DEFAULT_FILE_SIZE (3 * GIGABYTE)
static void
shuffle_objects(size_t start, size_t end)
{
PMEMoid tmp;
size_t dest;
for (size_t n = start; n < end; ++n) {
dest = RRAND(nobjects - 1, 0);
tmp = objects[n];
objects[n] = objects[dest];
objects[dest] = tmp;
}
}
static PMEMoid
remove_last()
{
UT_ASSERT(nobjects > 0);
PMEMoid obj = objects[--nobjects];
return obj;
}
static void
delete_objects(PMEMobjpool *pop, float pct)
{
size_t nfree = (size_t)(nobjects * pct);
PMEMoid oid = pmemobj_root(pop, 1);
shuffle_objects(0, nobjects);
while (nfree--) {
oid = remove_last();
allocated_current -= pmemobj_alloc_usable_size(oid);
pmemobj_free(&oid);
}
}
/*
* object_next_size -- generates random sizes in range with
* exponential distribution
*/
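/*
 * Informal note derived from the formula below: for n drawn uniformly
 * from [0, 1] the result is min + (max - min) * exp(-4 * n), so sizes are
 * heavily skewed towards `min' (the median is roughly
 * min + 0.14 * (max - min)).
 */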
static size_t
object_next_size(size_t max, size_t min)
{
float fmax = (float)max;
float fmin = (float)min;
float n = (float)rnd64() / ((float)UINT64_MAX / 1.0f);
return (size_t)(fmin + (fmax - fmin) * (float)exp(n * - 4.0));
}
/*
* allocate_exponential -- allocates objects from a large range of sizes.
*
* This is designed to stress the recycler subsystem that will have to
* constantly look for freed/empty runs and reuse them.
*
* For small pools (single digit gigabytes), this test will show large
* fragmentation because it can use a large number of runs - which is fine.
*/
static void
allocate_exponential(PMEMobjpool *pop, size_t size_min, size_t size_max)
{
size_t allocated_total = 0;
PMEMoid oid;
while (allocated_total < ALLOC_TOTAL) {
size_t s = object_next_size(size_max, size_min);
int ret = pmemobj_alloc(pop, &oid, s, 0, NULL, NULL);
if (ret != 0) {
/* delete a random percentage of allocated objects */
float delete_pct = (float)RRAND(90, 10) / 100.0f;
delete_objects(pop, delete_pct);
continue;
}
s = pmemobj_alloc_usable_size(oid);
objects[nobjects++] = oid;
UT_ASSERT(nobjects < MAX_OBJECTS);
allocated_total += s;
allocated_current += s;
}
}
static void
allocate_objects(PMEMobjpool *pop, size_t size_min, size_t size_max)
{
size_t allocated_total = 0;
size_t sstart = 0;
PMEMoid oid;
while (allocated_total < ALLOC_TOTAL) {
size_t s = RRAND(size_max, size_min);
pmemobj_alloc(pop, &oid, s, 0, NULL, NULL);
UT_ASSERTeq(OID_IS_NULL(oid), 0);
s = pmemobj_alloc_usable_size(oid);
objects[nobjects++] = oid;
UT_ASSERT(nobjects < MAX_OBJECTS);
allocated_total += s;
allocated_current += s;
if (allocated_current > ALLOC_CURR) {
shuffle_objects(sstart, nobjects);
for (int i = 0; i < FREES_P; ++i) {
oid = remove_last();
allocated_current -=
pmemobj_alloc_usable_size(oid);
pmemobj_free(&oid);
}
sstart = nobjects;
}
}
}
typedef void workload(PMEMobjpool *pop);
static void w0(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
}
static void w1(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
allocate_objects(pop, 130, 130);
}
static void w2(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
delete_objects(pop, 0.9F);
allocate_objects(pop, 130, 130);
}
static void w3(PMEMobjpool *pop) {
allocate_objects(pop, 100, 150);
allocate_objects(pop, 200, 250);
}
static void w4(PMEMobjpool *pop) {
allocate_objects(pop, 100, 150);
delete_objects(pop, 0.9F);
allocate_objects(pop, 200, 250);
}
static void w5(PMEMobjpool *pop) {
allocate_objects(pop, 100, 200);
delete_objects(pop, 0.5);
allocate_objects(pop, 1000, 2000);
}
static void w6(PMEMobjpool *pop) {
allocate_objects(pop, 1000, 2000);
delete_objects(pop, 0.9F);
allocate_objects(pop, 1500, 2500);
}
static void w7(PMEMobjpool *pop) {
allocate_objects(pop, 50, 150);
delete_objects(pop, 0.9F);
allocate_objects(pop, 5000, 15000);
}
static void w8(PMEMobjpool *pop) {
allocate_objects(pop, 2 * MEGABYTE, 2 * MEGABYTE);
}
static void w9(PMEMobjpool *pop) {
allocate_exponential(pop, 1, 5 * MEGABYTE);
}
static workload *workloads[] = {
w0, w1, w2, w3, w4, w5, w6, w7, w8, w9
};
static float workloads_target[] = {
0.01f, 0.01f, 0.01f, 0.9f, 0.8f, 0.7f, 0.3f, 0.8f, 0.73f, 3.0f
};
static float workloads_defrag_target[] = {
0.01f, 0.01f, 0.01f, 0.01f, 0.01f, 0.05f, 0.09f, 0.13f, 0.01f, 0.16f
};
/*
 * The last two workloads operate mostly on huge chunks, so run
* stats are useless.
*/
static float workloads_stat_target[] = {
0.01f, 1.1f, 1.1f, 0.86f, 0.76f, 1.01f, 0.23f, 1.24f, 2100.f, 2100.f
};
static float workloads_defrag_stat_target[] = {
0.01f, 0.01f, 0.01f, 0.02f, 0.02f, 0.04f, 0.08f, 0.12f, 2100.f, 2100.f
};
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_fragmentation2");
if (argc < 3)
UT_FATAL("usage: %s filename workload [seed] [defrag]",
argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, DEFAULT_FILE_SIZE,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
int w = atoi(argv[2]);
if (argc > 3)
randomize((unsigned)atoi(argv[3]));
else
randomize(0);
int defrag = argc > 4 ? atoi(argv[4]) != 0 : 0;
objects = ZALLOC(sizeof(PMEMoid) * MAX_OBJECTS);
UT_ASSERTne(objects, NULL);
workloads[w](pop);
/* this is to trigger global recycling */
pmemobj_defrag(pop, NULL, 0, NULL);
size_t active = 0;
size_t allocated = 0;
pmemobj_ctl_get(pop, "stats.heap.run_active", &active);
pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated);
float stat_frag = 0;
if (active != 0 && allocated != 0) {
stat_frag = ((float)active / allocated) - 1.f;
UT_ASSERT(stat_frag <= workloads_stat_target[w]);
}
if (defrag) {
PMEMoid **objectsf = ZALLOC(sizeof(PMEMoid) * nobjects);
for (size_t i = 0; i < nobjects; ++i)
objectsf[i] = &objects[i];
pmemobj_defrag(pop, objectsf, nobjects, NULL);
FREE(objectsf);
active = 0;
allocated = 0;
/* this is to trigger global recycling */
pmemobj_defrag(pop, NULL, 0, NULL);
pmemobj_ctl_get(pop, "stats.heap.run_active", &active);
pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated);
if (active != 0 && allocated != 0) {
stat_frag = ((float)active / allocated) - 1.f;
UT_ASSERT(stat_frag <= workloads_defrag_stat_target[w]);
}
}
PMEMoid oid;
size_t remaining = 0;
size_t chunk = (100); /* calc at chunk level */
while (pmemobj_alloc(pop, &oid, chunk, 0, NULL, NULL) == 0)
remaining += pmemobj_alloc_usable_size(oid) + 16;
size_t allocated_sum = 0;
oid = pmemobj_root(pop, 1);
for (size_t n = 0; n < nobjects; ++n) {
if (OID_IS_NULL(objects[n]))
continue;
oid = objects[n];
allocated_sum += pmemobj_alloc_usable_size(oid) + 16;
}
size_t used = DEFAULT_FILE_SIZE - remaining;
float frag = ((float)used / allocated_sum) - 1.f;
UT_OUT("FRAG: %f\n", frag);
UT_ASSERT(frag <= (defrag ?
workloads_defrag_target[w] : workloads_target[w]));
pmemobj_close(pop);
FREE(objects);
DONE(NULL);
}
| 7,747 | 22.337349 | 80 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/getopt/getopt.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* getopt.c -- test for windows getopt() implementation
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include "unittest.h"
/*
* long_options -- command line arguments
*/
static const struct option long_options[] = {
{ "arg_a", no_argument, NULL, 'a' },
{ "arg_b", no_argument, NULL, 'b' },
{ "arg_c", no_argument, NULL, 'c' },
{ "arg_d", no_argument, NULL, 'd' },
{ "arg_e", no_argument, NULL, 'e' },
{ "arg_f", no_argument, NULL, 'f' },
{ "arg_g", no_argument, NULL, 'g' },
{ "arg_h", no_argument, NULL, 'h' },
{ "arg_A", required_argument, NULL, 'A' },
{ "arg_B", required_argument, NULL, 'B' },
{ "arg_C", required_argument, NULL, 'C' },
{ "arg_D", required_argument, NULL, 'D' },
{ "arg_E", required_argument, NULL, 'E' },
{ "arg_F", required_argument, NULL, 'F' },
{ "arg_G", required_argument, NULL, 'G' },
{ "arg_H", required_argument, NULL, 'H' },
{ "arg_1", optional_argument, NULL, '1' },
{ "arg_2", optional_argument, NULL, '2' },
{ "arg_3", optional_argument, NULL, '3' },
{ "arg_4", optional_argument, NULL, '4' },
{ "arg_5", optional_argument, NULL, '5' },
{ "arg_6", optional_argument, NULL, '6' },
{ "arg_7", optional_argument, NULL, '7' },
{ "arg_8", optional_argument, NULL, '8' },
{ NULL, 0, NULL, 0 },
};
int
main(int argc, char *argv[])
{
int opt;
int option_index;
START(argc, argv, "getopt");
while ((opt = getopt_long(argc, argv,
"abcdefghA:B:C:D:E:F:G::H1::2::3::4::5::6::7::8::",
long_options, &option_index)) != -1) {
switch (opt) {
case '?':
UT_OUT("unknown argument");
break;
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
case 'g':
case 'h':
UT_OUT("arg_%c", opt);
break;
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
case 'G':
case 'H':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
UT_OUT("arg_%c=%s", opt,
optarg == NULL ? "null": optarg);
break;
}
}
while (optind < argc) {
UT_OUT("%s", argv[optind++]);
}
DONE(NULL);
}
| 2,159 | 21.736842 | 55 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_sds/util_sds.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* util_sds.c -- unit test for shutdown state functions
*/
#include <stdlib.h>
#include "unittest.h"
#include "ut_pmem2.h"
#include "shutdown_state.h"
#include "set.h"
#define PMEM_LEN 4096
static char **uids;
static size_t uids_size;
static size_t uid_it;
static uint64_t *uscs;
static size_t uscs_size;
static size_t usc_it;
static pmem2_persist_fn persist;
#define FAIL(X, Y) \
if ((X) == (Y)) { \
goto out; \
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_sds");
if (argc < 2)
UT_FATAL("usage: %s init fail (file uuid usc)...", argv[0]);
unsigned files = (unsigned)(argc - 2) / 3;
char **pmemaddr = MALLOC(files * sizeof(char *));
int *fds = MALLOC(files * sizeof(fds[0]));
struct pmem2_map **maps = MALLOC(files * sizeof(maps[0]));
uids = MALLOC(files * sizeof(uids[0]));
uscs = MALLOC(files * sizeof(uscs[0]));
uids_size = files;
uscs_size = files;
int init = atoi(argv[1]);
int fail_on = atoi(argv[2]);
char **args = argv + 3;
struct pmem2_config *cfg;
PMEM2_CONFIG_NEW(&cfg);
pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE);
for (unsigned i = 0; i < files; i++) {
fds[i] = OPEN(args[i * 3], O_CREAT | O_RDWR, 0666);
POSIX_FALLOCATE(fds[i], 0, PMEM_LEN);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fds[i]);
if (pmem2_map(cfg, src, &maps[i])) {
UT_FATAL("pmem2_map: %s", pmem2_errormsg());
}
		pmemaddr[i] = pmem2_map_get_address(maps[i]);
uids[i] = args[i * 3 + 1];
uscs[i] = strtoull(args[i * 3 + 2], NULL, 0);
PMEM2_SOURCE_DELETE(&src);
}
persist = pmem2_get_persist_fn(maps[0]);
FAIL(fail_on, 1);
struct pool_replica *rep = MALLOC(
sizeof(*rep) + sizeof(struct pool_set_part));
memset(rep, 0, sizeof(*rep) + sizeof(struct pool_set_part));
struct shutdown_state *pool_sds = (struct shutdown_state *)pmemaddr[0];
if (init) {
/* initialize pool shutdown state */
shutdown_state_init(pool_sds, rep);
FAIL(fail_on, 2);
for (unsigned i = 0; i < files; i++) {
if (shutdown_state_add_part(pool_sds, fds[i], rep))
UT_FATAL("shutdown_state_add_part");
FAIL(fail_on, 3);
}
} else {
/* verify a shutdown state saved in the pool */
struct shutdown_state current_sds;
shutdown_state_init(¤t_sds, NULL);
FAIL(fail_on, 2);
for (unsigned i = 0; i < files; i++) {
if (shutdown_state_add_part(¤t_sds,
fds[i], NULL))
UT_FATAL("shutdown_state_add_part");
FAIL(fail_on, 3);
}
if (shutdown_state_check(¤t_sds, pool_sds, rep)) {
UT_FATAL(
"An ADR failure is detected, the pool might be corrupted");
}
}
FAIL(fail_on, 4);
shutdown_state_set_dirty(pool_sds, rep);
/* pool is open */
FAIL(fail_on, 5);
/* close pool */
shutdown_state_clear_dirty(pool_sds, rep);
FAIL(fail_on, 6);
out: for (unsigned i = 0; i < files; i++) {
pmem2_unmap(&maps[i]);
CLOSE(fds[i]);
}
PMEM2_CONFIG_DELETE(&cfg);
FREE(pmemaddr);
FREE(uids);
FREE(uscs);
FREE(fds);
FREE(maps);
DONE(NULL);
}
FUNC_MOCK(pmem2_source_device_id, int, const struct pmem2_source *src,
char *uid, size_t *len)
FUNC_MOCK_RUN_DEFAULT {
if (uid_it < uids_size) {
if (uid != NULL) {
strcpy(uid, uids[uid_it]);
uid_it++;
} else {
*len = strlen(uids[uid_it]) + 1;
}
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
FUNC_MOCK(pmem2_source_device_usc, int, const struct pmem2_source *src,
uint64_t *usc)
FUNC_MOCK_RUN_DEFAULT {
if (usc_it < uscs_size) {
*usc = uscs[usc_it];
usc_it++;
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush);
/*
* os_part_deep_common -- XXX temporary workaround until pmem2 is
* integrated with common
*/
int
os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush)
{
/*
* this is a test - we don't need to deep persist anything -
* just call regular persist to make valgrind happy
*/
persist(addr, len);
return 0;
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 4,175 | 21.572973 | 79 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/obj_recreate/obj_recreate.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_recreate.c -- recreate pool on dirty file and check consistency
*/
#include "unittest.h"
POBJ_LAYOUT_BEGIN(recreate);
POBJ_LAYOUT_ROOT(recreate, struct root);
POBJ_LAYOUT_TOID(recreate, struct foo);
POBJ_LAYOUT_END(recreate);
struct foo {
int bar;
};
struct root {
TOID(struct foo) foo;
};
#define LAYOUT_NAME "obj_recreate"
#define N PMEMOBJ_MIN_POOL
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_recreate");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recreate) != 1);
if (argc < 2)
UT_FATAL("usage: %s file-name [trunc]", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = NULL;
/* create pool 2*N */
pop = pmemobj_create(path, LAYOUT_NAME, 2 * N, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
/* allocate 1.5*N */
TOID(struct root) root = (TOID(struct root))pmemobj_root(pop,
(size_t)(1.5 * N));
/* use root object for something */
POBJ_NEW(pop, &D_RW(root)->foo, struct foo, NULL, NULL);
pmemobj_close(pop);
int fd = OPEN(path, O_RDWR);
if (argc >= 3 && strcmp(argv[2], "trunc") == 0) {
UT_OUT("truncating");
/* shrink file to N */
FTRUNCATE(fd, N);
}
size_t zero_len = Ut_pagesize;
/* zero first page */
void *p = MMAP(NULL, zero_len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
memset(p, 0, zero_len);
MUNMAP(p, zero_len);
CLOSE(fd);
/* create pool on existing file */
pop = pmemobj_create(path, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
/* try to allocate 0.5*N */
root = (TOID(struct root))pmemobj_root(pop, (size_t)(0.5 * N));
if (TOID_IS_NULL(root))
UT_FATAL("couldn't allocate root object");
/* validate root object is empty */
if (!TOID_IS_NULL(D_RW(root)->foo))
UT_FATAL("root object is already filled after pmemobj_create!");
pmemobj_close(pop);
DONE(NULL);
}
| 1,968 | 21.123596 | 73 | c |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/test/util_ctl/util_ctl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* util_ctl.c -- tests for the control module
*/
#include "unittest.h"
#include "ctl.h"
#include "out.h"
#include "pmemcommon.h"
#include "fault_injection.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
struct pool {
struct ctl *ctl;
};
static char *testconfig_path;
static int test_config_written;
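/*
* test handlers for the debug.* namespace: reads zero the output argument,
* writes set it (or validate it) and increment test_config_written so the
* callers can verify which entry points were reached
*/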
static int
CTL_READ_HANDLER(test_rw)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_rw = arg;
*arg_rw = 0;
return 0;
}
static int
CTL_WRITE_HANDLER(test_rw)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
int *arg_rw = arg;
*arg_rw = 1;
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_rw) = CTL_ARG_INT;
static int
CTL_WRITE_HANDLER(test_wo)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
int *arg_wo = arg;
*arg_wo = 1;
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_wo) = CTL_ARG_INT;
#define TEST_CONFIG_VALUE "abcd"
static int
CTL_WRITE_HANDLER(test_config)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
char *config_value = arg;
UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0);
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_config) = CTL_ARG_STRING(8);
struct complex_arg {
int a;
char b[5];
long long c;
int d;
};
#define COMPLEX_ARG_TEST_A 12345
#define COMPLEX_ARG_TEST_B "abcd"
#define COMPLEX_ARG_TEST_C 3147483647
#define COMPLEX_ARG_TEST_D 1
static int
CTL_WRITE_HANDLER(test_config_complex_arg)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
struct complex_arg *c = arg;
UT_ASSERTeq(c->a, COMPLEX_ARG_TEST_A);
UT_ASSERT(strcmp(COMPLEX_ARG_TEST_B, c->b) == 0);
UT_ASSERTeq(c->c, COMPLEX_ARG_TEST_C);
UT_ASSERTeq(c->d, COMPLEX_ARG_TEST_D);
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_config_complex_arg) = {
.dest_size = sizeof(struct complex_arg),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct complex_arg, a, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct complex_arg, b, ctl_arg_string),
CTL_ARG_PARSER_STRUCT(struct complex_arg, c, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct complex_arg, d, ctl_arg_boolean),
CTL_ARG_PARSER_END
}
};
static int
CTL_READ_HANDLER(test_ro)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_ro = arg;
*arg_ro = 0;
return 0;
}
static int
CTL_READ_HANDLER(index_value)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
long *index_value = arg;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
UT_ASSERT(strcmp(idx->name, "test_index") == 0);
*index_value = idx->value;
return 0;
}
static int
CTL_RUNNABLE_HANDLER(test_runnable)(void *ctx,
enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_runnable = arg;
*arg_runnable = 0;
return 0;
}
static const struct ctl_node CTL_NODE(test_index)[] = {
CTL_LEAF_RO(index_value),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_LEAF_RO(test_ro),
CTL_LEAF_WO(test_wo),
CTL_LEAF_RUNNABLE(test_runnable),
CTL_LEAF_RW(test_rw),
CTL_INDEXED(test_index),
CTL_LEAF_WO(test_config),
CTL_LEAF_WO(test_config_complex_arg),
CTL_NODE_END
};
static int
CTL_WRITE_HANDLER(gtest_config)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
char *config_value = arg;
UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0);
test_config_written = 1;
return 0;
}
static struct ctl_argument CTL_ARG(gtest_config) = CTL_ARG_STRING(8);
static int
CTL_READ_HANDLER(gtest_ro)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_ro = arg;
*arg_ro = 0;
return 0;
}
static const struct ctl_node CTL_NODE(global_debug)[] = {
CTL_LEAF_RO(gtest_ro),
CTL_LEAF_WO(gtest_config),
CTL_NODE_END
};
static int
util_ctl_get(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
static int
util_ctl_set(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
static int
util_ctl_exec(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
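/*
* test_ctl_parser -- test queries with malformed names and the
* read/write/runnable entry points, including indexed nodes
*/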
static void
test_ctl_parser(struct pool *pop)
{
errno = 0;
int ret;
ret = util_ctl_get(pop, NULL, NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "a.b.c.d", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, ".", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "..", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "1.2.3.4", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.1.", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.1.invalid", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
/* test methods set read to 0 and write to 1 if successful */
int arg_read = 1;
int arg_write = 0;
errno = 0;
/* correct name, wrong args */
ret = util_ctl_get(pop, "debug.test_rw", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_rw", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_wo", &arg_read);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_wo", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_ro", &arg_write);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_ro", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_rw", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 0);
UT_ASSERTeq(errno, 0);
ret = util_ctl_set(pop, "debug.test_rw", &arg_write);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 1);
arg_read = 1;
arg_write = 0;
ret = util_ctl_get(pop, "debug.test_ro", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 0);
arg_read = 1;
arg_write = 0;
ret = util_ctl_set(pop, "debug.test_wo", &arg_write);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
UT_ASSERTeq(arg_write, 1);
long index_value = 0;
ret = util_ctl_get(pop, "debug.5.index_value", &index_value);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(index_value, 5);
ret = util_ctl_get(pop, "debug.10.index_value", &index_value);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(index_value, 10);
arg_read = 1;
arg_write = 1;
int arg_runnable = 1;
ret = util_ctl_exec(pop, "debug.test_runnable", &arg_runnable);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
UT_ASSERTeq(arg_write, 1);
UT_ASSERTeq(arg_runnable, 0);
}
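/*
* test_string_config -- test loading ctl settings from an in-memory string
*/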
static void
test_string_config(struct pool *pop)
{
UT_ASSERTne(pop, NULL);
int ret;
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, ";;");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, ";=;");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "=");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "=b");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=111=222");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=333;debug.test_rw=444;");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 2);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_config="TEST_CONFIG_VALUE";");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 1);
}
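/*
* config_file_create -- write the given contents into the test config file
*/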
static void
config_file_create(const char *buf)
{
/* the test script will take care of removing this file for us */
FILE *f = os_fopen(testconfig_path, "w+");
fwrite(buf, sizeof(char), strlen(buf), f);
fclose(f);
}
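/*
* create_and_test_file_config -- write a config file, load it and verify
* both the return value and the number of registered writes
*/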
static void
create_and_test_file_config(struct pool *pop, const char *buf, int ret,
int result)
{
config_file_create(buf);
test_config_written = 0;
int r = ctl_load_config_from_file(pop ? pop->ctl : NULL,
pop, testconfig_path);
UT_ASSERTeq(r, ret);
UT_ASSERTeq(test_config_written, result);
}
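/*
* test_too_large_file -- loading an oversized (2 MB) config file must fail
*/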
static void
test_too_large_file(struct pool *pop)
{
char *too_large_buf = calloc(1, 1 << 21);
UT_ASSERTne(too_large_buf, NULL);
memset(too_large_buf, 0xc, (1 << 21) - 1);
config_file_create(too_large_buf);
int ret = ctl_load_config_from_file(pop->ctl, pop,
testconfig_path);
UT_ASSERTne(ret, 0);
free(too_large_buf);
}
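/*
* test_file_config -- exercise config file parsing: comments, whitespace,
* complex arguments and the global namespace
*/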
static void
test_file_config(struct pool *pop)
{
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";"
"debug.test_config="TEST_CONFIG_VALUE";", 0, 2);
create_and_test_file_config(pop,
"#this is a comment\n"
"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.#this is a comment\n"
"test_config#this is a comment\n"
"="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";#this is a comment",
0, 1);
create_and_test_file_config(pop,
"\n\n\ndebug\n.\ntest\t_\tconfig="TEST_CONFIG_VALUE";\n", 0, 1);
create_and_test_file_config(pop,
" d e b u g . t e s t _ c o n f i g = "TEST_CONFIG_VALUE";",
0, 1);
create_and_test_file_config(pop,
"#debug.test_config="TEST_CONFIG_VALUE";", 0, 0);
create_and_test_file_config(pop,
"debug.#this is a comment\n"
"test_config#this is a not properly terminated comment"
"="TEST_CONFIG_VALUE";", -1, 0);
create_and_test_file_config(pop,
"invalid", -1, 0);
create_and_test_file_config(pop,
"", 0, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=1,2,3;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=12345,abcd,,1;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=12345,abcd,3147483647,1;", 0, 1);
create_and_test_file_config(NULL,
"global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(NULL, "private.missing.query=1;"
"global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1);
test_too_large_file(pop);
int ret = ctl_load_config_from_file(pop->ctl,
pop, "does_not_exist");
UT_ASSERTne(ret, 0);
}
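/*
* test_ctl_global_namespace -- read a leaf registered in the global
* (pool-independent) namespace
*/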
static void
test_ctl_global_namespace(struct pool *pop)
{
int arg_read = 1;
int ret = util_ctl_get(pop, "global_debug.gtest_ro", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
}
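/*
* test_ctl_arg_parsers -- boundary tests for the boolean, integer and
* string argument parsers
*/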
static void
test_ctl_arg_parsers(void)
{
char *input;
input = "";
int boolean = -1;
int ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "abcdefgh";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "-999";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "N";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 0);
input = "0";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 0);
input = "yes";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "Yes";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "1";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "1234";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "";
int small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "abcd";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "12345678901234567890";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "-12345678901234567890";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "2147483648"; /* INT_MAX + 1 */
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "-2147483649"; /* INT_MIN - 2 */
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "0";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, 0);
input = "500";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, 500);
input = "-500";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, -500);
input = "";
long long ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "12345678901234567890";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "-12345678901234567890";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "2147483648";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ll_int, 2147483648);
input = "-2147483649";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ll_int, -2147483649LL);
input = "";
char string[1000] = {0};
ret = ctl_arg_string(input, string, 0);
UT_ASSERTeq(ret, -1);
input = "abcd";
ret = ctl_arg_string(input, string, 3);
UT_ASSERTeq(ret, -1);
input = "abcdefg";
ret = ctl_arg_string(input, string, 3);
UT_ASSERTeq(ret, -1);
input = "abcd";
ret = ctl_arg_string(input, string, 4);
UT_ASSERTeq(ret, -1);
input = "abc";
ret = ctl_arg_string(input, string, 4);
UT_ASSERTeq(ret, 0);
UT_ASSERT(strcmp(input, string) == 0);
}
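/*
* test_fault_injection -- config parsing must fail with ENOMEM when an
* internal allocation fails
*/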
static void
test_fault_injection(struct pool *pop)
{
if (!core_fault_injection_enabled())
return;
UT_ASSERTne(pop, NULL);
core_inject_fault_at(PMEM_MALLOC, 1, "ctl_parse_args");
test_config_written = 0;
int ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=333;debug.test_rw=444;");
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_ctl");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
if (argc != 2)
UT_FATAL("usage: %s testconfig", argv[0]);
testconfig_path = argv[1];
CTL_REGISTER_MODULE(NULL, global_debug);
test_ctl_global_namespace(NULL);
struct pool *pop = malloc(sizeof(*pop));
pop->ctl = ctl_new();
test_ctl_global_namespace(NULL);
CTL_REGISTER_MODULE(pop->ctl, debug);
test_ctl_global_namespace(pop);
test_fault_injection(pop);
test_ctl_parser(pop);
test_string_config(pop);
test_file_config(pop);
test_ctl_arg_parsers();
ctl_delete(pop->ctl);
free(pop);
common_fini();
DONE(NULL);
}
| 17,492 | 22.639189 | 72 | c |