repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/quicklist.h
|
/* quicklist.h - A generic doubly linked quicklist implementation
*
* Copyright (c) 2014, Matt Stancliff <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QUICKLIST_H__
#define __QUICKLIST_H__
/* Node, quicklist, and Iterator are the only data structures used currently. */
/* quicklistNode is a 32 byte struct describing a ziplist for a quicklist.
 * We use bit fields to keep the quicklistNode at 32 bytes.
 * count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k).
 * encoding: 2 bits, RAW=1, LZF=2.
 * container: 2 bits, NONE=1, ZIPLIST=2.
 * recompress: 1 bit, bool, true if node is temporarily decompressed for usage.
 * attempted_compress: 1 bit, boolean, used for verifying during testing.
 * extra: 10 bits, free for future use; pads out the remainder of 32 bits */
/* One link of the quicklist; owns a single ziplist as its payload. */
typedef struct quicklistNode {
    struct quicklistNode *prev;          /* previous node, NULL at the head */
    struct quicklistNode *next;          /* next node, NULL at the tail */
    unsigned char *zl;                   /* ziplist bytes; points to a
                                          * quicklistLZF when encoding==LZF */
    unsigned int sz;                     /* ziplist size in bytes (uncompressed) */
    unsigned int count : 16;             /* count of items in ziplist */
    unsigned int encoding : 2;           /* RAW==1 or LZF==2 */
    unsigned int container : 2;          /* NONE==1 or ZIPLIST==2 */
    unsigned int recompress : 1;         /* was this node previously compressed,
                                          * i.e. temporarily decompressed for use? */
    unsigned int attempted_compress : 1; /* node can't compress; too small */
    unsigned int extra : 10;             /* more bits to steal for future usage */
} quicklistNode;
/* quicklistLZF is a 4+N byte struct holding 'sz' followed by 'compressed'.
* 'sz' is byte length of 'compressed' field.
* 'compressed' is LZF data with total (compressed) length 'sz'
* NOTE: uncompressed length is stored in quicklistNode->sz.
* When quicklistNode->zl is compressed, node->zl points to a quicklistLZF */
/* Header placed in front of a node's data when it is LZF-compressed.
 * 'sz' is the COMPRESSED byte length of 'compressed'; the uncompressed
 * length is kept in quicklistNode->sz (see comment above). */
typedef struct quicklistLZF {
    unsigned int sz;    /* LZF (compressed) size in bytes */
    char compressed[];  /* flexible array member: 'sz' bytes of LZF data */
} quicklistLZF;
/* quicklist is a 32 byte struct (on 64-bit systems) describing a quicklist.
* 'count' is the number of total entries.
* 'len' is the number of quicklist nodes.
* 'compress' is: -1 if compression disabled, otherwise it's the number
* of quicklistNodes to leave uncompressed at ends of quicklist.
* 'fill' is the user-requested (or default) fill factor. */
/* The quicklist proper: doubly linked list endpoints plus bookkeeping.
 * See the comment above for the meaning of 'count', 'len', 'compress'
 * and 'fill'. */
typedef struct quicklist {
    quicklistNode *head;        /* first node, NULL when empty */
    quicklistNode *tail;        /* last node, NULL when empty */
    unsigned long count;        /* total count of all entries in all ziplists */
    unsigned int len;           /* number of quicklistNodes */
    int fill : 16;              /* fill factor for individual nodes; signed —
                                 * presumably negative selects size-based limits
                                 * (cf. OBJ_LIST_MAX_ZIPLIST_SIZE == -2); confirm
                                 * against quicklist.c */
    unsigned int compress : 16; /* depth of end nodes not to compress;0=off */
} quicklist;
/* Iterator state for walking a quicklist in either direction
 * (AL_START_HEAD / AL_START_TAIL). Obtain via quicklistGetIterator(),
 * advance with quicklistNext(), free with quicklistReleaseIterator(). */
typedef struct quicklistIter {
    const quicklist *quicklist; /* list being iterated (not owned) */
    quicklistNode *current;     /* node the iterator is positioned on */
    unsigned char *zi;          /* current ziplist entry within 'current' */
    long offset;                /* offset in current ziplist */
    int direction;              /* AL_START_HEAD or AL_START_TAIL */
} quicklistIter;
/* A dereferenced entry, filled in by quicklistNext()/quicklistIndex().
 * Presumably either 'value' (with 'sz') or 'longval' carries the payload
 * depending on how the ziplist encoded it — confirm in quicklist.c. */
typedef struct quicklistEntry {
    const quicklist *quicklist; /* owning list (not owned by the entry) */
    quicklistNode *node;        /* node containing this entry */
    unsigned char *zi;          /* ziplist entry pointer inside the node */
    unsigned char *value;       /* string value, when entry is a string */
    long long longval;          /* integer value, when entry is an integer */
    unsigned int sz;            /* byte length of 'value' */
    int offset;                 /* entry offset within the node's ziplist */
} quicklistEntry;
/* Insertion/pop direction selectors (note: TAIL is -1, not 1). */
#define QUICKLIST_HEAD 0
#define QUICKLIST_TAIL -1
/* quicklist node encodings */
#define QUICKLIST_NODE_ENCODING_RAW 1
#define QUICKLIST_NODE_ENCODING_LZF 2
/* quicklist compression disable */
#define QUICKLIST_NOCOMPRESS 0
/* quicklist container formats */
#define QUICKLIST_NODE_CONTAINER_NONE 1
#define QUICKLIST_NODE_CONTAINER_ZIPLIST 2
/* True when the node's payload is stored as quicklistLZF. */
#define quicklistNodeIsCompressed(node)                                        \
    ((node)->encoding == QUICKLIST_NODE_ENCODING_LZF)
/* Prototypes */
/* -- Creation / configuration / teardown -- */
quicklist *quicklistCreate(void);
quicklist *quicklistNew(int fill, int compress);
void quicklistSetCompressDepth(quicklist *quicklist, int depth);
void quicklistSetFill(quicklist *quicklist, int fill);
void quicklistSetOptions(quicklist *quicklist, int fill, int depth);
void quicklistRelease(quicklist *quicklist);
/* -- Pushing values (QUICKLIST_HEAD / QUICKLIST_TAIL select the end) -- */
int quicklistPushHead(quicklist *quicklist, void *value, const size_t sz);
int quicklistPushTail(quicklist *quicklist, void *value, const size_t sz);
void quicklistPush(quicklist *quicklist, void *value, const size_t sz,
                   int where);
/* -- Building quicklists from existing ziplists -- */
void quicklistAppendZiplist(quicklist *quicklist, unsigned char *zl);
quicklist *quicklistAppendValuesFromZiplist(quicklist *quicklist,
                                            unsigned char *zl);
quicklist *quicklistCreateFromZiplist(int fill, int compress,
                                      unsigned char *zl);
/* -- Insertion / replacement / deletion relative to an entry -- */
void quicklistInsertAfter(quicklist *quicklist, quicklistEntry *node,
                          void *value, const size_t sz);
void quicklistInsertBefore(quicklist *quicklist, quicklistEntry *node,
                           void *value, const size_t sz);
void quicklistDelEntry(quicklistIter *iter, quicklistEntry *entry);
int quicklistReplaceAtIndex(quicklist *quicklist, long index, void *data,
                            int sz);
int quicklistDelRange(quicklist *quicklist, const long start, const long stop);
/* -- Iteration (directions: AL_START_HEAD / AL_START_TAIL) -- */
quicklistIter *quicklistGetIterator(const quicklist *quicklist, int direction);
quicklistIter *quicklistGetIteratorAtIdx(const quicklist *quicklist,
                                         int direction, const long long idx);
int quicklistNext(quicklistIter *iter, quicklistEntry *node);
void quicklistReleaseIterator(quicklistIter *iter);
/* -- Whole-list operations and random access -- */
quicklist *quicklistDup(quicklist *orig);
int quicklistIndex(const quicklist *quicklist, const long long index,
                   quicklistEntry *entry);
void quicklistRewind(quicklist *quicklist, quicklistIter *li);
void quicklistRewindTail(quicklist *quicklist, quicklistIter *li);
void quicklistRotate(quicklist *quicklist);
/* -- Popping (quicklistPopCustom lets the caller control value copying) -- */
int quicklistPopCustom(quicklist *quicklist, int where, unsigned char **data,
                       unsigned int *sz, long long *sval,
                       void *(*saver)(unsigned char *data, unsigned int sz));
int quicklistPop(quicklist *quicklist, int where, unsigned char **data,
                 unsigned int *sz, long long *slong);
/* -- Queries -- */
unsigned int quicklistCount(quicklist *ql);
int quicklistCompare(unsigned char *p1, unsigned char *p2, int p2_len);
size_t quicklistGetLzf(const quicklistNode *node, void **data);
#ifdef REDIS_TEST
int quicklistTest(int argc, char *argv[]);
#endif
/* Directions for iterators */
#define AL_START_HEAD 0
#define AL_START_TAIL 1
#endif /* __QUICKLIST_H__ */
| 7,808 | 44.935294 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/asciilogo.h
|
/*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* printf-style template for the startup banner. Conversion specifiers, in
 * order: %s version, %s/%d (presumably git sha1/dirty flag — confirm against
 * the caller), %s word size in bits, %s run mode, %d TCP port, %ld PID.
 * NOTE(review): this is a tentative DEFINITION in a header, not an extern
 * declaration; it is only safe while exactly one .c includes this file —
 * confirm. */
char *ascii_logo =
" _._ \n"
" _.-``__ ''-._ \n"
" _.-`` `. `_. ''-._ Redis %s (%s/%d) %s bit\n"
" .-`` .-```. ```\\/ _.,_ ''-._ \n"
" ( ' , .-` | `, ) Running in %s mode\n"
" |`-._`-...-` __...-.``-._|'` _.-'| Port: %d\n"
" | `-._ `._ / _.-' | PID: %ld\n"
" `-._ `-._ `-./ _.-' _.-' \n"
" |`-._`-._ `-.__.-' _.-'_.-'| \n"
" | `-._`-._ _.-'_.-' | http://redis.io \n"
" `-._ `-._`-.__.-'_.-' _.-' \n"
" |`-._`-._ `-.__.-' _.-'_.-'| \n"
" | `-._`-._ _.-'_.-' | \n"
" `-._ `-._`-.__.-'_.-' _.-' \n"
" `-._ `-.__.-' _.-' \n"
" `-._ _.-' \n"
" `-.__.-' \n\n";
| 2,833 | 58.041667 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/adlist.h
|
/* adlist.h - A generic doubly linked list implementation
*
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ADLIST_H__
#define __ADLIST_H__
/* Node, List, and Iterator are the only data structures used currently. */
/* A single node of the generic doubly linked list; 'value' is an
 * opaque caller-owned pointer. */
typedef struct listNode {
    struct listNode *prev;  /* previous node, NULL at the head */
    struct listNode *next;  /* next node, NULL at the tail */
    void *value;            /* opaque payload; freed via list->free if set */
} listNode;
/* Iterator over a list; advance with listNext(). 'direction' is
 * AL_START_HEAD or AL_START_TAIL. */
typedef struct listIter {
    listNode *next;     /* node the next listNext() call will return */
    int direction;      /* AL_START_HEAD or AL_START_TAIL */
} listIter;
/* The list header. dup/free/match are optional per-value callbacks
 * (NULL means: copy pointers on dup, don't free values, compare by
 * pointer identity). */
typedef struct list {
    listNode *head;                     /* first node, NULL when empty */
    listNode *tail;                     /* last node, NULL when empty */
    void *(*dup)(void *ptr);            /* optional value duplicator */
    void (*free)(void *ptr);            /* optional value destructor */
    int (*match)(void *ptr, void *key); /* optional equality predicate */
    unsigned long len;                  /* number of nodes */
} list;
/* Functions implemented as macros: O(1) accessors and callback setters.
 * Arguments are evaluated once; safe for side-effect-free expressions. */
#define listLength(l) ((l)->len)
#define listFirst(l) ((l)->head)
#define listLast(l) ((l)->tail)
#define listPrevNode(n) ((n)->prev)
#define listNextNode(n) ((n)->next)
#define listNodeValue(n) ((n)->value)
#define listSetDupMethod(l,m) ((l)->dup = (m))
#define listSetFreeMethod(l,m) ((l)->free = (m))
#define listSetMatchMethod(l,m) ((l)->match = (m))
#define listGetDupMethod(l) ((l)->dup)
/* NOTE(review): name is inconsistent with the other getters
 * (listGetFreeMethod would match the pattern); kept for compatibility. */
#define listGetFree(l) ((l)->free)
#define listGetMatchMethod(l) ((l)->match)
/* Prototypes */
/* -- Lifecycle -- */
list *listCreate(void);
void listRelease(list *list);
/* -- Insertion / deletion -- */
list *listAddNodeHead(list *list, void *value);
list *listAddNodeTail(list *list, void *value);
list *listInsertNode(list *list, listNode *old_node, void *value, int after);
void listDelNode(list *list, listNode *node);
/* -- Iteration -- */
listIter *listGetIterator(list *list, int direction);
listNode *listNext(listIter *iter);
void listReleaseIterator(listIter *iter);
/* -- Copy / search / indexing -- */
list *listDup(list *orig);
listNode *listSearchKey(list *list, void *key);
listNode *listIndex(list *list, long index);
/* -- Iterator reset and rotation -- */
void listRewind(list *list, listIter *li);
void listRewindTail(list *list, listIter *li);
void listRotate(list *list);
/* Directions for iterators */
#define AL_START_HEAD 0
#define AL_START_TAIL 1
#endif /* __ADLIST_H__ */
| 3,451 | 35.723404 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/server.h
|
/*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __REDIS_H
#define __REDIS_H
#include "fmacros.h"
#include "config.h"
#include "solarisfixes.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <syslog.h>
#include <netinet/in.h>
#include <lua.h>
#include <signal.h>
#ifdef USE_PMDK
#include <stdbool.h>
#include <sys/queue.h>
#include "libpmemobj.h"
#define PM_LAYOUT_NAME "store_db"
POBJ_LAYOUT_BEGIN(store_db);
POBJ_LAYOUT_TOID(store_db, struct redis_pmem_root);
POBJ_LAYOUT_TOID(store_db, struct key_val_pair_PM);
POBJ_LAYOUT_END(store_db);
#include "pmem.h"
/* Runtime type ids for persistent-memory object classes.
 * NOTE(review): these are tentative DEFINITIONS in a header; if more than
 * one translation unit includes this header (with USE_PMDK), linking can
 * fail under -fno-common / C17+. They should likely be 'extern' here with
 * one definition in a .c file — confirm how many TUs include this. */
uint64_t pm_type_root_type_id;
uint64_t pm_type_key_val_pair_PM;
uint64_t pm_type_sds_type_id;
uint64_t pm_type_emb_sds_type_id;
/* Type key_val_pair_PM Object */
#define PM_TYPE_KEY_VAL_PAIR_PM pm_type_key_val_pair_PM
/* Type SDS Object */
#define PM_TYPE_SDS pm_type_sds_type_id
/* Type Embedded SDS Object */
#define PM_TYPE_EMB_SDS pm_type_emb_sds_type_id
/* Root object of the persistent-memory pool: entry count plus the head of
 * the persistent key/value pair list. */
struct redis_pmem_root {
    uint64_t num_dict_entries;            /* number of persisted dict entries */
    TOID(struct key_val_pair_PM) pe_first; /* first persistent key/val pair */
};
#endif
typedef long long mstime_t; /* millisecond time type. */
#include "ae.h" /* Event driven programming library */
#include "sds.h" /* Dynamic safe strings */
#include "dict.h" /* Hash tables */
#include "adlist.h" /* Linked lists */
#include "zmalloc.h" /* total memory usage aware version of malloc/free */
#include "anet.h" /* Networking the easy way */
#include "ziplist.h" /* Compact list data structure */
#include "intset.h" /* Compact integer set structure */
#include "version.h" /* Version macro */
#include "util.h" /* Misc functions useful in many places */
#include "latency.h" /* Latency monitor API */
#include "sparkline.h" /* ASCII graphs API */
#include "quicklist.h"
/* Following includes allow test functions to be called from Redis main() */
#include "zipmap.h"
#include "sha1.h"
#include "endianconv.h"
#include "crc64.h"
/* Error codes */
#define C_OK 0
#define C_ERR -1
/* Static server configuration */
#define CONFIG_DEFAULT_HZ 10 /* Time interrupt calls/sec. */
#define CONFIG_MIN_HZ 1
#define CONFIG_MAX_HZ 500
#define CONFIG_DEFAULT_SERVER_PORT 6379 /* TCP port */
#define CONFIG_DEFAULT_TCP_BACKLOG 511 /* TCP listen backlog */
#define CONFIG_DEFAULT_CLIENT_TIMEOUT 0 /* default client timeout: infinite */
#define CONFIG_DEFAULT_DBNUM 16
#define CONFIG_MAX_LINE 1024
#define CRON_DBS_PER_CALL 16
#define NET_MAX_WRITES_PER_EVENT (1024*64)
#define PROTO_SHARED_SELECT_CMDS 10
#define OBJ_SHARED_INTEGERS 10000
#define OBJ_SHARED_BULKHDR_LEN 32
#define LOG_MAX_LEN 1024 /* Default maximum length of syslog messages */
#define AOF_REWRITE_PERC 100
#define AOF_REWRITE_MIN_SIZE (64*1024*1024)
#define AOF_REWRITE_ITEMS_PER_CMD 64
#define CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN 10000
#define CONFIG_DEFAULT_SLOWLOG_MAX_LEN 128
#define CONFIG_DEFAULT_MAX_CLIENTS 10000
#define CONFIG_AUTHPASS_MAX_LEN 512
#define CONFIG_DEFAULT_SLAVE_PRIORITY 100
#define CONFIG_DEFAULT_REPL_TIMEOUT 60
#define CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD 10
#define CONFIG_RUN_ID_SIZE 40
#define RDB_EOF_MARK_SIZE 40
#define CONFIG_DEFAULT_REPL_BACKLOG_SIZE (1024*1024) /* 1mb */
#define CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT (60*60) /* 1 hour */
#define CONFIG_REPL_BACKLOG_MIN_SIZE (1024*16) /* 16k */
#define CONFIG_BGSAVE_RETRY_DELAY 5 /* Wait a few secs before trying again. */
#define CONFIG_DEFAULT_PID_FILE "/var/run/redis.pid"
#define CONFIG_DEFAULT_SYSLOG_IDENT "redis"
#define CONFIG_DEFAULT_CLUSTER_CONFIG_FILE "nodes.conf"
#define CONFIG_DEFAULT_DAEMONIZE 0
#define CONFIG_DEFAULT_UNIX_SOCKET_PERM 0
#define CONFIG_DEFAULT_TCP_KEEPALIVE 300
#define CONFIG_DEFAULT_PROTECTED_MODE 1
#define CONFIG_DEFAULT_LOGFILE ""
#define CONFIG_DEFAULT_SYSLOG_ENABLED 0
#define CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR 1
#define CONFIG_DEFAULT_RDB_COMPRESSION 1
#define CONFIG_DEFAULT_RDB_CHECKSUM 1
#define CONFIG_DEFAULT_RDB_FILENAME "dump.rdb"
#define CONFIG_DEFAULT_REPL_DISKLESS_SYNC 0
#define CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY 5
#define CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA 1
#define CONFIG_DEFAULT_SLAVE_READ_ONLY 1
#define CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP NULL
#define CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT 0
#define CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY 0
#define CONFIG_DEFAULT_MAXMEMORY 0
#define CONFIG_DEFAULT_MAXMEMORY_SAMPLES 5
#define CONFIG_DEFAULT_AOF_FILENAME "appendonly.aof"
#define CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE 0
#define CONFIG_DEFAULT_AOF_LOAD_TRUNCATED 1
#define CONFIG_DEFAULT_ACTIVE_REHASHING 1
#define CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC 1
#define CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE 0
#define CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG 10
#define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */
#define NET_PEER_ID_LEN (NET_IP_STR_LEN+32) /* Must be enough for ip:port */
#define CONFIG_BINDADDR_MAX 16
#define CONFIG_MIN_RESERVED_FDS 32
#define CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD 0
#ifdef USE_PMDK
#define CONFIG_MIN_PM_FILE_SIZE PMEMOBJ_MIN_POOL
#define CONFIG_DEFAULT_PM_FILE_SIZE (1024*1024*1024) /* 1GB */
#endif
#define ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP 20 /* Loopkups per loop. */
#define ACTIVE_EXPIRE_CYCLE_FAST_DURATION 1000 /* Microseconds */
#define ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC 25 /* CPU max % for keys collection */
#define ACTIVE_EXPIRE_CYCLE_SLOW 0
#define ACTIVE_EXPIRE_CYCLE_FAST 1
/* Instantaneous metrics tracking. */
#define STATS_METRIC_SAMPLES 16 /* Number of samples per metric. */
#define STATS_METRIC_COMMAND 0 /* Number of commands executed. */
#define STATS_METRIC_NET_INPUT 1 /* Bytes read to network .*/
#define STATS_METRIC_NET_OUTPUT 2 /* Bytes written to network. */
#define STATS_METRIC_COUNT 3
/* Protocol and I/O related defines */
#define PROTO_MAX_QUERYBUF_LEN (1024*1024*1024) /* 1GB max query buffer. */
#define PROTO_IOBUF_LEN (1024*16) /* Generic I/O buffer size */
#define PROTO_REPLY_CHUNK_BYTES (16*1024) /* 16k output buffer */
#define PROTO_INLINE_MAX_SIZE (1024*64) /* Max size of inline reads */
#define PROTO_MBULK_BIG_ARG (1024*32)
#define LONG_STR_SIZE 21 /* Bytes needed for long -> str + '\0' */
#define AOF_AUTOSYNC_BYTES (1024*1024*32) /* fdatasync every 32MB */
/* When configuring the server eventloop, we setup it so that the total number
* of file descriptors we can handle are server.maxclients + RESERVED_FDS +
* a few more to stay safe. Since RESERVED_FDS defaults to 32, we add 96
* in order to make sure of not over provisioning more than 128 fds. */
#define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS+96)
/* Hash table parameters */
#define HASHTABLE_MIN_FILL 10 /* Minimal hash table fill 10% */
/* Command flags. Please check the command table defined in the redis.c file
* for more information about the meaning of every flag. */
#define CMD_WRITE 1 /* "w" flag */
#define CMD_READONLY 2 /* "r" flag */
#define CMD_DENYOOM 4 /* "m" flag */
#define CMD_NOT_USED_1 8 /* no longer used flag */
#define CMD_ADMIN 16 /* "a" flag */
#define CMD_PUBSUB 32 /* "p" flag */
#define CMD_NOSCRIPT 64 /* "s" flag */
#define CMD_RANDOM 128 /* "R" flag */
#define CMD_SORT_FOR_SCRIPT 256 /* "S" flag */
#define CMD_LOADING 512 /* "l" flag */
#define CMD_STALE 1024 /* "t" flag */
#define CMD_SKIP_MONITOR 2048 /* "M" flag */
#define CMD_ASKING 4096 /* "k" flag */
#define CMD_FAST 8192 /* "F" flag */
/* Object types */
#define OBJ_STRING 0
#define OBJ_LIST 1
#define OBJ_SET 2
#define OBJ_ZSET 3
#define OBJ_HASH 4
/* Objects encoding. Some kind of objects like Strings and Hashes can be
* internally represented in multiple ways. The 'encoding' field of the object
* is set to one of this fields for this object. */
#define OBJ_ENCODING_RAW 0 /* Raw representation */
#define OBJ_ENCODING_INT 1 /* Encoded as integer */
#define OBJ_ENCODING_HT 2 /* Encoded as hash table */
#define OBJ_ENCODING_ZIPMAP 3 /* Encoded as zipmap */
#define OBJ_ENCODING_LINKEDLIST 4 /* Encoded as regular linked list */
#define OBJ_ENCODING_ZIPLIST 5 /* Encoded as ziplist */
#define OBJ_ENCODING_INTSET 6 /* Encoded as intset */
#define OBJ_ENCODING_SKIPLIST 7 /* Encoded as skiplist */
#define OBJ_ENCODING_EMBSTR 8 /* Embedded sds string encoding */
#define OBJ_ENCODING_QUICKLIST 9 /* Encoded as linked list of ziplists */
/* Defines related to the dump file format. To store 32 bits lengths for short
* keys requires a lot of space, so we check the most significant 2 bits of
* the first byte to interpreter the length:
*
* 00|000000 => if the two MSB are 00 the len is the 6 bits of this byte
 * 01|000000 00000000 => 01, the len is 14 bits, 6 bits + 8 bits of next byte
* 10|000000 [32 bit integer] => if it's 10, a full 32 bit len will follow
* 11|000000 this means: specially encoded object will follow. The six bits
* number specify the kind of object that follows.
* See the RDB_ENC_* defines.
*
 * Lengths up to 63 are stored using a single byte; most DB keys, and many
 * values, will fit inside. */
#define RDB_6BITLEN 0
#define RDB_14BITLEN 1
#define RDB_32BITLEN 2
#define RDB_ENCVAL 3
#define RDB_LENERR UINT_MAX
/* When a length of a string object stored on disk has the first two bits
* set, the remaining two bits specify a special encoding for the object
* accordingly to the following defines: */
#define RDB_ENC_INT8 0 /* 8 bit signed integer */
#define RDB_ENC_INT16 1 /* 16 bit signed integer */
#define RDB_ENC_INT32 2 /* 32 bit signed integer */
#define RDB_ENC_LZF 3 /* string compressed with FASTLZ */
/* AOF states */
#define AOF_OFF 0 /* AOF is off */
#define AOF_ON 1 /* AOF is on */
#define AOF_WAIT_REWRITE 2 /* AOF waits rewrite to start appending */
/* Client flags */
#define CLIENT_SLAVE (1<<0) /* This client is a slave server */
#define CLIENT_MASTER (1<<1) /* This client is a master server */
#define CLIENT_MONITOR (1<<2) /* This client is a slave monitor, see MONITOR */
#define CLIENT_MULTI (1<<3) /* This client is in a MULTI context */
#define CLIENT_BLOCKED (1<<4) /* The client is waiting in a blocking operation */
#define CLIENT_DIRTY_CAS (1<<5) /* Watched keys modified. EXEC will fail. */
#define CLIENT_CLOSE_AFTER_REPLY (1<<6) /* Close after writing entire reply. */
#define CLIENT_UNBLOCKED (1<<7) /* This client was unblocked and is stored in
server.unblocked_clients */
#define CLIENT_LUA (1<<8) /* This is a non connected client used by Lua */
#define CLIENT_ASKING (1<<9) /* Client issued the ASKING command */
#define CLIENT_CLOSE_ASAP (1<<10)/* Close this client ASAP */
#define CLIENT_UNIX_SOCKET (1<<11) /* Client connected via Unix domain socket */
#define CLIENT_DIRTY_EXEC (1<<12) /* EXEC will fail for errors while queueing */
#define CLIENT_MASTER_FORCE_REPLY (1<<13) /* Queue replies even if is master */
#define CLIENT_FORCE_AOF (1<<14) /* Force AOF propagation of current cmd. */
#define CLIENT_FORCE_REPL (1<<15) /* Force replication of current cmd. */
#define CLIENT_PRE_PSYNC (1<<16) /* Instance don't understand PSYNC. */
#define CLIENT_READONLY (1<<17) /* Cluster client is in read-only state. */
#define CLIENT_PUBSUB (1<<18) /* Client is in Pub/Sub mode. */
#define CLIENT_PREVENT_AOF_PROP (1<<19) /* Don't propagate to AOF. */
#define CLIENT_PREVENT_REPL_PROP (1<<20) /* Don't propagate to slaves. */
#define CLIENT_PREVENT_PROP (CLIENT_PREVENT_AOF_PROP|CLIENT_PREVENT_REPL_PROP)
#define CLIENT_PENDING_WRITE (1<<21) /* Client has output to send but a write
handler is yet not installed. */
#define CLIENT_REPLY_OFF (1<<22) /* Don't send replies to client. */
#define CLIENT_REPLY_SKIP_NEXT (1<<23) /* Set CLIENT_REPLY_SKIP for next cmd */
#define CLIENT_REPLY_SKIP (1<<24) /* Don't send just this reply. */
#define CLIENT_LUA_DEBUG (1<<25) /* Run EVAL in debug mode. */
#define CLIENT_LUA_DEBUG_SYNC (1<<26) /* EVAL debugging without fork() */
/* Client block type (btype field in client structure)
* if CLIENT_BLOCKED flag is set. */
#define BLOCKED_NONE 0 /* Not blocked, no CLIENT_BLOCKED flag set. */
#define BLOCKED_LIST 1 /* BLPOP & co. */
#define BLOCKED_WAIT 2 /* WAIT for synchronous replication. */
/* Client request types */
#define PROTO_REQ_INLINE 1
#define PROTO_REQ_MULTIBULK 2
/* Client classes for client limits, currently used only for
* the max-client-output-buffer limit implementation. */
#define CLIENT_TYPE_NORMAL 0 /* Normal req-reply clients + MONITORs */
#define CLIENT_TYPE_SLAVE 1 /* Slaves. */
#define CLIENT_TYPE_PUBSUB 2 /* Clients subscribed to PubSub channels. */
#define CLIENT_TYPE_MASTER 3 /* Master. */
#define CLIENT_TYPE_OBUF_COUNT 3 /* Number of clients to expose to output
buffer configuration. Just the first
three: normal, slave, pubsub. */
/* Slave replication state. Used in server.repl_state for slaves to remember
* what to do next. */
#define REPL_STATE_NONE 0 /* No active replication */
#define REPL_STATE_CONNECT 1 /* Must connect to master */
#define REPL_STATE_CONNECTING 2 /* Connecting to master */
/* --- Handshake states, must be ordered --- */
#define REPL_STATE_RECEIVE_PONG 3 /* Wait for PING reply */
#define REPL_STATE_SEND_AUTH 4 /* Send AUTH to master */
#define REPL_STATE_RECEIVE_AUTH 5 /* Wait for AUTH reply */
#define REPL_STATE_SEND_PORT 6 /* Send REPLCONF listening-port */
#define REPL_STATE_RECEIVE_PORT 7 /* Wait for REPLCONF reply */
#define REPL_STATE_SEND_IP 8 /* Send REPLCONF ip-address */
#define REPL_STATE_RECEIVE_IP 9 /* Wait for REPLCONF reply */
#define REPL_STATE_SEND_CAPA 10 /* Send REPLCONF capa */
#define REPL_STATE_RECEIVE_CAPA 11 /* Wait for REPLCONF reply */
#define REPL_STATE_SEND_PSYNC 12 /* Send PSYNC */
#define REPL_STATE_RECEIVE_PSYNC 13 /* Wait for PSYNC reply */
/* --- End of handshake states --- */
#define REPL_STATE_TRANSFER 14 /* Receiving .rdb from master */
#define REPL_STATE_CONNECTED 15 /* Connected to master */
/* State of slaves from the POV of the master. Used in client->replstate.
* In SEND_BULK and ONLINE state the slave receives new updates
* in its output queue. In the WAIT_BGSAVE states instead the server is waiting
* to start the next background saving in order to send updates to it. */
#define SLAVE_STATE_WAIT_BGSAVE_START 6 /* We need to produce a new RDB file. */
#define SLAVE_STATE_WAIT_BGSAVE_END 7 /* Waiting RDB file creation to finish. */
#define SLAVE_STATE_SEND_BULK 8 /* Sending RDB file to slave. */
#define SLAVE_STATE_ONLINE 9 /* RDB file transmitted, sending just updates. */
/* Slave capabilities. */
#define SLAVE_CAPA_NONE 0
#define SLAVE_CAPA_EOF (1<<0) /* Can parse the RDB EOF streaming format. */
/* Synchronous read timeout - slave side */
#define CONFIG_REPL_SYNCIO_TIMEOUT 5
/* List related stuff */
#define REDIS_LIST_HEAD 0
#define REDIS_LIST_TAIL 1
/* Sort operations */
#define SORT_OP_GET 0
/* Log levels */
#define LL_DEBUG 0
#define LL_VERBOSE 1
#define LL_NOTICE 2
#define LL_WARNING 3
#define LL_RAW (1<<10) /* Modifier to log without timestamp */
#define CONFIG_DEFAULT_VERBOSITY LL_NOTICE
/* Supervision options */
#define SUPERVISED_NONE 0
#define SUPERVISED_AUTODETECT 1
#define SUPERVISED_SYSTEMD 2
#define SUPERVISED_UPSTART 3
/* Anti-warning macro... */
#define UNUSED(V) ((void) V)
#define ZSKIPLIST_MAXLEVEL 32 /* Should be enough for 2^32 elements */
#define ZSKIPLIST_P 0.25 /* Skiplist P = 1/4 */
/* Append only defines */
#define AOF_FSYNC_NO 0
#define AOF_FSYNC_ALWAYS 1
#define AOF_FSYNC_EVERYSEC 2
#define CONFIG_DEFAULT_AOF_FSYNC AOF_FSYNC_EVERYSEC
/* Zip structure related defaults */
#define OBJ_HASH_MAX_ZIPLIST_ENTRIES 512
#define OBJ_HASH_MAX_ZIPLIST_VALUE 64
#define OBJ_SET_MAX_INTSET_ENTRIES 512
#define OBJ_ZSET_MAX_ZIPLIST_ENTRIES 128
#define OBJ_ZSET_MAX_ZIPLIST_VALUE 64
/* List defaults */
#define OBJ_LIST_MAX_ZIPLIST_SIZE -2
#define OBJ_LIST_COMPRESS_DEPTH 0
/* HyperLogLog defines */
#define CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES 3000
/* Sets operations codes */
#define SET_OP_UNION 0
#define SET_OP_DIFF 1
#define SET_OP_INTER 2
/* Redis maxmemory strategies */
#define MAXMEMORY_VOLATILE_LRU 0
#define MAXMEMORY_VOLATILE_TTL 1
#define MAXMEMORY_VOLATILE_RANDOM 2
#define MAXMEMORY_ALLKEYS_LRU 3
#define MAXMEMORY_ALLKEYS_RANDOM 4
#define MAXMEMORY_NO_EVICTION 5
#define CONFIG_DEFAULT_MAXMEMORY_POLICY MAXMEMORY_NO_EVICTION
/* Scripting */
#define LUA_SCRIPT_TIME_LIMIT 5000 /* milliseconds */
/* Units */
#define UNIT_SECONDS 0
#define UNIT_MILLISECONDS 1
/* SHUTDOWN flags */
#define SHUTDOWN_NOFLAGS 0 /* No flags. */
#define SHUTDOWN_SAVE 1 /* Force SAVE on SHUTDOWN even if no save
points are configured. */
#define SHUTDOWN_NOSAVE 2 /* Don't SAVE on SHUTDOWN. */
/* Command call flags, see call() function */
#define CMD_CALL_NONE 0
#define CMD_CALL_SLOWLOG (1<<0)         /* Log the command into the slowlog. */
#define CMD_CALL_STATS (1<<1)           /* Account command into stats. */
#define CMD_CALL_PROPAGATE_AOF (1<<2)   /* Append command to AOF if it modified
                                           the dataset or was forced to. */
#define CMD_CALL_PROPAGATE_REPL (1<<3)  /* Send command to slaves under the
                                           same conditions. */
#define CMD_CALL_PROPAGATE (CMD_CALL_PROPAGATE_AOF|CMD_CALL_PROPAGATE_REPL)
#define CMD_CALL_FULL (CMD_CALL_SLOWLOG | CMD_CALL_STATS | CMD_CALL_PROPAGATE)
/* Command propagation flags, see propagate() function */
#define PROPAGATE_NONE 0
#define PROPAGATE_AOF 1
#define PROPAGATE_REPL 2
/* RDB active child save type. */
#define RDB_CHILD_TYPE_NONE 0
#define RDB_CHILD_TYPE_DISK 1     /* RDB is written to disk. */
#define RDB_CHILD_TYPE_SOCKET 2   /* RDB is written to slave socket. */
/* Keyspace changes notification classes. Every class is associated with a
 * character for configuration purposes (notify-keyspace-events directive). */
#define NOTIFY_KEYSPACE (1<<0)    /* K */
#define NOTIFY_KEYEVENT (1<<1)    /* E */
#define NOTIFY_GENERIC (1<<2)     /* g */
#define NOTIFY_STRING (1<<3)      /* $ */
#define NOTIFY_LIST (1<<4)        /* l */
#define NOTIFY_SET (1<<5)         /* s */
#define NOTIFY_HASH (1<<6)        /* h */
#define NOTIFY_ZSET (1<<7)        /* z */
#define NOTIFY_EXPIRED (1<<8)     /* x */
#define NOTIFY_EVICTED (1<<9)     /* e */
#define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED)      /* A */
/* Get the first bind addr or NULL */
#define NET_FIRST_BIND_ADDR (server.bindaddr_count ? server.bindaddr[0] : NULL)
/* Using the following macro you can run code inside serverCron() with the
 * specified period, specified in milliseconds.
 * The actual resolution depends on server.hz.
 * Note: intended to be followed by a statement/block, like an if(). */
#define run_with_period(_ms_) if ((_ms_ <= 1000/server.hz) || !(server.cronloops%((_ms_)/(1000/server.hz))))
/* We can print the stacktrace, so our assert is defined this way:
 * on failure the helpers log debug info and then _exit(1) (no atexit
 * handlers, since the process state is not trustworthy anymore). */
#define serverAssertWithInfo(_c,_o,_e) ((_e)?(void)0 : (_serverAssertWithInfo(_c,_o,#_e,__FILE__,__LINE__),_exit(1)))
#define serverAssert(_e) ((_e)?(void)0 : (_serverAssert(#_e,__FILE__,__LINE__),_exit(1)))
#define serverPanic(_e) _serverPanic(#_e,__FILE__,__LINE__),_exit(1)
/*-----------------------------------------------------------------------------
 * Data types
 *----------------------------------------------------------------------------*/
/* A redis object, that is a type able to hold a string / list / set */
/* The actual Redis Object */
#define LRU_BITS 24
#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1) /* Max value of obj->lru */
#define LRU_CLOCK_RESOLUTION 1000 /* LRU clock resolution in ms */
typedef struct redisObject {
    unsigned type:4;        /* OBJ_* type (string, list, hash, ...). */
    unsigned encoding:4;    /* OBJ_ENCODING_* internal representation. */
    unsigned lru:LRU_BITS; /* lru time (relative to server.lruclock) */
    int refcount;           /* Reference count; shared objects have > 1. */
    void *ptr;              /* Underlying data, interpreted per encoding. */
} robj;
/* Macro used to obtain the current LRU clock.
 * If the current resolution is lower than the frequency we refresh the
 * LRU clock (as it should be in production servers) we return the
 * precomputed value, otherwise we need to resort to a system call. */
#define LRU_CLOCK() ((1000/server.hz <= LRU_CLOCK_RESOLUTION) ? server.lruclock : getLRUClock())
/* Macro used to initialize a Redis object allocated on the stack.
 * Note that this macro is taken near the structure definition to make sure
 * we'll update it when the structure is changed, to avoid bugs like
 * bug #85 introduced exactly in this way. */
#define initStaticStringObject(_var,_ptr) do { \
    _var.refcount = 1; \
    _var.type = OBJ_STRING; \
    _var.encoding = OBJ_ENCODING_RAW; \
    _var.ptr = _ptr; \
} while(0)
/* To improve the quality of the LRU approximation we take a set of keys
 * that are good candidate for eviction across freeMemoryIfNeeded() calls.
 *
 * Entries inside the eviction pool are taken ordered by idle time, putting
 * greater idle times to the right (ascending order).
 *
 * Empty entries have the key pointer set to NULL. */
#define MAXMEMORY_EVICTION_POOL_SIZE 16
struct evictionPoolEntry {
    unsigned long long idle;    /* Object idle time. */
    sds key;                    /* Key name. */
};
/* Redis database representation. There are multiple databases identified
 * by integers from 0 (the default database) up to the max configured
 * database. The database number is the 'id' field in the structure. */
typedef struct redisDb {
    dict *dict;                 /* The keyspace for this DB */
    dict *expires;              /* Timeout of keys with a timeout set */
    dict *blocking_keys;        /* Keys with clients waiting for data (BLPOP) */
    dict *ready_keys;           /* Blocked keys that received a PUSH */
    dict *watched_keys;         /* WATCHED keys for MULTI/EXEC CAS */
    struct evictionPoolEntry *eviction_pool;    /* Eviction pool of keys */
    int id;                     /* Database ID */
    long long avg_ttl;          /* Average TTL, just for stats */
} redisDb;
/* Client MULTI/EXEC state */
typedef struct multiCmd {
    robj **argv;                /* Arguments of the queued command. */
    int argc;                   /* Number of arguments. */
    struct redisCommand *cmd;   /* Command table entry of the queued command. */
} multiCmd;
typedef struct multiState {
    multiCmd *commands;     /* Array of MULTI commands */
    int count;              /* Total number of MULTI commands */
    int minreplicas;        /* MINREPLICAS for synchronous replication */
    time_t minreplicas_timeout; /* MINREPLICAS timeout as unixtime. */
} multiState;
/* This structure holds the blocking operation state for a client.
 * The fields used depend on client->btype. */
typedef struct blockingState {
    /* Generic fields. */
    mstime_t timeout;       /* Blocking operation timeout. If UNIX current time
                             * is > timeout then the operation timed out. */
    /* BLOCKED_LIST */
    dict *keys;             /* The keys we are waiting to terminate a blocking
                             * operation such as BLPOP. Otherwise NULL. */
    robj *target;           /* The key that should receive the element,
                             * for BRPOPLPUSH. */
    /* BLOCKED_WAIT */
    int numreplicas;        /* Number of replicas we are waiting for ACK. */
    long long reploffset;   /* Replication offset to reach. */
} blockingState;
/* The following structure represents a node in the server.ready_keys list,
 * where we accumulate all the keys that had clients blocked with a blocking
 * operation such as B[LR]POP, but received new data in the context of the
 * last executed command.
 *
 * After the execution of every command or script, we run this list to check
 * if as a result we should serve data to clients blocked, unblocking them.
 * Note that server.ready_keys will not have duplicates as there is a
 * dictionary also called ready_keys in every structure representing a
 * Redis database, where we make sure to remember if a given key was
 * already added in the server.ready_keys list. */
typedef struct readyList {
    redisDb *db;
    robj *key;
} readyList;
/* With multiplexing we need to take per-client state.
 * Clients are taken in a linked list. */
typedef struct client {
    uint64_t id;            /* Client incremental unique ID. */
    int fd;                 /* Client socket. */
    redisDb *db;            /* Pointer to currently SELECTed DB. */
    int dictid;             /* ID of the currently SELECTed DB. */
    robj *name;             /* As set by CLIENT SETNAME. */
    sds querybuf;           /* Buffer we use to accumulate client queries. */
    size_t querybuf_peak;   /* Recent (100ms or more) peak of querybuf size. */
    int argc;               /* Num of arguments of current command. */
    robj **argv;            /* Arguments of current command. */
    struct redisCommand *cmd, *lastcmd;  /* Last command executed. */
    int reqtype;            /* Request protocol type: PROTO_REQ_* */
    int multibulklen;       /* Number of multi bulk arguments left to read. */
    long bulklen;           /* Length of bulk argument in multi bulk request. */
    list *reply;            /* List of reply objects to send to the client. */
    unsigned long long reply_bytes; /* Tot bytes of objects in reply list. */
    size_t sentlen;         /* Amount of bytes already sent in the current
                               buffer or object being sent. */
    time_t ctime;           /* Client creation time. */
    time_t lastinteraction; /* Time of the last interaction, used for timeout */
    time_t obuf_soft_limit_reached_time; /* Time when the output buffer soft
                               limit was first exceeded, 0 if not exceeded. */
    int flags;              /* Client flags: CLIENT_* macros. */
    int authenticated;      /* When requirepass is non-NULL. */
    int replstate;          /* Replication state if this is a slave. */
    int repl_put_online_on_ack; /* Install slave write handler on ACK. */
    int repldbfd;           /* Replication DB file descriptor. */
    off_t repldboff;        /* Replication DB file offset. */
    off_t repldbsize;       /* Replication DB file size. */
    sds replpreamble;       /* Replication DB preamble. */
    long long reploff;      /* Replication offset if this is our master. */
    long long repl_ack_off; /* Replication ack offset, if this is a slave. */
    long long repl_ack_time;/* Replication ack time, if this is a slave. */
    long long psync_initial_offset; /* FULLRESYNC reply offset other slaves
                                       copying this slave output buffer
                                       should use. */
    char replrunid[CONFIG_RUN_ID_SIZE+1]; /* Master run id if is a master. */
    int slave_listening_port; /* As configured with: REPLCONF listening-port */
    char slave_ip[NET_IP_STR_LEN]; /* Optionally given by REPLCONF ip-address */
    int slave_capa;         /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */
    multiState mstate;      /* MULTI/EXEC state */
    int btype;              /* Type of blocking op if CLIENT_BLOCKED. */
    blockingState bpop;     /* blocking state */
    long long woff;         /* Last write global replication offset. */
    list *watched_keys;     /* Keys WATCHED for MULTI/EXEC CAS */
    dict *pubsub_channels;  /* channels a client is interested in (SUBSCRIBE) */
    list *pubsub_patterns;  /* patterns a client is interested in (SUBSCRIBE) */
    sds peerid;             /* Cached peer ID. */
    /* Response buffer */
    int bufpos;             /* Used bytes inside buf[]. */
    char buf[PROTO_REPLY_CHUNK_BYTES]; /* Static output buffer, used before
                                          switching to the reply list. */
} client;
/* A single RDB "save point": save after 'seconds' if >= 'changes' writes. */
struct saveparam {
    time_t seconds;
    int changes;
};
/* Objects shared across the whole server to avoid re-allocating common
 * replies, small integers and frequently used command names. */
struct sharedObjectsStruct {
    robj *crlf, *ok, *err, *emptybulk, *czero, *cone, *cnegone, *pong, *space,
    *colon, *nullbulk, *nullmultibulk, *queued,
    *emptymultibulk, *wrongtypeerr, *nokeyerr, *syntaxerr, *sameobjecterr,
    *outofrangeerr, *noscripterr, *loadingerr, *slowscripterr, *bgsaveerr,
    *masterdownerr, *roslaveerr, *execaborterr, *noautherr, *noreplicaserr,
    *busykeyerr, *oomerr, *plus, *messagebulk, *pmessagebulk, *subscribebulk,
    *unsubscribebulk, *psubscribebulk, *punsubscribebulk, *del, *rpop, *lpop,
    *lpush, *emptyscan, *minstring, *maxstring,
    *select[PROTO_SHARED_SELECT_CMDS],
    *integers[OBJ_SHARED_INTEGERS],
    *mbulkhdr[OBJ_SHARED_BULKHDR_LEN], /* "*<value>\r\n" */
    *bulkhdr[OBJ_SHARED_BULKHDR_LEN];  /* "$<value>\r\n" */
};
/* ZSETs use a specialized version of Skiplists */
typedef struct zskiplistNode {
    robj *obj;                          /* Member object. */
    double score;                       /* Sorting score. */
    struct zskiplistNode *backward;     /* Previous node at level 0. */
    struct zskiplistLevel {
        struct zskiplistNode *forward;  /* Next node at this level. */
        unsigned int span;              /* Nodes skipped by 'forward'. */
    } level[];                          /* Flexible array, one per level. */
} zskiplistNode;
typedef struct zskiplist {
    struct zskiplistNode *header, *tail;
    unsigned long length;               /* Number of nodes (excluding header). */
    int level;                          /* Current max level among all nodes. */
} zskiplist;
/* A sorted set: dict maps member -> score, skiplist keeps score order. */
typedef struct zset {
    dict *dict;
    zskiplist *zsl;
} zset;
/* Output buffer limits for a class of clients (normal/slave/pubsub):
 * a client is dropped at hard_limit_bytes, or after staying above
 * soft_limit_bytes for soft_limit_seconds. */
typedef struct clientBufferLimitsConfig {
    unsigned long long hard_limit_bytes;
    unsigned long long soft_limit_bytes;
    time_t soft_limit_seconds;
} clientBufferLimitsConfig;
extern clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT];
/* The redisOp structure defines a Redis Operation, that is an instance of
 * a command with an argument vector, database ID, propagation target
 * (PROPAGATE_*), and command pointer.
 *
 * Currently only used to additionally propagate more commands to AOF/Replication
 * after the propagation of the executed command. */
typedef struct redisOp {
    robj **argv;
    int argc, dbid, target;
    struct redisCommand *cmd;
} redisOp;
/* Defines an array of Redis operations. There is an API to add to this
 * structure in a easy way.
 *
 * redisOpArrayInit();
 * redisOpArrayAppend();
 * redisOpArrayFree();
 */
typedef struct redisOpArray {
    redisOp *ops;
    int numops;
} redisOpArray;
/*-----------------------------------------------------------------------------
 * Global server state
 *----------------------------------------------------------------------------*/
struct clusterState;    /* Opaque here; defined in cluster.h. */
/* AIX defines hz to __hz, we don't use this define and in order to allow
 * Redis build on AIX we need to undef it. */
#ifdef _AIX
#undef hz
#endif
/* The global server state: a single instance of this structure (the
 * 'server' global) holds the full configuration and runtime state. */
struct redisServer {
    /* General */
    pid_t pid;                  /* Main process pid. */
    char *configfile;           /* Absolute config file path, or NULL */
    char *executable;           /* Absolute executable file path. */
    char **exec_argv;           /* Executable argv vector (copy). */
    int hz;                     /* serverCron() calls frequency in hertz */
    redisDb *db;                /* Array of 'dbnum' databases. */
    dict *commands;             /* Command table */
    dict *orig_commands;        /* Command table before command renaming. */
    aeEventLoop *el;            /* Event loop driving all I/O and timers. */
    unsigned lruclock:LRU_BITS; /* Clock for LRU eviction */
    int shutdown_asap;          /* SHUTDOWN needed ASAP */
    int activerehashing;        /* Incremental rehash in serverCron() */
    char *requirepass;          /* Pass for AUTH command, or NULL */
    char *pidfile;              /* PID file path */
    int arch_bits;              /* 32 or 64 depending on sizeof(long) */
    int cronloops;              /* Number of times the cron function run */
    char runid[CONFIG_RUN_ID_SIZE+1];  /* ID always different at every exec. */
    int sentinel_mode;          /* True if this instance is a Sentinel. */
    /* Networking */
    int port;                   /* TCP listening port */
    int tcp_backlog;            /* TCP listen() backlog */
    char *bindaddr[CONFIG_BINDADDR_MAX]; /* Addresses we should bind to */
    int bindaddr_count;         /* Number of addresses in server.bindaddr[] */
    char *unixsocket;           /* UNIX socket path */
    mode_t unixsocketperm;      /* UNIX socket permission */
    int ipfd[CONFIG_BINDADDR_MAX]; /* TCP socket file descriptors */
    int ipfd_count;             /* Used slots in ipfd[] */
    int sofd;                   /* Unix socket file descriptor */
    int cfd[CONFIG_BINDADDR_MAX];/* Cluster bus listening socket */
    int cfd_count;              /* Used slots in cfd[] */
    list *clients;              /* List of active clients */
    list *clients_to_close;     /* Clients to close asynchronously */
    list *clients_pending_write; /* Clients with output to write or a write
                                    handler to install. */
    list *slaves, *monitors;    /* List of slaves and MONITORs */
    client *current_client; /* Current client, only used on crash report */
    int clients_paused;         /* True if clients are currently paused */
    mstime_t clients_pause_end_time; /* Time when we undo clients_paused */
    char neterr[ANET_ERR_LEN];   /* Error buffer for anet.c */
    dict *migrate_cached_sockets;/* MIGRATE cached sockets */
    uint64_t next_client_id;    /* Next client unique ID. Incremental. */
    int protected_mode;         /* Don't accept external connections. */
    /* RDB / AOF loading information */
    int loading;                /* We are loading data from disk if true */
    off_t loading_total_bytes;  /* Total bytes expected to be loaded. */
    off_t loading_loaded_bytes; /* Bytes loaded so far. */
    time_t loading_start_time;  /* Unix time the load started. */
    off_t loading_process_events_interval_bytes; /* Serve events every N bytes
                                    loaded so the server stays responsive. */
    /* Fast pointers to often looked up command */
    struct redisCommand *delCommand, *multiCommand, *lpushCommand, *lpopCommand,
                        *rpopCommand, *sremCommand, *execCommand;
    /* Fields used only for stats */
    time_t stat_starttime;          /* Server start time */
    long long stat_numcommands;     /* Number of processed commands */
    long long stat_numconnections;  /* Number of connections received */
    long long stat_expiredkeys;     /* Number of expired keys */
    long long stat_evictedkeys;     /* Number of evicted keys (maxmemory) */
    long long stat_keyspace_hits;   /* Number of successful lookups of keys */
    long long stat_keyspace_misses; /* Number of failed lookups of keys */
    size_t stat_peak_memory;        /* Max used memory record */
    long long stat_fork_time;       /* Time needed to perform latest fork() */
    double stat_fork_rate;          /* Fork rate in GB/sec. */
    long long stat_rejected_conn;   /* Clients rejected because of maxclients */
    long long stat_sync_full;       /* Number of full resyncs with slaves. */
    long long stat_sync_partial_ok; /* Number of accepted PSYNC requests. */
    long long stat_sync_partial_err;/* Number of unaccepted PSYNC requests. */
    list *slowlog;                  /* SLOWLOG list of commands */
    long long slowlog_entry_id;     /* SLOWLOG current entry ID */
    long long slowlog_log_slower_than; /* SLOWLOG time limit (to get logged) */
    unsigned long slowlog_max_len;     /* SLOWLOG max number of items logged */
    size_t resident_set_size;       /* RSS sampled in serverCron(). */
    long long stat_net_input_bytes; /* Bytes read from network. */
    long long stat_net_output_bytes; /* Bytes written to network. */
    /* The following two are used to track instantaneous metrics, like
     * number of operations per second, network traffic. */
    struct {
        long long last_sample_time; /* Timestamp of last sample in ms */
        long long last_sample_count;/* Count in last sample */
        long long samples[STATS_METRIC_SAMPLES];
        int idx;
    } inst_metric[STATS_METRIC_COUNT];
    /* Configuration */
    int verbosity;                  /* Loglevel in redis.conf */
    int maxidletime;                /* Client timeout in seconds */
    int tcpkeepalive;               /* Set SO_KEEPALIVE if non-zero. */
    int active_expire_enabled;      /* Can be disabled for testing purposes. */
    size_t client_max_querybuf_len; /* Limit for client query buffer length */
    int dbnum;                      /* Total number of configured DBs */
    int supervised;                 /* 1 if supervised, 0 otherwise. */
    int supervised_mode;            /* See SUPERVISED_* */
    int daemonize;                  /* True if running as a daemon */
    clientBufferLimitsConfig client_obuf_limits[CLIENT_TYPE_OBUF_COUNT];
#ifdef USE_PMDK
    /* Persistent memory */
    char* pm_file_path;             /* Path to persistent memory file */
    size_t pm_file_size;            /* If PM file does not exist, create new one with given size */
    bool persistent;                /* Persistence enabled/disabled */
    bool pm_reconstruct_required;   /* reconstruct database from PMEM */
    PMEMobjpool *pm_pool;           /* PMEM pool handle */
    TOID(struct redis_pmem_root) pm_rootoid; /*PMEM root object OID*/
    uint64_t pool_uuid_lo;          /* PMEM pool UUID */
#endif
    /* AOF persistence */
    int aof_state;                  /* AOF_(ON|OFF|WAIT_REWRITE) */
    int aof_fsync;                  /* Kind of fsync() policy */
    char *aof_filename;             /* Name of the AOF file */
    int aof_no_fsync_on_rewrite;    /* Don't fsync if a rewrite is in prog. */
    int aof_rewrite_perc;           /* Rewrite AOF if % growth is > M and... */
    off_t aof_rewrite_min_size;     /* the AOF file is at least N bytes. */
    off_t aof_rewrite_base_size;    /* AOF size on latest startup or rewrite. */
    off_t aof_current_size;         /* AOF current size. */
    int aof_rewrite_scheduled;      /* Rewrite once BGSAVE terminates. */
    pid_t aof_child_pid;            /* PID if rewriting process */
    list *aof_rewrite_buf_blocks;   /* Hold changes during an AOF rewrite. */
    sds aof_buf;      /* AOF buffer, written before entering the event loop */
    int aof_fd;       /* File descriptor of currently selected AOF file */
    int aof_selected_db; /* Currently selected DB in AOF */
    time_t aof_flush_postponed_start; /* UNIX time of postponed AOF flush */
    time_t aof_last_fsync;            /* UNIX time of last fsync() */
    time_t aof_rewrite_time_last;   /* Time used by last AOF rewrite run. */
    time_t aof_rewrite_time_start;  /* Current AOF rewrite start time. */
    int aof_lastbgrewrite_status;   /* C_OK or C_ERR */
    unsigned long aof_delayed_fsync;  /* delayed AOF fsync() counter */
    int aof_rewrite_incremental_fsync;/* fsync incrementally while rewriting? */
    int aof_last_write_status;      /* C_OK or C_ERR */
    int aof_last_write_errno;       /* Valid if aof_last_write_status is ERR */
    int aof_load_truncated;         /* Don't stop on unexpected AOF EOF. */
    /* AOF pipes used to communicate between parent and child during rewrite. */
    int aof_pipe_write_data_to_child;
    int aof_pipe_read_data_from_parent;
    int aof_pipe_write_ack_to_parent;
    int aof_pipe_read_ack_from_child;
    int aof_pipe_write_ack_to_child;
    int aof_pipe_read_ack_from_parent;
    int aof_stop_sending_diff;     /* If true stop sending accumulated diffs
                                      to child process. */
    sds aof_child_diff;             /* AOF diff accumulator child side. */
    /* RDB persistence */
    long long dirty;                /* Changes to DB from the last save */
    long long dirty_before_bgsave;  /* Used to restore dirty on failed BGSAVE */
    pid_t rdb_child_pid;            /* PID of RDB saving child */
    struct saveparam *saveparams;   /* Save points array for RDB */
    int saveparamslen;              /* Number of saving points */
    char *rdb_filename;             /* Name of RDB file */
    int rdb_compression;            /* Use compression in RDB? */
    int rdb_checksum;               /* Use RDB checksum? */
    time_t lastsave;                /* Unix time of last successful save */
    time_t lastbgsave_try;          /* Unix time of last attempted bgsave */
    time_t rdb_save_time_last;      /* Time used by last RDB save run. */
    time_t rdb_save_time_start;     /* Current RDB save start time. */
    int rdb_bgsave_scheduled;       /* BGSAVE when possible if true. */
    int rdb_child_type;             /* Type of save by active child. */
    int lastbgsave_status;          /* C_OK or C_ERR */
    int stop_writes_on_bgsave_err;  /* Don't allow writes if can't BGSAVE */
    int rdb_pipe_write_result_to_parent; /* RDB pipes used to return the state */
    int rdb_pipe_read_result_from_child; /* of each slave in diskless SYNC. */
    /* Propagation of commands in AOF / replication */
    redisOpArray also_propagate;    /* Additional command to propagate. */
    /* Logging */
    char *logfile;                  /* Path of log file */
    int syslog_enabled;             /* Is syslog enabled? */
    char *syslog_ident;             /* Syslog ident */
    int syslog_facility;            /* Syslog facility */
    /* Replication (master) */
    int slaveseldb;                 /* Last SELECTed DB in replication output */
    long long master_repl_offset;   /* Global replication offset */
    int repl_ping_slave_period;     /* Master pings the slave every N seconds */
    char *repl_backlog;             /* Replication backlog for partial syncs */
    long long repl_backlog_size;    /* Backlog circular buffer size */
    long long repl_backlog_histlen; /* Backlog actual data length */
    long long repl_backlog_idx;     /* Backlog circular buffer current offset */
    long long repl_backlog_off;     /* Replication offset of first byte in the
                                       backlog buffer. */
    time_t repl_backlog_time_limit; /* Time without slaves after the backlog
                                       gets released. */
    time_t repl_no_slaves_since;    /* We have no slaves since that time.
                                       Only valid if server.slaves len is 0. */
    int repl_min_slaves_to_write;   /* Min number of slaves to write. */
    int repl_min_slaves_max_lag;    /* Max lag of <count> slaves to write. */
    int repl_good_slaves_count;     /* Number of slaves with lag <= max_lag. */
    int repl_diskless_sync;         /* Send RDB to slaves sockets directly. */
    int repl_diskless_sync_delay;   /* Delay to start a diskless repl BGSAVE. */
    /* Replication (slave) */
    char *masterauth;               /* AUTH with this password with master */
    char *masterhost;               /* Hostname of master */
    int masterport;                 /* Port of master */
    int repl_timeout;               /* Timeout after N seconds of master idle */
    client *master;     /* Client that is master for this slave */
    client *cached_master; /* Cached master to be reused for PSYNC. */
    int repl_syncio_timeout; /* Timeout for synchronous I/O calls */
    int repl_state;          /* Replication status if the instance is a slave */
    off_t repl_transfer_size; /* Size of RDB to read from master during sync. */
    off_t repl_transfer_read; /* Amount of RDB read from master during sync. */
    off_t repl_transfer_last_fsync_off; /* Offset when we fsync-ed last time. */
    int repl_transfer_s;     /* Slave -> Master SYNC socket */
    int repl_transfer_fd;    /* Slave -> Master SYNC temp file descriptor */
    char *repl_transfer_tmpfile; /* Slave-> master SYNC temp file name */
    time_t repl_transfer_lastio; /* Unix time of the latest read, for timeout */
    int repl_serve_stale_data; /* Serve stale data when link is down? */
    int repl_slave_ro;          /* Slave is read only? */
    time_t repl_down_since; /* Unix time at which link with master went down */
    int repl_disable_tcp_nodelay;   /* Disable TCP_NODELAY after SYNC? */
    int slave_priority;             /* Reported in INFO and used by Sentinel. */
    int slave_announce_port;        /* Give the master this listening port. */
    char *slave_announce_ip;        /* Give the master this ip address. */
    char repl_master_runid[CONFIG_RUN_ID_SIZE+1];  /* Master run id for PSYNC.*/
    long long repl_master_initial_offset;         /* Master PSYNC offset. */
    /* Replication script cache. */
    dict *repl_scriptcache_dict;        /* SHA1 all slaves are aware of. */
    list *repl_scriptcache_fifo;        /* First in, first out LRU eviction. */
    unsigned int repl_scriptcache_size; /* Max number of elements. */
    /* Synchronous replication. */
    list *clients_waiting_acks;         /* Clients waiting in WAIT command. */
    int get_ack_from_slaves;            /* If true we send REPLCONF GETACK. */
    /* Limits */
    unsigned int maxclients;            /* Max number of simultaneous clients */
    unsigned long long maxmemory;   /* Max number of memory bytes to use */
    int maxmemory_policy;           /* Policy for key eviction */
    int maxmemory_samples;          /* Precision of random sampling */
    /* Blocked clients */
    unsigned int bpop_blocked_clients; /* Number of clients blocked by lists */
    list *unblocked_clients; /* list of clients to unblock before next loop */
    list *ready_keys;        /* List of readyList structures for BLPOP & co */
    /* Sort parameters - qsort_r() is only available under BSD so we
     * have to take this state global, in order to pass it to sortCompare() */
    int sort_desc;
    int sort_alpha;
    int sort_bypattern;
    int sort_store;
    /* Zip structure config, see redis.conf for more information  */
    size_t hash_max_ziplist_entries;
    size_t hash_max_ziplist_value;
    size_t set_max_intset_entries;
    size_t zset_max_ziplist_entries;
    size_t zset_max_ziplist_value;
    size_t hll_sparse_max_bytes;
    /* List parameters */
    int list_max_ziplist_size;
    int list_compress_depth;
    /* time cache */
    time_t unixtime;        /* Unix time sampled every cron cycle. */
    long long mstime;       /* Like 'unixtime' but with milliseconds resolution. */
    /* Pubsub */
    dict *pubsub_channels;  /* Map channels to list of subscribed clients */
    list *pubsub_patterns;  /* A list of pubsub_patterns */
    int notify_keyspace_events; /* Events to propagate via Pub/Sub. This is an
                                   xor of NOTIFY_... flags. */
    /* Cluster */
    int cluster_enabled;      /* Is cluster enabled? */
    mstime_t cluster_node_timeout; /* Cluster node timeout. */
    char *cluster_configfile; /* Cluster auto-generated config file name. */
    struct clusterState *cluster;  /* State of the cluster */
    int cluster_migration_barrier; /* Cluster replicas migration barrier. */
    int cluster_slave_validity_factor; /* Slave max data age for failover. */
    int cluster_require_full_coverage; /* If true, put the cluster down if
                                          there is at least an uncovered slot.*/
    /* Scripting */
    lua_State *lua; /* The Lua interpreter. We use just one for all clients */
    client *lua_client;   /* The "fake client" to query Redis from Lua */
    client *lua_caller;   /* The client running EVAL right now, or NULL */
    dict *lua_scripts;         /* A dictionary of SHA1 -> Lua scripts */
    mstime_t lua_time_limit;  /* Script timeout in milliseconds */
    mstime_t lua_time_start;  /* Start time of script, milliseconds time */
    int lua_write_dirty;  /* True if a write command was called during the
                             execution of the current script. */
    int lua_random_dirty; /* True if a random command was called during the
                             execution of the current script. */
    int lua_replicate_commands; /* True if we are doing single commands repl. */
    int lua_multi_emitted;/* True if we already propagated MULTI. */
    int lua_repl;         /* Script replication flags for redis.set_repl(). */
    int lua_timedout;     /* True if we reached the time limit for script
                             execution. */
    int lua_kill;         /* Kill the script if true. */
    int lua_always_replicate_commands; /* Default replication type. */
    /* Latency monitor */
    long long latency_monitor_threshold;
    dict *latency_events;
    /* Assert & bug reporting */
    char *assert_failed;
    char *assert_file;
    int assert_line;
    int bug_report_start; /* True if bug report header was already logged. */
    int watchdog_period;  /* Software watchdog period in ms. 0 = off */
    /* System hardware info */
    size_t system_memory_size;  /* Total memory in system as reported by OS */
};
/* A single PSUBSCRIBE registration: which client subscribed to which
 * glob-style pattern. */
typedef struct pubsubPattern {
    client *client;
    robj *pattern;
} pubsubPattern;
/* Command implementation entry point. */
typedef void redisCommandProc(client *c);
/* Returns the positions of key arguments in 'argv' (see getkeys_proc). */
typedef int *redisGetKeysProc(struct redisCommand *cmd, robj **argv, int argc, int *numkeys);
struct redisCommand {
    char *name;
    redisCommandProc *proc;
    int arity;              /* Number of args, negative = minimum number. */
    char *sflags; /* Flags as string representation, one char per flag. */
    int flags;    /* The actual flags, obtained from the 'sflags' field. */
    /* Use a function to determine keys arguments in a command line.
     * Used for Redis Cluster redirect. */
    redisGetKeysProc *getkeys_proc;
    /* What keys should be loaded in background when calling this command? */
    int firstkey; /* The first argument that's a key (0 = no keys) */
    int lastkey;  /* The last argument that's a key */
    int keystep;  /* The step between first and last key */
    long long microseconds, calls;  /* Runtime stats for this command. */
};
/* Name/address pair, used for symbol resolution in debug reports. */
struct redisFunctionSym {
    char *name;
    unsigned long pointer;
};
/* Per-element state used while executing the SORT command. */
typedef struct _redisSortObject {
    robj *obj;          /* The element being sorted. */
    union {
        double score;   /* Numeric sort weight. */
        robj *cmpobj;   /* Object compared in ALPHA + BY pattern sorts. */
    } u;
} redisSortObject;
/* A GET/BY operation attached to a SORT command. */
typedef struct _redisSortOperation {
    int type;
    robj *pattern;
} redisSortOperation;
/* Structure to hold list iteration abstraction. */
typedef struct {
    robj *subject;              /* List object being iterated. */
    unsigned char encoding;     /* Encoding of 'subject'. */
    unsigned char direction;    /* Iteration direction */
    quicklistIter *iter;        /* Underlying quicklist iterator. */
} listTypeIterator;
/* Structure for an entry while iterating over a list. */
typedef struct {
    listTypeIterator *li;
    quicklistEntry entry; /* Entry in quicklist */
} listTypeEntry;
/* Structure to hold set iteration abstraction. */
typedef struct {
    robj *subject;      /* Set object being iterated. */
    int encoding;       /* Encoding of 'subject'. */
    int ii;             /* intset iterator */
    dictIterator *di;   /* Dict iterator, for hashtable-encoded sets. */
} setTypeIterator;
/* Structure to hold hash iteration abstraction. Note that iteration over
 * hashes involves both fields and values. Because it is possible that
 * not both are required, store pointers in the iterator to avoid
 * unnecessary memory allocation for fields/values. */
typedef struct {
    robj *subject;
    int encoding;
    unsigned char *fptr, *vptr; /* Field/value cursors (ziplist encoding). */
    dictIterator *di;           /* Dict iterator (hashtable encoding). */
    dictEntry *de;              /* Current dict entry (hashtable encoding). */
} hashTypeIterator;
/* Selector passed to hash iteration helpers: fetch field or value. */
#define OBJ_HASH_KEY 1
#define OBJ_HASH_VALUE 2
/*-----------------------------------------------------------------------------
* Extern declarations
*----------------------------------------------------------------------------*/
extern struct redisServer server;
extern struct sharedObjectsStruct shared;
extern dictType setDictType;
extern dictType zsetDictType;
extern dictType clusterNodesDictType;
extern dictType clusterNodesBlackListDictType;
extern dictType dbDictType;
extern dictType shaScriptObjectDictType;
extern double R_Zero, R_PosInf, R_NegInf, R_Nan;
extern dictType hashDictType;
extern dictType replScriptCacheDictType;
/*-----------------------------------------------------------------------------
* Functions prototypes
*----------------------------------------------------------------------------*/
/* Utils */
long long ustime(void);
long long mstime(void);
void getRandomHexChars(char *p, unsigned int len);
uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l);
void exitFromChild(int retcode);
size_t redisPopcount(void *s, long count);
void redisSetProcTitle(char *title);
/* networking.c -- Networking and Client related operations */
client *createClient(int fd);
void closeTimedoutClients(void);
void freeClient(client *c);
void freeClientAsync(client *c);
void resetClient(client *c);
void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask);
void *addDeferredMultiBulkLength(client *c);
void setDeferredMultiBulkLength(client *c, void *node, long length);
void processInputBuffer(client *c);
void acceptHandler(aeEventLoop *el, int fd, void *privdata, int mask);
void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask);
void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask);
void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask);
void addReplyBulk(client *c, robj *obj);
void addReplyBulkCString(client *c, const char *s);
void addReplyBulkCBuffer(client *c, const void *p, size_t len);
void addReplyBulkLongLong(client *c, long long ll);
void addReply(client *c, robj *obj);
void addReplySds(client *c, sds s);
void addReplyBulkSds(client *c, sds s);
void addReplyError(client *c, const char *err);
void addReplyStatus(client *c, const char *status);
void addReplyDouble(client *c, double d);
void addReplyHumanLongDouble(client *c, long double d);
void addReplyLongLong(client *c, long long ll);
void addReplyMultiBulkLen(client *c, long length);
void copyClientOutputBuffer(client *dst, client *src);
void *dupClientReplyValue(void *o);
void getClientsMaxBuffers(unsigned long *longest_output_list,
unsigned long *biggest_input_buffer);
char *getClientPeerId(client *client);
sds catClientInfoString(sds s, client *client);
sds getAllClientsInfoString(void);
void rewriteClientCommandVector(client *c, int argc, ...);
void rewriteClientCommandArgument(client *c, int i, robj *newval);
void replaceClientCommandVector(client *c, int argc, robj **argv);
unsigned long getClientOutputBufferMemoryUsage(client *c);
void freeClientsInAsyncFreeQueue(void);
void asyncCloseClientOnOutputBufferLimitReached(client *c);
int getClientType(client *c);
int getClientTypeByName(char *name);
char *getClientTypeName(int class);
void flushSlavesOutputBuffers(void);
void disconnectSlaves(void);
int listenToPort(int port, int *fds, int *count);
void pauseClients(mstime_t duration);
int clientsArePaused(void);
int processEventsWhileBlocked(void);
int handleClientsWithPendingWrites(void);
int clientHasPendingReplies(client *c);
void unlinkClient(client *c);
int writeToClient(int fd, client *c, int handler_installed);
#ifdef __GNUC__
void addReplyErrorFormat(client *c, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
void addReplyStatusFormat(client *c, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
#else
void addReplyErrorFormat(client *c, const char *fmt, ...);
void addReplyStatusFormat(client *c, const char *fmt, ...);
#endif
/* List data type */
void listTypeTryConversion(robj *subject, robj *value);
void listTypePush(robj *subject, robj *value, int where);
robj *listTypePop(robj *subject, int where);
unsigned long listTypeLength(robj *subject);
listTypeIterator *listTypeInitIterator(robj *subject, long index, unsigned char direction);
void listTypeReleaseIterator(listTypeIterator *li);
int listTypeNext(listTypeIterator *li, listTypeEntry *entry);
robj *listTypeGet(listTypeEntry *entry);
void listTypeInsert(listTypeEntry *entry, robj *value, int where);
int listTypeEqual(listTypeEntry *entry, robj *o);
void listTypeDelete(listTypeIterator *iter, listTypeEntry *entry);
void listTypeConvert(robj *subject, int enc);
void unblockClientWaitingData(client *c);
void handleClientsBlockedOnLists(void);
void popGenericCommand(client *c, int where);
void signalListAsReady(redisDb *db, robj *key);
/* MULTI/EXEC/WATCH... */
void unwatchAllKeys(client *c);
void initClientMultiState(client *c);
void freeClientMultiState(client *c);
void queueMultiCommand(client *c);
void touchWatchedKey(redisDb *db, robj *key);
void touchWatchedKeysOnFlush(int dbid);
void discardTransaction(client *c);
void flagTransaction(client *c);
void execCommandPropagateMulti(client *c);
/* Redis object implementation */
void decrRefCount(robj *o);
void decrRefCountVoid(void *o);
void incrRefCount(robj *o);
robj *resetRefCount(robj *obj);
void freeStringObject(robj *o);
void freeListObject(robj *o);
void freeSetObject(robj *o);
void freeZsetObject(robj *o);
void freeHashObject(robj *o);
robj *createObject(int type, void *ptr);
robj *createStringObject(const char *ptr, size_t len);
robj *createRawStringObject(const char *ptr, size_t len);
robj *createEmbeddedStringObject(const char *ptr, size_t len);
robj *dupStringObject(robj *o);
int isObjectRepresentableAsLongLong(robj *o, long long *llongval);
robj *tryObjectEncoding(robj *o);
robj *getDecodedObject(robj *o);
size_t stringObjectLen(robj *o);
robj *createStringObjectFromLongLong(long long value);
robj *createStringObjectFromLongDouble(long double value, int humanfriendly);
robj *createQuicklistObject(void);
robj *createZiplistObject(void);
robj *createSetObject(void);
robj *createIntsetObject(void);
robj *createHashObject(void);
robj *createZsetObject(void);
robj *createZsetZiplistObject(void);
int getLongFromObjectOrReply(client *c, robj *o, long *target, const char *msg);
int checkType(client *c, robj *o, int type);
int getLongLongFromObjectOrReply(client *c, robj *o, long long *target, const char *msg);
int getDoubleFromObjectOrReply(client *c, robj *o, double *target, const char *msg);
int getLongLongFromObject(robj *o, long long *target);
int getLongDoubleFromObject(robj *o, long double *target);
int getLongDoubleFromObjectOrReply(client *c, robj *o, long double *target, const char *msg);
char *strEncoding(int encoding);
int compareStringObjects(robj *a, robj *b);
int collateStringObjects(robj *a, robj *b);
int equalStringObjects(robj *a, robj *b);
unsigned long long estimateObjectIdleTime(robj *o);
/* True if the object's string payload is stored as an sds handle, i.e. the
 * encoding is RAW or EMBSTR (as opposed to an integer-encoded string).
 * The argument is parenthesized so expressions such as (o + 1) or a ? b : c
 * expand safely (CERT PRE01-C). */
#define sdsEncodedObject(objptr) \
    ((objptr)->encoding == OBJ_ENCODING_RAW || (objptr)->encoding == OBJ_ENCODING_EMBSTR)
#ifdef USE_PMDK
/* Persistent Memory support */
void decrRefCountPM(robj *o);
void freeStringObjectPM(robj *o);
robj *createObjectPM(int type, void *ptr);
robj *createRawStringObjectPM(const char *ptr, size_t len);
robj *dupStringObjectPM(robj *o);
#endif
/* Synchronous I/O with timeout */
ssize_t syncWrite(int fd, char *ptr, ssize_t size, long long timeout);
ssize_t syncRead(int fd, char *ptr, ssize_t size, long long timeout);
ssize_t syncReadLine(int fd, char *ptr, ssize_t size, long long timeout);
/* Replication */
void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc);
void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc);
void updateSlavesWaitingBgsave(int bgsaveerr, int type);
void replicationCron(void);
void replicationHandleMasterDisconnection(void);
void replicationCacheMaster(client *c);
void resizeReplicationBacklog(long long newsize);
void replicationSetMaster(char *ip, int port);
void replicationUnsetMaster(void);
void refreshGoodSlavesCount(void);
void replicationScriptCacheInit(void);
void replicationScriptCacheFlush(void);
void replicationScriptCacheAdd(sds sha1);
int replicationScriptCacheExists(sds sha1);
void processClientsWaitingReplicas(void);
void unblockClientWaitingReplicas(client *c);
int replicationCountAcksByOffset(long long offset);
void replicationSendNewlineToMaster(void);
long long replicationGetSlaveOffset(void);
char *replicationGetSlaveName(client *c);
long long getPsyncInitialOffset(void);
int replicationSetupSlaveForFullResync(client *slave, long long offset);
/* Generic persistence functions */
void startLoading(FILE *fp);
void loadingProgress(off_t pos);
void stopLoading(void);
/* RDB persistence */
#include "rdb.h"
/* AOF persistence */
void flushAppendOnlyFile(int force);
void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int argc);
void aofRemoveTempFile(pid_t childpid);
int rewriteAppendOnlyFileBackground(void);
int loadAppendOnlyFile(char *filename);
void stopAppendOnly(void);
int startAppendOnly(void);
void backgroundRewriteDoneHandler(int exitcode, int bysignal);
void aofRewriteBufferReset(void);
unsigned long aofRewriteBufferSize(void);
/* Sorted sets data type */
/* Struct to hold a inclusive/exclusive range spec by score comparison. */
typedef struct {
    double min, max;  /* Lower/upper score bound of the range. */
    int minex, maxex; /* are min or max exclusive? */
} zrangespec;
/* Struct to hold an inclusive/exclusive range spec by lexicographic comparison. */
typedef struct {
    robj *min, *max;  /* May be set to shared.(minstring|maxstring) */
    int minex, maxex; /* are min or max exclusive? */
} zlexrangespec;
zskiplist *zslCreate(void);
void zslFree(zskiplist *zsl);
zskiplistNode *zslInsert(zskiplist *zsl, double score, robj *obj);
unsigned char *zzlInsert(unsigned char *zl, robj *ele, double score);
int zslDelete(zskiplist *zsl, double score, robj *obj);
zskiplistNode *zslFirstInRange(zskiplist *zsl, zrangespec *range);
zskiplistNode *zslLastInRange(zskiplist *zsl, zrangespec *range);
double zzlGetScore(unsigned char *sptr);
void zzlNext(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
void zzlPrev(unsigned char *zl, unsigned char **eptr, unsigned char **sptr);
unsigned int zsetLength(robj *zobj);
void zsetConvert(robj *zobj, int encoding);
void zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen);
int zsetScore(robj *zobj, robj *member, double *score);
unsigned long zslGetRank(zskiplist *zsl, double score, robj *o);
/* Core functions */
int freeMemoryIfNeeded(void);
int processCommand(client *c);
void setupSignalHandlers(void);
struct redisCommand *lookupCommand(sds name);
struct redisCommand *lookupCommandByCString(char *s);
struct redisCommand *lookupCommandOrOriginal(sds name);
void call(client *c, int flags);
void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int flags);
void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int target);
void forceCommandPropagation(client *c, int flags);
void preventCommandPropagation(client *c);
void preventCommandAOF(client *c);
void preventCommandReplication(client *c);
int prepareForShutdown();
#ifdef __GNUC__
void serverLog(int level, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
#else
void serverLog(int level, const char *fmt, ...);
#endif
void serverLogRaw(int level, const char *msg);
void serverLogFromHandler(int level, const char *msg);
void usage(void);
void updateDictResizePolicy(void);
int htNeedsResize(dict *dict);
void populateCommandTable(void);
void resetCommandTableStats(void);
void adjustOpenFilesLimit(void);
void closeListeningSockets(int unlink_unix_socket);
void updateCachedTime(void);
void resetServerStats(void);
unsigned int getLRUClock(void);
const char *evictPolicyToString(void);
#define RESTART_SERVER_NONE 0
#define RESTART_SERVER_GRACEFULLY (1<<0) /* Do proper shutdown. */
#define RESTART_SERVER_CONFIG_REWRITE (1<<1) /* CONFIG REWRITE before restart.*/
int restartServer(int flags, mstime_t delay);
/* Set data type */
robj *setTypeCreate(robj *value);
int setTypeAdd(robj *subject, robj *value);
int setTypeRemove(robj *subject, robj *value);
int setTypeIsMember(robj *subject, robj *value);
setTypeIterator *setTypeInitIterator(robj *subject);
void setTypeReleaseIterator(setTypeIterator *si);
int setTypeNext(setTypeIterator *si, robj **objele, int64_t *llele);
robj *setTypeNextObject(setTypeIterator *si);
int setTypeRandomElement(robj *setobj, robj **objele, int64_t *llele);
unsigned long setTypeRandomElements(robj *set, unsigned long count, robj *aux_set);
unsigned long setTypeSize(robj *subject);
void setTypeConvert(robj *subject, int enc);
/* Hash data type */
void hashTypeConvert(robj *o, int enc);
void hashTypeTryConversion(robj *subject, robj **argv, int start, int end);
void hashTypeTryObjectEncoding(robj *subject, robj **o1, robj **o2);
robj *hashTypeGetObject(robj *o, robj *key);
int hashTypeExists(robj *o, robj *key);
int hashTypeSet(robj *o, robj *key, robj *value);
int hashTypeDelete(robj *o, robj *key);
unsigned long hashTypeLength(robj *o);
hashTypeIterator *hashTypeInitIterator(robj *subject);
void hashTypeReleaseIterator(hashTypeIterator *hi);
int hashTypeNext(hashTypeIterator *hi);
void hashTypeCurrentFromZiplist(hashTypeIterator *hi, int what,
unsigned char **vstr,
unsigned int *vlen,
long long *vll);
void hashTypeCurrentFromHashTable(hashTypeIterator *hi, int what, robj **dst);
robj *hashTypeCurrentObject(hashTypeIterator *hi, int what);
robj *hashTypeLookupWriteOrCreate(client *c, robj *key);
/* Pub / Sub */
int pubsubUnsubscribeAllChannels(client *c, int notify);
int pubsubUnsubscribeAllPatterns(client *c, int notify);
void freePubsubPattern(void *p);
int listMatchPubsubPattern(void *a, void *b);
int pubsubPublishMessage(robj *channel, robj *message);
/* Keyspace events notification */
void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid);
int keyspaceEventsStringToFlags(char *classes);
sds keyspaceEventsFlagsToString(int flags);
/* Configuration */
void loadServerConfig(char *filename, char *options);
void appendServerSaveParams(time_t seconds, int changes);
void resetServerSaveParams(void);
struct rewriteConfigState; /* Forward declaration to export API. */
void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force);
int rewriteConfig(char *path);
/* db.c -- Keyspace access API */
int removeExpire(redisDb *db, robj *key);
void propagateExpire(redisDb *db, robj *key);
int expireIfNeeded(redisDb *db, robj *key);
long long getExpire(redisDb *db, robj *key);
void setExpire(redisDb *db, robj *key, long long when);
robj *lookupKey(redisDb *db, robj *key, int flags);
robj *lookupKeyRead(redisDb *db, robj *key);
robj *lookupKeyWrite(redisDb *db, robj *key);
robj *lookupKeyReadOrReply(client *c, robj *key, robj *reply);
robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply);
robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags);
#define LOOKUP_NONE 0
#define LOOKUP_NOTOUCH (1<<0)
void dbAdd(redisDb *db, robj *key, robj *val);
void dbAddPM(redisDb *db, robj *key, robj *val);
void dbOverwrite(redisDb *db, robj *key, robj *val);
void dbOverwritePM(redisDb *db, robj *key, robj *val);
void setKey(redisDb *db, robj *key, robj *val);
void setKeyPM(redisDb *db, robj *key, robj *val);
int dbExists(redisDb *db, robj *key);
robj *dbRandomKey(redisDb *db);
int dbDelete(redisDb *db, robj *key);
robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o);
long long emptyDb(void(callback)(void*));
int selectDb(client *c, int id);
void signalModifiedKey(redisDb *db, robj *key);
void signalFlushedDb(int dbid);
unsigned int getKeysInSlot(unsigned int hashslot, robj **keys, unsigned int count);
unsigned int countKeysInSlot(unsigned int hashslot);
unsigned int delKeysInSlot(unsigned int hashslot);
int verifyClusterConfigWithData(void);
void scanGenericCommand(client *c, robj *o, unsigned long cursor);
int parseScanCursorOrReply(client *c, robj *o, unsigned long *cursor);
/* API to get key arguments from commands */
int *getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, int *numkeys);
void getKeysFreeResult(int *result);
int *zunionInterGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys);
int *evalGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys);
int *sortGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys);
int *migrateGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys);
/* Cluster */
void clusterInit(void);
unsigned short crc16(const char *buf, int len);
unsigned int keyHashSlot(char *key, int keylen);
void clusterCron(void);
void clusterPropagatePublish(robj *channel, robj *message);
void migrateCloseTimedoutSockets(void);
void clusterBeforeSleep(void);
/* Sentinel */
void initSentinelConfig(void);
void initSentinel(void);
void sentinelTimer(void);
char *sentinelHandleConfiguration(char **argv, int argc);
void sentinelIsRunning(void);
/* redis-check-rdb */
int redis_check_rdb(char *rdbfilename);
int redis_check_rdb_main(int argc, char **argv);
/* Scripting */
void scriptingInit(int setup);
int ldbRemoveChild(pid_t pid);
void ldbKillForkedSessions(void);
int ldbPendingChildren(void);
/* Blocked clients */
void processUnblockedClients(void);
void blockClient(client *c, int btype);
void unblockClient(client *c);
void replyToBlockedClientTimedOut(client *c);
int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit);
void disconnectAllBlockedClients(void);
/* Git SHA1 */
char *redisGitSHA1(void);
char *redisGitDirty(void);
uint64_t redisBuildId(void);
/* Commands prototypes */
void authCommand(client *c);
void pingCommand(client *c);
void echoCommand(client *c);
void commandCommand(client *c);
void setCommand(client *c);
void setnxCommand(client *c);
void setexCommand(client *c);
void psetexCommand(client *c);
void getCommand(client *c);
void delCommand(client *c);
void existsCommand(client *c);
void setbitCommand(client *c);
void getbitCommand(client *c);
void bitfieldCommand(client *c);
void setrangeCommand(client *c);
void getrangeCommand(client *c);
void incrCommand(client *c);
void decrCommand(client *c);
void incrbyCommand(client *c);
void decrbyCommand(client *c);
void incrbyfloatCommand(client *c);
void selectCommand(client *c);
void randomkeyCommand(client *c);
void keysCommand(client *c);
void scanCommand(client *c);
void dbsizeCommand(client *c);
void lastsaveCommand(client *c);
void saveCommand(client *c);
void bgsaveCommand(client *c);
void bgrewriteaofCommand(client *c);
void shutdownCommand(client *c);
void moveCommand(client *c);
void renameCommand(client *c);
void renamenxCommand(client *c);
void lpushCommand(client *c);
void rpushCommand(client *c);
void lpushxCommand(client *c);
void rpushxCommand(client *c);
void linsertCommand(client *c);
void lpopCommand(client *c);
void rpopCommand(client *c);
void llenCommand(client *c);
void lindexCommand(client *c);
void lrangeCommand(client *c);
void ltrimCommand(client *c);
void typeCommand(client *c);
void lsetCommand(client *c);
void saddCommand(client *c);
void sremCommand(client *c);
void smoveCommand(client *c);
void sismemberCommand(client *c);
void scardCommand(client *c);
void spopCommand(client *c);
void srandmemberCommand(client *c);
void sinterCommand(client *c);
void sinterstoreCommand(client *c);
void sunionCommand(client *c);
void sunionstoreCommand(client *c);
void sdiffCommand(client *c);
void sdiffstoreCommand(client *c);
void sscanCommand(client *c);
void syncCommand(client *c);
void flushdbCommand(client *c);
void flushallCommand(client *c);
void sortCommand(client *c);
void lremCommand(client *c);
void rpoplpushCommand(client *c);
void infoCommand(client *c);
void mgetCommand(client *c);
void monitorCommand(client *c);
void expireCommand(client *c);
void expireatCommand(client *c);
void pexpireCommand(client *c);
void pexpireatCommand(client *c);
void getsetCommand(client *c);
void ttlCommand(client *c);
void touchCommand(client *c);
void pttlCommand(client *c);
void persistCommand(client *c);
void slaveofCommand(client *c);
void roleCommand(client *c);
void debugCommand(client *c);
void msetCommand(client *c);
void msetnxCommand(client *c);
void zaddCommand(client *c);
void zincrbyCommand(client *c);
void zrangeCommand(client *c);
void zrangebyscoreCommand(client *c);
void zrevrangebyscoreCommand(client *c);
void zrangebylexCommand(client *c);
void zrevrangebylexCommand(client *c);
void zcountCommand(client *c);
void zlexcountCommand(client *c);
void zrevrangeCommand(client *c);
void zcardCommand(client *c);
void zremCommand(client *c);
void zscoreCommand(client *c);
void zremrangebyscoreCommand(client *c);
void zremrangebylexCommand(client *c);
void multiCommand(client *c);
void execCommand(client *c);
void discardCommand(client *c);
void blpopCommand(client *c);
void brpopCommand(client *c);
void brpoplpushCommand(client *c);
void appendCommand(client *c);
void strlenCommand(client *c);
void zrankCommand(client *c);
void zrevrankCommand(client *c);
void hsetCommand(client *c);
void hsetnxCommand(client *c);
void hgetCommand(client *c);
void hmsetCommand(client *c);
void hmgetCommand(client *c);
void hdelCommand(client *c);
void hlenCommand(client *c);
void hstrlenCommand(client *c);
void zremrangebyrankCommand(client *c);
void zunionstoreCommand(client *c);
void zinterstoreCommand(client *c);
void zscanCommand(client *c);
void hkeysCommand(client *c);
void hvalsCommand(client *c);
void hgetallCommand(client *c);
void hexistsCommand(client *c);
void hscanCommand(client *c);
void configCommand(client *c);
void hincrbyCommand(client *c);
void hincrbyfloatCommand(client *c);
void subscribeCommand(client *c);
void unsubscribeCommand(client *c);
void psubscribeCommand(client *c);
void punsubscribeCommand(client *c);
void publishCommand(client *c);
void pubsubCommand(client *c);
void watchCommand(client *c);
void unwatchCommand(client *c);
void clusterCommand(client *c);
void restoreCommand(client *c);
void migrateCommand(client *c);
void askingCommand(client *c);
void readonlyCommand(client *c);
void readwriteCommand(client *c);
void dumpCommand(client *c);
void objectCommand(client *c);
void clientCommand(client *c);
void evalCommand(client *c);
void evalShaCommand(client *c);
void scriptCommand(client *c);
void timeCommand(client *c);
void bitopCommand(client *c);
void bitcountCommand(client *c);
void bitposCommand(client *c);
void replconfCommand(client *c);
void waitCommand(client *c);
void geoencodeCommand(client *c);
void geodecodeCommand(client *c);
void georadiusByMemberCommand(client *c);
void georadiusCommand(client *c);
void geoaddCommand(client *c);
void geohashCommand(client *c);
void geoposCommand(client *c);
void geodistCommand(client *c);
void pfselftestCommand(client *c);
void pfaddCommand(client *c);
void pfcountCommand(client *c);
void pfmergeCommand(client *c);
void pfdebugCommand(client *c);
void latencyCommand(client *c);
void securityWarningCommand(client *c);
#if defined(__GNUC__)
void *calloc(size_t count, size_t size) __attribute__ ((deprecated));
void free(void *ptr) __attribute__ ((deprecated));
void *malloc(size_t size) __attribute__ ((deprecated));
void *realloc(void *ptr, size_t size) __attribute__ ((deprecated));
#endif
/* Debugging stuff */
void _serverAssertWithInfo(client *c, robj *o, char *estr, char *file, int line);
void _serverAssert(char *estr, char *file, int line);
void _serverPanic(char *msg, char *file, int line);
void bugReportStart(void);
void serverLogObjectDebugInfo(robj *o);
void sigsegvHandler(int sig, siginfo_t *info, void *secret);
sds genRedisInfoString(char *section);
void enableWatchdog(int period);
void disableWatchdog(void);
void watchdogScheduleSignal(int period);
void serverLogHexDump(int level, char *descr, void *value, size_t len);
int memtest_preserving_test(unsigned long *m, size_t bytes, int passes);
/* printf-style debug trace prefixed with the source file and line.
 * NOTE(review): plain __VA_ARGS__ (no GNU ## extension) means callers must
 * pass at least one argument after fmt under strict C99/C11. */
#define redisDebug(fmt, ...) \
    printf("DEBUG %s:%d > " fmt "\n", __FILE__, __LINE__, __VA_ARGS__)
/* Emit a bare file:line marker; handy for tracing execution flow. */
#define redisDebugMark() \
    printf("-- MARK %s:%d --\n", __FILE__, __LINE__)
#endif
| 76,348 | 42.929229 | 153 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/sparkline.h
|
/* sparkline.h -- ASCII Sparklines header file
*
* ---------------------------------------------------------------------------
*
* Copyright(C) 2011-2014 Salvatore Sanfilippo <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SPARKLINE_H
#define __SPARKLINE_H
/* A sequence is represented of many "samples" */
/* One data point of a sparkline sequence. */
struct sample {
    double value; /* Sample magnitude used to plot the curve. */
    char *label;  /* Optional text label; may be NULL. */
};
/* An ordered collection of samples to be rendered as a sparkline. */
struct sequence {
    int length;             /* Number of entries in 'samples'. */
    int labels;             /* Count of samples carrying a non-NULL label. */
    struct sample *samples; /* Array of 'length' samples. */
    double min, max;        /* Value range across the samples. */
};
#define SPARKLINE_NO_FLAGS 0
#define SPARKLINE_FILL 1 /* Fill the area under the curve. */
#define SPARKLINE_LOG_SCALE 2 /* Use logarithmic scale. */
struct sequence *createSparklineSequence(void);
void sparklineSequenceAddSample(struct sequence *seq, double value, char *label);
void freeSparklineSequence(struct sequence *seq);
sds sparklineRenderRange(sds output, struct sequence *seq, int rows, int offset, int len, int flags);
sds sparklineRender(sds output, struct sequence *seq, int columns, int rows, int flags);
#endif /* __SPARKLINE_H */
| 2,345 | 40.157895 | 101 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/pmem.h
|
/*
* Copyright (c) 2017, Andreas Bluemle <andreas dot bluemle at itxperts dot de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMEM_H
#define __PMEM_H
#ifdef USE_PMDK
/* Persistent-memory record pairing a key with its value, linked into a
 * doubly linked list living in PMEM (see pmemAddToPmemList /
 * pmemRemoveFromPmemList below).
 * NOTE(review): PMEMoid / TOID come from the PMDK libpmemobj API --
 * assumed included by the translation unit before this header. */
typedef struct key_val_pair_PM {
    PMEMoid key_oid;                       /* Persistent handle of the key. */
    PMEMoid val_oid;                       /* Persistent handle of the value. */
    TOID(struct key_val_pair_PM) pmem_list_next; /* Next node in the PMEM list. */
    TOID(struct key_val_pair_PM) pmem_list_prev; /* Previous node in the PMEM list. */
} key_val_pair_PM;
int pmemReconstruct(void);
void pmemKVpairSet(void *key, void *val);
PMEMoid pmemAddToPmemList(void *key, void *val);
void pmemRemoveFromPmemList(PMEMoid kv_PM_oid);
#endif
#endif
| 2,040 | 41.520833 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/dict.h
|
/* Hash Tables Implementation.
*
* This file implements in-memory hash tables with insert/del/replace/find/
* get-random-element operations. Hash tables will auto-resize if needed
* tables of power of two in size are used, collisions are handled by
* chaining. See the source code for more information... :)
*
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#ifndef __DICT_H
#define __DICT_H
#define DICT_OK 0
#define DICT_ERR 1
/* Unused arguments generate annoying warnings... */
#define DICT_NOTUSED(V) ((void) V)
/* A single hash table entry. Collisions are resolved by chaining through
 * 'next' (see the file header). The value is a union so small scalars can
 * be stored inline without a heap allocation. */
typedef struct dictEntry {
    void *key;          /* Entry key (owned per dictType keyDup/keyDestructor). */
    union {
        void *val;      /* Generic pointer value. */
        uint64_t u64;   /* Inline unsigned integer value. */
        int64_t s64;    /* Inline signed integer value. */
        double d;       /* Inline double value. */
    } v;
    struct dictEntry *next; /* Next entry in the same bucket's chain. */
} dictEntry;
/* Per-dictionary behavior table: hashing, optional deep-copy and destruction
 * of keys/values, and key comparison. Any callback left NULL falls back to
 * the default behavior in the dict* macros below (e.g. pointer equality for
 * keys, plain pointer assignment for values). Each callback receives the
 * dict's 'privdata' as context. */
typedef struct dictType {
    unsigned int (*hashFunction)(const void *key);              /* Hash a key. */
    void *(*keyDup)(void *privdata, const void *key);           /* Optional key copier. */
    void *(*valDup)(void *privdata, const void *obj);           /* Optional value copier. */
    int (*keyCompare)(void *privdata, const void *key1, const void *key2); /* Non-zero if equal. */
    void (*keyDestructor)(void *privdata, void *key);           /* Optional key free. */
    void (*valDestructor)(void *privdata, void *obj);           /* Optional value free. */
} dictType;
/* This is our hash table structure. Every dictionary has two of this as we
* implement incremental rehashing, for the old to the new table. */
typedef struct dictht {
    dictEntry **table;      /* Array of 'size' bucket chains. */
    unsigned long size;     /* Number of buckets (a power of two). */
    unsigned long sizemask; /* NOTE(review): presumably size-1, used to mask
                             * hashes into bucket indexes -- confirm in dict.c. */
    unsigned long used;     /* Number of entries currently stored. */
} dictht;
/* The dictionary proper. Two hash tables are kept so rehashing can proceed
 * incrementally from ht[0] to ht[1] (see the comment above dictht). */
typedef struct dict {
    dictType *type;  /* Callback table defining key/value semantics. */
    void *privdata;  /* Opaque context passed to the dictType callbacks. */
    dictht ht[2];    /* ht[0] is the main table, ht[1] the rehash target. */
    long rehashidx; /* rehashing not in progress if rehashidx == -1 */
    int iterators; /* number of iterators currently running */
} dict;
/* If safe is set to 1 this is a safe iterator, that means, you can call
* dictAdd, dictFind, and other functions against the dictionary even while
* iterating. Otherwise it is a non safe iterator, and only dictNext()
* should be called while iterating. */
typedef struct dictIterator {
    dict *d;                    /* Dictionary being iterated. */
    long index;                 /* Current bucket index within the table. */
    int table, safe;            /* Table (0 or 1) and safe-iterator flag. */
    dictEntry *entry, *nextEntry; /* Current entry and its successor (cached so
                                   * a safe iterator survives deletion of 'entry'). */
    /* unsafe iterator fingerprint for misuse detection. */
    long long fingerprint;
} dictIterator;
typedef void (dictScanFunction)(void *privdata, const dictEntry *de);
/* This is the initial size of every hash table */
#define DICT_HT_INITIAL_SIZE 4
/* ------------------------------- Macros ------------------------------------*/
/* Invoke the type's value destructor (if any) on the entry's value.
 * Wrapped in do { } while(0) so the macro behaves as a single statement
 * and is safe inside unbraced if/else bodies (CERT PRE10-C); the original
 * bare 'if' would capture a following 'else'. */
#define dictFreeVal(d, entry) do { \
        if ((d)->type->valDestructor) \
            (d)->type->valDestructor((d)->privdata, (entry)->v.val); \
    } while(0)
/* Store _val_ into the entry, deep-copying through valDup when the type
 * provides one. 'entry' is parenthesized so pointer expressions expand
 * safely (CERT PRE01-C). */
#define dictSetVal(d, entry, _val_) do { \
    if ((d)->type->valDup) \
        (entry)->v.val = (d)->type->valDup((d)->privdata, _val_); \
    else \
        (entry)->v.val = (_val_); \
} while(0)
/* Store a scalar value directly in the entry's inline union slot.
 * Arguments are parenthesized so any expression expands safely
 * (CERT PRE01-C). */
#define dictSetSignedIntegerVal(entry, _val_) \
    do { (entry)->v.s64 = (_val_); } while(0)
#define dictSetUnsignedIntegerVal(entry, _val_) \
    do { (entry)->v.u64 = (_val_); } while(0)
#define dictSetDoubleVal(entry, _val_) \
    do { (entry)->v.d = (_val_); } while(0)
/* Invoke the type's key destructor (if any) on the entry's key.
 * Wrapped in do { } while(0) so the macro behaves as a single statement
 * and is safe inside unbraced if/else bodies (CERT PRE10-C). */
#define dictFreeKey(d, entry) do { \
        if ((d)->type->keyDestructor) \
            (d)->type->keyDestructor((d)->privdata, (entry)->key); \
    } while(0)
/* Store _key_ into the entry, deep-copying through keyDup when the type
 * provides one. 'entry' is parenthesized so pointer expressions expand
 * safely (CERT PRE01-C). */
#define dictSetKey(d, entry, _key_) do { \
    if ((d)->type->keyDup) \
        (entry)->key = (d)->type->keyDup((d)->privdata, _key_); \
    else \
        (entry)->key = (_key_); \
} while(0)
/* Compare two keys with the type's comparator, or fall back to pointer
 * equality when no comparator is set. */
#define dictCompareKeys(d, key1, key2) \
    (((d)->type->keyCompare) ? \
        (d)->type->keyCompare((d)->privdata, key1, key2) : \
        (key1) == (key2))
/* Hash a key with the dictionary's hash function. */
#define dictHashKey(d, key) (d)->type->hashFunction(key)
/* Field accessors for a dictEntry (he = hash entry). */
#define dictGetKey(he) ((he)->key)
#define dictGetVal(he) ((he)->v.val)
#define dictGetSignedIntegerVal(he) ((he)->v.s64)
#define dictGetUnsignedIntegerVal(he) ((he)->v.u64)
#define dictGetDoubleVal(he) ((he)->v.d)
/* Aggregate stats over both tables (ht[1] is only populated mid-rehash). */
#define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size)
#define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used)
/* True while an incremental rehash from ht[0] to ht[1] is in progress. */
#define dictIsRehashing(d) ((d)->rehashidx != -1)
/* API */
dict *dictCreate(dictType *type, void *privDataPtr);
int dictExpand(dict *d, unsigned long size);
int dictAdd(dict *d, void *key, void *val);
dictEntry *dictAddRaw(dict *d, void *key);
int dictReplace(dict *d, void *key, void *val);
dictEntry *dictReplaceRaw(dict *d, void *key);
int dictDelete(dict *d, const void *key);
int dictDeleteNoFree(dict *d, const void *key);
void dictRelease(dict *d);
dictEntry * dictFind(dict *d, const void *key);
void *dictFetchValue(dict *d, const void *key);
int dictResize(dict *d);
dictIterator *dictGetIterator(dict *d);
dictIterator *dictGetSafeIterator(dict *d);
dictEntry *dictNext(dictIterator *iter);
void dictReleaseIterator(dictIterator *iter);
dictEntry *dictGetRandomKey(dict *d);
unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count);
void dictGetStats(char *buf, size_t bufsize, dict *d);
unsigned int dictGenHashFunction(const void *key, int len);
unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len);
void dictEmpty(dict *d, void(callback)(void*));
void dictEnableResize(void);
void dictDisableResize(void);
int dictRehash(dict *d, int n);
int dictRehashMilliseconds(dict *d, int ms);
void dictSetHashFunctionSeed(unsigned int initval);
unsigned int dictGetHashFunctionSeed(void);
unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, void *privdata);
#ifdef USE_PMDK
/* PMEM-specific API */
int dictAddPM(dict *d, void *key, void *val);
dictEntry *dictAddRawPM(dict *d, void *key);
dictEntry *dictAddReconstructedPM(dict *d, void *key, void *val);
int dictReplacePM(dict *d, void *key, void *val);
#endif
/* Hash table types */
extern dictType dictTypeHeapStringCopyKey;
extern dictType dictTypeHeapStrings;
extern dictType dictTypeHeapStringCopyKeyValue;
#endif /* __DICT_H */
| 7,201 | 36.123711 | 87 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/rio.h
|
/*
* Copyright (c) 2009-2012, Pieter Noordhuis <pcnoordhuis at gmail dot com>
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __REDIS_RIO_H
#define __REDIS_RIO_H
#include <stdio.h>
#include <stdint.h>
#include "sds.h"
/* Abstract stream object: a rio wraps one concrete backend (an in-memory
 * sds buffer, a stdio FILE, or a set of file descriptors) behind a uniform
 * read/write/tell/flush interface, optionally folding every byte moved
 * into a running checksum. */
struct _rio {
    /* Backend functions.
     * Since this functions do not tolerate short writes or reads the return
     * value is simplified to: zero on error, non zero on complete success. */
    size_t (*read)(struct _rio *, void *buf, size_t len);
    size_t (*write)(struct _rio *, const void *buf, size_t len);
    off_t (*tell)(struct _rio *);
    int (*flush)(struct _rio *);
    /* The update_cksum method if not NULL is used to compute the checksum of
     * all the data that was read or written so far. The method should be
     * designed so that can be called with the current checksum, and the buf
     * and len fields pointing to the new block of data to add to the checksum
     * computation. */
    void (*update_cksum)(struct _rio *, const void *buf, size_t len);
    /* The current checksum */
    uint64_t cksum;
    /* number of bytes read or written */
    size_t processed_bytes;
    /* maximum single read or write chunk size; 0 means unlimited (see
     * rioWrite/rioRead, which split larger requests into chunks). */
    size_t max_processing_chunk;
    /* Backend-specific vars. */
    union {
        /* In-memory buffer target. */
        struct {
            sds ptr;
            off_t pos;
        } buffer;
        /* Stdio file pointer target. */
        struct {
            FILE *fp;
            off_t buffered; /* Bytes written since last fsync. */
            off_t autosync; /* fsync after 'autosync' bytes written. */
        } file;
        /* Multiple FDs target (used to write to N sockets). */
        struct {
            int *fds;       /* File descriptors. */
            int *state;     /* Error state of each fd. 0 (if ok) or errno. */
            int numfds;
            off_t pos;
            sds buf;
        } fdset;
    } io;
};
typedef struct _rio rio;
/* The following functions are our interface with the stream. They'll call the
* actual implementation of read / write / tell, and will update the checksum
* if needed. */
/* Write 'len' bytes from 'buf' to the stream, updating the checksum (if a
 * checksum function is installed) before each backend write. Requests larger
 * than max_processing_chunk (when non-zero) are split into chunks.
 * Returns 1 on complete success, 0 on error. */
static inline size_t rioWrite(rio *r, const void *buf, size_t len) {
    const char *cursor = buf;
    while (len > 0) {
        size_t chunk = len;
        if (r->max_processing_chunk && r->max_processing_chunk < chunk)
            chunk = r->max_processing_chunk;
        if (r->update_cksum) r->update_cksum(r, cursor, chunk);
        if (r->write(r, cursor, chunk) == 0)
            return 0;
        cursor += chunk;
        len -= chunk;
        r->processed_bytes += chunk;
    }
    return 1;
}
/* Read 'len' bytes from the stream into 'buf', folding each block into the
 * checksum (if a checksum function is installed) after it is read. Requests
 * larger than max_processing_chunk (when non-zero) are split into chunks.
 * Returns 1 on complete success, 0 on error. */
static inline size_t rioRead(rio *r, void *buf, size_t len) {
    char *cursor = buf;
    while (len > 0) {
        size_t chunk = len;
        if (r->max_processing_chunk && r->max_processing_chunk < chunk)
            chunk = r->max_processing_chunk;
        if (r->read(r, cursor, chunk) == 0)
            return 0;
        if (r->update_cksum) r->update_cksum(r, cursor, chunk);
        cursor += chunk;
        len -= chunk;
        r->processed_bytes += chunk;
    }
    return 1;
}
/* Return the current offset within the underlying stream (delegates to the
 * backend's tell method). */
static inline off_t rioTell(rio *r) {
    return r->tell(r);
}
/* Flush any buffered data to the underlying stream (delegates to the
 * backend's flush method). Zero on error, non-zero on success. */
static inline int rioFlush(rio *r) {
    return r->flush(r);
}
void rioInitWithFile(rio *r, FILE *fp);
void rioInitWithBuffer(rio *r, sds s);
void rioInitWithFdset(rio *r, int *fds, int numfds);
void rioFreeFdset(rio *r);
size_t rioWriteBulkCount(rio *r, char prefix, int count);
size_t rioWriteBulkString(rio *r, const char *buf, size_t len);
size_t rioWriteBulkLongLong(rio *r, long long l);
size_t rioWriteBulkDouble(rio *r, double d);
void rioGenericUpdateChecksum(rio *r, const void *buf, size_t len);
void rioSetAutoSync(rio *r, off_t bytes);
#endif
| 5,290 | 36.260563 | 123 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/src/endianconv.h
|
/* See endianconv.c top comments for more information
*
* ----------------------------------------------------------------------------
*
* Copyright (c) 2011-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ENDIANCONV_H
#define __ENDIANCONV_H
#include "config.h"
#include <stdint.h>
void memrev16(void *p);
void memrev32(void *p);
void memrev64(void *p);
uint16_t intrev16(uint16_t v);
uint32_t intrev32(uint32_t v);
uint64_t intrev64(uint64_t v);
/* variants of the function doing the actual convertion only if the target
* host is big endian */
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define memrev16ifbe(p)
#define memrev32ifbe(p)
#define memrev64ifbe(p)
#define intrev16ifbe(v) (v)
#define intrev32ifbe(v) (v)
#define intrev64ifbe(v) (v)
#else
#define memrev16ifbe(p) memrev16(p)
#define memrev32ifbe(p) memrev32(p)
#define memrev64ifbe(p) memrev64(p)
#define intrev16ifbe(v) intrev16(v)
#define intrev32ifbe(v) intrev32(v)
#define intrev64ifbe(v) intrev64(v)
#endif
/* The functions htonu64() and ntohu64() convert the specified value to
* network byte ordering and back. In big endian systems they are no-ops. */
#if (BYTE_ORDER == BIG_ENDIAN)
#define htonu64(v) (v)
#define ntohu64(v) (v)
#else
#define htonu64(v) intrev64(v)
#define ntohu64(v) intrev64(v)
#endif
#ifdef REDIS_TEST
int endianconvTest(int argc, char *argv[]);
#endif
#endif
| 2,901 | 35.734177 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/update-jemalloc.sh
|
#!/bin/bash
# Update the vendored jemalloc copy to the given upstream version.
# Usage: ./update-jemalloc.sh <version>
# Downloads the release tarball, unpacks it, and replaces ./jemalloc.
set -euo pipefail

if [ $# -ne 1 ]; then
    echo "Usage: $0 <jemalloc-version>" >&2
    exit 1
fi

VER="$1"
URL="http://www.canonware.com/download/jemalloc/jemalloc-${VER}.tar.bz2"
echo "Downloading $URL"
# -f: fail on HTTP errors instead of saving an error page as the tarball.
curl -f "$URL" > /tmp/jemalloc.tar.bz2
tar xvjf /tmp/jemalloc.tar.bz2
rm -rf jemalloc
mv "jemalloc-${VER}" jemalloc
echo "Use git status, add all files and commit changes."
| 282 | 27.3 | 72 |
sh
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/linenoise/linenoise.h
|
/* linenoise.h -- VERSION 1.0
*
* Guerrilla line editing library against the idea that a line editing lib
* needs to be 20,000 lines of C code.
*
* See linenoise.c for more information.
*
* ------------------------------------------------------------------------
*
* Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __LINENOISE_H
#define __LINENOISE_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct linenoiseCompletions {
size_t len;
char **cvec;
} linenoiseCompletions;
typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *);
typedef char*(linenoiseHintsCallback)(const char *, int *color, int *bold);
typedef void(linenoiseFreeHintsCallback)(void *);
void linenoiseSetCompletionCallback(linenoiseCompletionCallback *);
void linenoiseSetHintsCallback(linenoiseHintsCallback *);
void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *);
void linenoiseAddCompletion(linenoiseCompletions *, const char *);
char *linenoise(const char *prompt);
void linenoiseFree(void *ptr);
int linenoiseHistoryAdd(const char *line);
int linenoiseHistorySetMaxLen(int len);
int linenoiseHistorySave(const char *filename);
int linenoiseHistoryLoad(const char *filename);
void linenoiseClearScreen(void);
void linenoiseSetMultiLine(int ml);
void linenoisePrintKeyCodes(void);
#ifdef __cplusplus
}
#endif
#endif /* __LINENOISE_H */
| 2,825 | 37.189189 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/net.h
|
/* Extracted from anet.c to work properly with Hiredis error reporting.
*
* Copyright (c) 2006-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __NET_H
#define __NET_H
#include "hiredis.h"
#if defined(__sun) || defined(_AIX)
#define AF_LOCAL AF_UNIX
#endif
int redisCheckSocketError(redisContext *c);
int redisContextSetTimeout(redisContext *c, const struct timeval tv);
int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout);
int redisContextConnectBindTcp(redisContext *c, const char *addr, int port,
const struct timeval *timeout,
const char *source_addr);
int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout);
int redisKeepAlive(redisContext *c, int interval);
#endif
| 2,453 | 46.192308 | 103 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/sds.h
|
/* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Oran Agra
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SDS_H
#define __SDS_H
#define SDS_MAX_PREALLOC (1024*1024)
#include <sys/types.h>
#include <stdarg.h>
#include <stdint.h>
typedef char *sds;
/* Note: sdshdr5 is never used, we just access the flags byte directly.
* However is here to document the layout of type 5 SDS strings. */
struct __attribute__ ((__packed__)) sdshdr5 {
unsigned char flags; /* 3 lsb of type, and 5 msb of string length */
char buf[];
};
struct __attribute__ ((__packed__)) sdshdr8 {
uint8_t len; /* used */
uint8_t alloc; /* excluding the header and null terminator */
unsigned char flags; /* 3 lsb of type, 5 unused bits */
char buf[];
};
struct __attribute__ ((__packed__)) sdshdr16 {
uint16_t len; /* used */
uint16_t alloc; /* excluding the header and null terminator */
unsigned char flags; /* 3 lsb of type, 5 unused bits */
char buf[];
};
struct __attribute__ ((__packed__)) sdshdr32 {
uint32_t len; /* used */
uint32_t alloc; /* excluding the header and null terminator */
unsigned char flags; /* 3 lsb of type, 5 unused bits */
char buf[];
};
struct __attribute__ ((__packed__)) sdshdr64 {
uint64_t len; /* used */
uint64_t alloc; /* excluding the header and null terminator */
unsigned char flags; /* 3 lsb of type, 5 unused bits */
char buf[];
};
#define SDS_TYPE_5 0
#define SDS_TYPE_8 1
#define SDS_TYPE_16 2
#define SDS_TYPE_32 3
#define SDS_TYPE_64 4
#define SDS_TYPE_MASK 7
#define SDS_TYPE_BITS 3
#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (void*)((s)-(sizeof(struct sdshdr##T)));
#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))))
#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS)
/* Return the length (used bytes) of the sds string by decoding the header
 * type from the flags byte stored just before the string buffer. */
static inline size_t sdslen(const sds s) {
    const unsigned char fb = s[-1];
    const int type = fb & SDS_TYPE_MASK;
    if (type == SDS_TYPE_5)  return SDS_TYPE_5_LEN(fb);
    if (type == SDS_TYPE_8)  return SDS_HDR(8,s)->len;
    if (type == SDS_TYPE_16) return SDS_HDR(16,s)->len;
    if (type == SDS_TYPE_32) return SDS_HDR(32,s)->len;
    if (type == SDS_TYPE_64) return SDS_HDR(64,s)->len;
    return 0;
}
static inline size_t sdsavail(const sds s) {
unsigned char flags = s[-1];
switch(flags&SDS_TYPE_MASK) {
case SDS_TYPE_5: {
return 0;
}
case SDS_TYPE_8: {
SDS_HDR_VAR(8,s);
return sh->alloc - sh->len;
}
case SDS_TYPE_16: {
SDS_HDR_VAR(16,s);
return sh->alloc - sh->len;
}
case SDS_TYPE_32: {
SDS_HDR_VAR(32,s);
return sh->alloc - sh->len;
}
case SDS_TYPE_64: {
SDS_HDR_VAR(64,s);
return sh->alloc - sh->len;
}
}
return 0;
}
/* Set the stored length of the string to 'newlen' without touching the
 * buffer contents. For type 5 the length lives in the flags byte itself. */
static inline void sdssetlen(sds s, size_t newlen) {
    unsigned char *fp = ((unsigned char*)s) - 1;
    const int type = *fp & SDS_TYPE_MASK;
    if (type == SDS_TYPE_5) {
        *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS);
    } else if (type == SDS_TYPE_8) {
        SDS_HDR(8,s)->len = newlen;
    } else if (type == SDS_TYPE_16) {
        SDS_HDR(16,s)->len = newlen;
    } else if (type == SDS_TYPE_32) {
        SDS_HDR(32,s)->len = newlen;
    } else if (type == SDS_TYPE_64) {
        SDS_HDR(64,s)->len = newlen;
    }
}
/* Increment the stored length of the string by 'inc'. The caller must have
 * ensured enough allocated space; no bounds checking is performed here. */
static inline void sdsinclen(sds s, size_t inc) {
    unsigned char *fp = ((unsigned char*)s) - 1;
    const int type = *fp & SDS_TYPE_MASK;
    if (type == SDS_TYPE_5) {
        /* Length is packed into the 5 high bits of the flags byte. */
        unsigned char newlen = SDS_TYPE_5_LEN(*fp) + inc;
        *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS);
    } else if (type == SDS_TYPE_8) {
        SDS_HDR(8,s)->len += inc;
    } else if (type == SDS_TYPE_16) {
        SDS_HDR(16,s)->len += inc;
    } else if (type == SDS_TYPE_32) {
        SDS_HDR(32,s)->len += inc;
    } else if (type == SDS_TYPE_64) {
        SDS_HDR(64,s)->len += inc;
    }
}
/* sdsalloc() = sdsavail() + sdslen(): the total buffer capacity, excluding
 * the header and the null terminator. Type 5 has no separate alloc field,
 * so its allocation equals its length. */
static inline size_t sdsalloc(const sds s) {
    const unsigned char fb = s[-1];
    const int type = fb & SDS_TYPE_MASK;
    if (type == SDS_TYPE_5)  return SDS_TYPE_5_LEN(fb);
    if (type == SDS_TYPE_8)  return SDS_HDR(8,s)->alloc;
    if (type == SDS_TYPE_16) return SDS_HDR(16,s)->alloc;
    if (type == SDS_TYPE_32) return SDS_HDR(32,s)->alloc;
    if (type == SDS_TYPE_64) return SDS_HDR(64,s)->alloc;
    return 0;
}
/* Set the stored allocation size of the string to 'newlen'. Type 5 strings
 * store no allocation info, so the call is a no-op for them. */
static inline void sdssetalloc(sds s, size_t newlen) {
    const int type = s[-1] & SDS_TYPE_MASK;
    if (type == SDS_TYPE_5) {
        /* Nothing to do, this type has no total allocation info. */
    } else if (type == SDS_TYPE_8) {
        SDS_HDR(8,s)->alloc = newlen;
    } else if (type == SDS_TYPE_16) {
        SDS_HDR(16,s)->alloc = newlen;
    } else if (type == SDS_TYPE_32) {
        SDS_HDR(32,s)->alloc = newlen;
    } else if (type == SDS_TYPE_64) {
        SDS_HDR(64,s)->alloc = newlen;
    }
}
sds sdsnewlen(const void *init, size_t initlen);
sds sdsnew(const char *init);
sds sdsempty(void);
sds sdsdup(const sds s);
void sdsfree(sds s);
sds sdsgrowzero(sds s, size_t len);
sds sdscatlen(sds s, const void *t, size_t len);
sds sdscat(sds s, const char *t);
sds sdscatsds(sds s, const sds t);
sds sdscpylen(sds s, const char *t, size_t len);
sds sdscpy(sds s, const char *t);
sds sdscatvprintf(sds s, const char *fmt, va_list ap);
#ifdef __GNUC__
sds sdscatprintf(sds s, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
#else
sds sdscatprintf(sds s, const char *fmt, ...);
#endif
sds sdscatfmt(sds s, char const *fmt, ...);
sds sdstrim(sds s, const char *cset);
void sdsrange(sds s, int start, int end);
void sdsupdatelen(sds s);
void sdsclear(sds s);
int sdscmp(const sds s1, const sds s2);
sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count);
void sdsfreesplitres(sds *tokens, int count);
void sdstolower(sds s);
void sdstoupper(sds s);
sds sdsfromlonglong(long long value);
sds sdscatrepr(sds s, const char *p, size_t len);
sds *sdssplitargs(const char *line, int *argc);
sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen);
sds sdsjoin(char **argv, int argc, char *sep);
sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen);
/* Low level functions exposed to the user API */
sds sdsMakeRoomFor(sds s, size_t addlen);
void sdsIncrLen(sds s, int incr);
sds sdsRemoveFreeSpace(sds s);
size_t sdsAllocSize(sds s);
void *sdsAllocPtr(sds s);
/* Export the allocator used by SDS to the program using SDS.
* Sometimes the program SDS is linked to, may use a different set of
* allocators, but may want to allocate or free things that SDS will
* respectively free or allocate. */
void *sds_malloc(size_t size);
void *sds_realloc(void *ptr, size_t size);
void sds_free(void *ptr);
#ifdef REDIS_TEST
int sdsTest(int argc, char *argv[]);
#endif
#endif
| 8,934 | 31.609489 | 88 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/hiredis.h
|
/*
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_H
#define __HIREDIS_H
#include <stdio.h> /* for size_t */
#include <stdarg.h> /* for va_list */
#include <sys/time.h> /* for struct timeval */
#define HIREDIS_MAJOR 0
#define HIREDIS_MINOR 11
#define HIREDIS_PATCH 0
#define REDIS_ERR -1
#define REDIS_OK 0
/* When an error occurs, the err flag in a context is set to hold the type of
* error that occured. REDIS_ERR_IO means there was an I/O error and you
* should use the "errno" variable to find out what is wrong.
* For other values, the "errstr" field will hold a description. */
#define REDIS_ERR_IO 1 /* Error in read or write */
#define REDIS_ERR_EOF 3 /* End of file */
#define REDIS_ERR_PROTOCOL 4 /* Protocol error */
#define REDIS_ERR_OOM 5 /* Out of memory */
#define REDIS_ERR_OTHER 2 /* Everything else... */
/* Connection type can be blocking or non-blocking and is set in the
* least significant bit of the flags field in redisContext. */
#define REDIS_BLOCK 0x1
/* Connection may be disconnected before being free'd. The second bit
* in the flags field is set when the context is connected. */
#define REDIS_CONNECTED 0x2
/* The async API might try to disconnect cleanly and flush the output
* buffer and read all subsequent replies before disconnecting.
* This flag means no new commands can come in and the connection
* should be terminated once all replies have been read. */
#define REDIS_DISCONNECTING 0x4
/* Flag specific to the async API which means that the context should be clean
* up as soon as possible. */
#define REDIS_FREEING 0x8
/* Flag that is set when an async callback is executed. */
#define REDIS_IN_CALLBACK 0x10
/* Flag that is set when the async context has one or more subscriptions. */
#define REDIS_SUBSCRIBED 0x20
/* Flag that is set when monitor mode is active */
#define REDIS_MONITORING 0x40
#define REDIS_REPLY_STRING 1
#define REDIS_REPLY_ARRAY 2
#define REDIS_REPLY_INTEGER 3
#define REDIS_REPLY_NIL 4
#define REDIS_REPLY_STATUS 5
#define REDIS_REPLY_ERROR 6
#define REDIS_READER_MAX_BUF (1024*16) /* Default max unused reader buffer. */
#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */
#ifdef __cplusplus
extern "C" {
#endif
/* This is the reply object returned by redisCommand() */
typedef struct redisReply {
int type; /* REDIS_REPLY_* */
long long integer; /* The integer when type is REDIS_REPLY_INTEGER */
int len; /* Length of string */
char *str; /* Used for both REDIS_REPLY_ERROR and REDIS_REPLY_STRING */
size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */
struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */
} redisReply;
typedef struct redisReadTask {
int type;
int elements; /* number of elements in multibulk container */
int idx; /* index in parent (array) object */
void *obj; /* holds user-generated value for a read task */
struct redisReadTask *parent; /* parent task */
void *privdata; /* user-settable arbitrary field */
} redisReadTask;
typedef struct redisReplyObjectFunctions {
void *(*createString)(const redisReadTask*, char*, size_t);
void *(*createArray)(const redisReadTask*, int);
void *(*createInteger)(const redisReadTask*, long long);
void *(*createNil)(const redisReadTask*);
void (*freeObject)(void*);
} redisReplyObjectFunctions;
/* State for the protocol parser */
typedef struct redisReader {
int err; /* Error flags, 0 when there is no error */
char errstr[128]; /* String representation of error when applicable */
char *buf; /* Read buffer */
size_t pos; /* Buffer cursor */
size_t len; /* Buffer length */
size_t maxbuf; /* Max length of unused buffer */
redisReadTask rstack[9];
int ridx; /* Index of current read task */
void *reply; /* Temporary reply pointer */
redisReplyObjectFunctions *fn;
void *privdata;
} redisReader;
/* Public API for the protocol parser. */
redisReader *redisReaderCreate(void);
void redisReaderFree(redisReader *r);
int redisReaderFeed(redisReader *r, const char *buf, size_t len);
int redisReaderGetReply(redisReader *r, void **reply);
/* Backwards compatibility, can be removed on big version bump. */
#define redisReplyReaderCreate redisReaderCreate
#define redisReplyReaderFree redisReaderFree
#define redisReplyReaderFeed redisReaderFeed
#define redisReplyReaderGetReply redisReaderGetReply
#define redisReplyReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p))
#define redisReplyReaderGetObject(_r) (((redisReader*)(_r))->reply)
#define redisReplyReaderGetError(_r) (((redisReader*)(_r))->errstr)
/* Function to free the reply objects hiredis returns by default. */
void freeReplyObject(void *reply);
/* Functions to format a command according to the protocol. */
int redisvFormatCommand(char **target, const char *format, va_list ap);
int redisFormatCommand(char **target, const char *format, ...);
int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen);
/* Context for a connection to Redis */
typedef struct redisContext {
int err; /* Error flags, 0 when there is no error */
char errstr[128]; /* String representation of error when applicable */
int fd;
int flags;
char *obuf; /* Write buffer */
redisReader *reader; /* Protocol reader */
} redisContext;
redisContext *redisConnect(const char *ip, int port);
redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv);
redisContext *redisConnectNonBlock(const char *ip, int port);
redisContext *redisConnectBindNonBlock(const char *ip, int port, const char *source_addr);
redisContext *redisConnectUnix(const char *path);
redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv);
redisContext *redisConnectUnixNonBlock(const char *path);
redisContext *redisConnectFd(int fd);
int redisSetTimeout(redisContext *c, const struct timeval tv);
int redisEnableKeepAlive(redisContext *c);
void redisFree(redisContext *c);
int redisFreeKeepFd(redisContext *c);
int redisBufferRead(redisContext *c);
int redisBufferWrite(redisContext *c, int *done);
/* In a blocking context, this function first checks if there are unconsumed
* replies to return and returns one if so. Otherwise, it flushes the output
* buffer to the socket and reads until it has a reply. In a non-blocking
* context, it will return unconsumed replies until there are no more. */
int redisGetReply(redisContext *c, void **reply);
int redisGetReplyFromReader(redisContext *c, void **reply);
/* Write a formatted command to the output buffer. Use these functions in blocking mode
* to get a pipeline of commands. */
int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len);
/* Write a command to the output buffer. Use these functions in blocking mode
* to get a pipeline of commands. */
int redisvAppendCommand(redisContext *c, const char *format, va_list ap);
int redisAppendCommand(redisContext *c, const char *format, ...);
int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen);
/* Issue a command to Redis. In a blocking context, it is identical to calling
* redisAppendCommand, followed by redisGetReply. The function will return
* NULL if there was an error in performing the request, otherwise it will
* return the reply. In a non-blocking context, it is identical to calling
* only redisAppendCommand and will always return NULL. */
void *redisvCommand(redisContext *c, const char *format, va_list ap);
void *redisCommand(redisContext *c, const char *format, ...);
void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen);
#ifdef __cplusplus
}
#endif
#endif
| 9,403 | 41.552036 | 96 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/zmalloc.h
|
/* Drop in replacement for zmalloc.h in order to just use libc malloc without
* any wrappering. */
#ifndef ZMALLOC_H
#define ZMALLOC_H
#define zmalloc malloc
#define zrealloc realloc
#define zcalloc(x) calloc(x,1)
#define zfree free
#define zstrdup strdup
#endif
| 267 | 18.142857 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/sdsalloc.h
|
/* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* SDS allocator selection.
*
* This file is used in order to change the SDS allocator at compile time.
* Just define the following defines to what you want to use. Also add
* the include of your alternate allocator if needed (not needed in order
* to use the default libc allocator). */
#include "zmalloc.h"
#define s_malloc zmalloc
#define s_realloc zrealloc
#define s_free zfree
| 2,083 | 47.465116 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/fmacros.h
|
#ifndef __HIREDIS_FMACRO_H
#define __HIREDIS_FMACRO_H
/* Feature-test macros: must be defined before any system header is
 * included, so this header is included first by hiredis sources. */
#if !defined(_BSD_SOURCE)
#define _BSD_SOURCE
#endif
/* AIX hides many POSIX/BSD symbols unless _ALL_SOURCE is set. */
#if defined(_AIX)
#define _ALL_SOURCE
#endif
/* Pick the strictest standard each platform tolerates:
 * Solaris wants plain POSIX.1-2001; Linux/OpenBSD/NetBSD take XPG6;
 * everywhere else an unversioned _XOPEN_SOURCE is the safe default. */
#if defined(__sun__)
#define _POSIX_C_SOURCE 200112L
#elif defined(__linux__) || defined(__OpenBSD__) || defined(__NetBSD__)
#define _XOPEN_SOURCE 600
#else
#define _XOPEN_SOURCE
#endif
/* Convenience marker for Darwin-specific code paths. */
#if __APPLE__ && __MACH__
#define _OSX
#endif
#endif
| 396 | 14.88 | 71 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/async.h
|
/*
* Copyright (c) 2009-2011, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_ASYNC_H
#define __HIREDIS_ASYNC_H
#include "hiredis.h"
#ifdef __cplusplus
extern "C" {
#endif
struct redisAsyncContext; /* need forward declaration of redisAsyncContext */
struct dict; /* dictionary header is included in async.c */
/* Reply callback prototype and container */
typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*);
typedef struct redisCallback {
struct redisCallback *next; /* simple singly linked list */
redisCallbackFn *fn;
void *privdata;
} redisCallback;
/* List of callbacks for either regular replies or pub/sub */
typedef struct redisCallbackList {
redisCallback *head, *tail;
} redisCallbackList;
/* Connection callback prototypes */
typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status);
typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status);
/* Context for an async connection to Redis */
typedef struct redisAsyncContext {
/* Hold the regular context, so it can be realloc'ed. */
redisContext c;
/* Setup error flags so they can be used directly. */
int err;
char *errstr;
/* Not used by hiredis */
void *data;
/* Event library data and hooks */
struct {
void *data;
/* Hooks that are called when the library expects to start
* reading/writing. These functions should be idempotent. */
void (*addRead)(void *privdata);
void (*delRead)(void *privdata);
void (*addWrite)(void *privdata);
void (*delWrite)(void *privdata);
void (*cleanup)(void *privdata);
} ev;
/* Called when either the connection is terminated due to an error or per
* user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */
redisDisconnectCallback *onDisconnect;
/* Called when the first write event was received. */
redisConnectCallback *onConnect;
/* Regular command callbacks */
redisCallbackList replies;
/* Subscription callbacks */
struct {
redisCallbackList invalid;
struct dict *channels;
struct dict *patterns;
} sub;
} redisAsyncContext;
/* Functions that proxy to hiredis */
redisAsyncContext *redisAsyncConnect(const char *ip, int port);
redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr);
redisAsyncContext *redisAsyncConnectUnix(const char *path);
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn);
int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn);
void redisAsyncDisconnect(redisAsyncContext *ac);
void redisAsyncFree(redisAsyncContext *ac);
/* Handle read/write events */
void redisAsyncHandleRead(redisAsyncContext *ac);
void redisAsyncHandleWrite(redisAsyncContext *ac);
/* Command functions for an async context. Write the command to the
* output buffer and register the provided callback. */
int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap);
int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...);
int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen);
#ifdef __cplusplus
}
#endif
#endif
| 5,021 | 38.543307 | 138 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/dict.h
|
/* Hash table implementation.
*
* This file implements in memory hash tables with insert/del/replace/find/
* get-random-element operations. Hash tables will auto resize if needed
* tables of power of two in size are used, collisions are handled by
* chaining. See the source code for more information... :)
*
* Copyright (c) 2006-2010, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DICT_H
#define __DICT_H
#define DICT_OK 0
#define DICT_ERR 1
/* Unused arguments generate annoying warnings... */
#define DICT_NOTUSED(V) ((void) V)
/* A single key/value slot; collisions are chained through 'next'. */
typedef struct dictEntry {
    void *key;
    void *val;
    struct dictEntry *next;      /* next entry in the same bucket, or NULL */
} dictEntry;
/* User-supplied hooks that make the table type-agnostic. Any hook may be
 * NULL, in which case the dict falls back to pointer copy/compare. */
typedef struct dictType {
    unsigned int (*hashFunction)(const void *key);
    void *(*keyDup)(void *privdata, const void *key);
    void *(*valDup)(void *privdata, const void *obj);
    int (*keyCompare)(void *privdata, const void *key1, const void *key2);
    void (*keyDestructor)(void *privdata, void *key);
    void (*valDestructor)(void *privdata, void *obj);
} dictType;
/* The hash table itself. 'size' is always a power of two, so
 * 'sizemask' (size-1) turns a hash into a bucket index with one AND. */
typedef struct dict {
    dictEntry **table;           /* bucket array of length 'size' */
    dictType *type;              /* behavior hooks (see dictType) */
    unsigned long size;          /* number of buckets (power of two) */
    unsigned long sizemask;      /* size - 1, for fast modulo */
    unsigned long used;          /* number of stored entries */
    void *privdata;              /* opaque pointer passed to every hook */
} dict;
/* Iteration state: walks buckets in index order, chasing each chain.
 * 'nextEntry' is cached so the current entry may be deleted mid-walk. */
typedef struct dictIterator {
    dict *ht;
    int index;                   /* current bucket, -1 before first call */
    dictEntry *entry, *nextEntry;
} dictIterator;
/* This is the initial size of every hash table */
#define DICT_HT_INITIAL_SIZE 4
/* ------------------------------- Macros ------------------------------------*/
/* Run the value destructor for an entry, if one was registered. */
#define dictFreeEntryVal(ht, entry) \
    if ((ht)->type->valDestructor) \
        (ht)->type->valDestructor((ht)->privdata, (entry)->val)
/* Store a value in an entry, duplicating it when a valDup hook exists. */
#define dictSetHashVal(ht, entry, _val_) do { \
    if ((ht)->type->valDup) \
        entry->val = (ht)->type->valDup((ht)->privdata, _val_); \
    else \
        entry->val = (_val_); \
} while(0)
/* Run the key destructor for an entry, if one was registered. */
#define dictFreeEntryKey(ht, entry) \
    if ((ht)->type->keyDestructor) \
        (ht)->type->keyDestructor((ht)->privdata, (entry)->key)
/* Store a key in an entry, duplicating it when a keyDup hook exists. */
#define dictSetHashKey(ht, entry, _key_) do { \
    if ((ht)->type->keyDup) \
        entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \
    else \
        entry->key = (_key_); \
} while(0)
/* Compare two keys via the keyCompare hook, or by pointer identity. */
#define dictCompareHashKeys(ht, key1, key2) \
    (((ht)->type->keyCompare) ? \
        (ht)->type->keyCompare((ht)->privdata, key1, key2) : \
        (key1) == (key2))
/* Hash a key with the table's hash function. */
#define dictHashKey(ht, key) (ht)->type->hashFunction(key)
/* Raw accessors for an entry's key/value and the table's counters. */
#define dictGetEntryKey(he) ((he)->key)
#define dictGetEntryVal(he) ((he)->val)
#define dictSlots(ht) ((ht)->size)
#define dictSize(ht) ((ht)->used)
/* API */
static unsigned int dictGenHashFunction(const unsigned char *buf, int len);
static dict *dictCreate(dictType *type, void *privDataPtr);
static int dictExpand(dict *ht, unsigned long size);
static int dictAdd(dict *ht, void *key, void *val);
static int dictReplace(dict *ht, void *key, void *val);
static int dictDelete(dict *ht, const void *key);
static void dictRelease(dict *ht);
static dictEntry * dictFind(dict *ht, const void *key);
static dictIterator *dictGetIterator(dict *ht);
static dictEntry *dictNext(dictIterator *iter);
static void dictReleaseIterator(dictIterator *iter);
#endif /* __DICT_H */
| 4,691 | 35.944882 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/adapters/ae.h
|
/*
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_AE_H__
#define __HIREDIS_AE_H__
#include <sys/types.h>
#include <ae.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisAeEvents {
redisAsyncContext *context;
aeEventLoop *loop;
int fd;
int reading, writing;
} redisAeEvents;
/* ae fired a readable event: forward it to the hiredis async reader. */
static void redisAeReadEvent(aeEventLoop *el, int fd, void *privdata, int mask) {
    ((void)el); ((void)fd); ((void)mask);
    redisAeEvents *e = (redisAeEvents*)privdata;
    redisAsyncHandleRead(e->context);
}
/* ae fired a writable event: forward it to the hiredis async writer. */
static void redisAeWriteEvent(aeEventLoop *el, int fd, void *privdata, int mask) {
    ((void)el); ((void)fd); ((void)mask);
    redisAeEvents *e = (redisAeEvents*)privdata;
    redisAsyncHandleWrite(e->context);
}
/* Start watching for readability; idempotent via the 'reading' flag. */
static void redisAeAddRead(void *privdata) {
    redisAeEvents *e = (redisAeEvents*)privdata;
    aeEventLoop *loop = e->loop;
    if (!e->reading) {
        e->reading = 1;
        aeCreateFileEvent(loop,e->fd,AE_READABLE,redisAeReadEvent,e);
    }
}
/* Stop watching for readability; idempotent via the 'reading' flag. */
static void redisAeDelRead(void *privdata) {
    redisAeEvents *e = (redisAeEvents*)privdata;
    aeEventLoop *loop = e->loop;
    if (e->reading) {
        e->reading = 0;
        aeDeleteFileEvent(loop,e->fd,AE_READABLE);
    }
}
/* Start watching for writability; idempotent via the 'writing' flag. */
static void redisAeAddWrite(void *privdata) {
    redisAeEvents *e = (redisAeEvents*)privdata;
    aeEventLoop *loop = e->loop;
    if (!e->writing) {
        e->writing = 1;
        aeCreateFileEvent(loop,e->fd,AE_WRITABLE,redisAeWriteEvent,e);
    }
}
/* Stop watching for writability; idempotent via the 'writing' flag. */
static void redisAeDelWrite(void *privdata) {
    redisAeEvents *e = (redisAeEvents*)privdata;
    aeEventLoop *loop = e->loop;
    if (e->writing) {
        e->writing = 0;
        aeDeleteFileEvent(loop,e->fd,AE_WRITABLE);
    }
}
/* Tear down both watchers and release the adapter container. */
static void redisAeCleanup(void *privdata) {
    redisAeEvents *e = (redisAeEvents*)privdata;
    redisAeDelRead(privdata);
    redisAeDelWrite(privdata);
    free(e);
}
/*
 * Attach an async hiredis context to an ae event loop.
 *
 * Allocates a redisAeEvents container, wires the context's event hooks to
 * the ae adapter functions, and stores the container in ac->ev.data (the
 * cleanup hook frees it). Returns REDIS_OK on success, REDIS_ERR if the
 * context is already attached or allocation fails.
 */
static int redisAeAttach(aeEventLoop *loop, redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    redisAeEvents *e;

    /* Nothing should be attached when something is already attached */
    if (ac->ev.data != NULL)
        return REDIS_ERR;

    /* Create container for context and r/w events */
    e = (redisAeEvents*)malloc(sizeof(*e));
    if (e == NULL)
        return REDIS_ERR; /* OOM: leave the context untouched */
    e->context = ac;
    e->loop = loop;
    e->fd = c->fd;
    e->reading = e->writing = 0;

    /* Register functions to start/stop listening for events */
    ac->ev.addRead = redisAeAddRead;
    ac->ev.delRead = redisAeDelRead;
    ac->ev.addWrite = redisAeAddWrite;
    ac->ev.delWrite = redisAeDelWrite;
    ac->ev.cleanup = redisAeCleanup;
    ac->ev.data = e;

    return REDIS_OK;
}
#endif
| 4,219 | 31.96875 | 82 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/adapters/libuv.h
|
#ifndef __HIREDIS_LIBUV_H__
#define __HIREDIS_LIBUV_H__
#include <uv.h>
#include "../hiredis.h"
#include "../async.h"
#include <string.h>
typedef struct redisLibuvEvents {
redisAsyncContext* context;
uv_poll_t handle;
int events;
} redisLibuvEvents;
int redisLibuvAttach(redisAsyncContext*, uv_loop_t*);
/* libuv poll callback: translate readiness events into hiredis
 * async read/write handling. A nonzero status means the poll
 * failed, in which case there is nothing to dispatch. */
static void redisLibuvPoll(uv_poll_t* handle, int status, int events) {
  if (status != 0)
    return;

  redisLibuvEvents* ev = (redisLibuvEvents*)handle->data;

  if (events & UV_READABLE)
    redisAsyncHandleRead(ev->context);
  if (events & UV_WRITABLE)
    redisAsyncHandleWrite(ev->context);
}
/* Add UV_READABLE to the interest mask and (re)start the poll handle. */
static void redisLibuvAddRead(void *privdata) {
  redisLibuvEvents* p = (redisLibuvEvents*)privdata;
  p->events |= UV_READABLE;
  uv_poll_start(&p->handle, p->events, redisLibuvPoll);
}
/* Drop UV_READABLE; stop polling entirely if no interest remains. */
static void redisLibuvDelRead(void *privdata) {
  redisLibuvEvents* p = (redisLibuvEvents*)privdata;
  p->events &= ~UV_READABLE;
  if (p->events) {
    uv_poll_start(&p->handle, p->events, redisLibuvPoll);
  } else {
    uv_poll_stop(&p->handle);
  }
}
/* Add UV_WRITABLE to the interest mask and (re)start the poll handle. */
static void redisLibuvAddWrite(void *privdata) {
  redisLibuvEvents* p = (redisLibuvEvents*)privdata;
  p->events |= UV_WRITABLE;
  uv_poll_start(&p->handle, p->events, redisLibuvPoll);
}
/* Drop UV_WRITABLE; stop polling entirely if no interest remains. */
static void redisLibuvDelWrite(void *privdata) {
  redisLibuvEvents* p = (redisLibuvEvents*)privdata;
  p->events &= ~UV_WRITABLE;
  if (p->events) {
    uv_poll_start(&p->handle, p->events, redisLibuvPoll);
  } else {
    uv_poll_stop(&p->handle);
  }
}
/* uv_close completion: the handle is fully closed, container can go. */
static void on_close(uv_handle_t* handle) {
  redisLibuvEvents* p = (redisLibuvEvents*)handle->data;
  free(p);
}
/* Cleanup hook: close the handle asynchronously; on_close frees it
 * (libuv forbids freeing a handle before its close callback runs). */
static void redisLibuvCleanup(void *privdata) {
  redisLibuvEvents* p = (redisLibuvEvents*)privdata;
  uv_close((uv_handle_t*)&p->handle, on_close);
}
/*
 * Attach an async hiredis context to a libuv loop.
 *
 * Allocates a redisLibuvEvents container, initializes a uv_poll handle on
 * the context's fd, and wires the event hooks. Returns REDIS_OK on
 * success, REDIS_ERR if already attached, on OOM, or if uv_poll_init
 * fails. The original code leaked the container when uv_poll_init failed
 * and registered the hooks before allocation succeeded; both are fixed:
 * nothing is modified on the context until every step has succeeded.
 */
static int redisLibuvAttach(redisAsyncContext* ac, uv_loop_t* loop) {
  redisContext *c = &(ac->c);

  /* Nothing should be attached when something is already attached. */
  if (ac->ev.data != NULL) {
    return REDIS_ERR;
  }

  redisLibuvEvents* p = (redisLibuvEvents*)malloc(sizeof(*p));
  if (!p) {
    return REDIS_ERR;
  }
  memset(p, 0, sizeof(*p));

  if (uv_poll_init(loop, &p->handle, c->fd) != 0) {
    free(p);           /* was leaked before: handle never opened, safe to free */
    return REDIS_ERR;
  }

  /* Register hooks only once the handle is fully set up. */
  ac->ev.addRead  = redisLibuvAddRead;
  ac->ev.delRead  = redisLibuvDelRead;
  ac->ev.addWrite = redisLibuvAddWrite;
  ac->ev.delWrite = redisLibuvDelWrite;
  ac->ev.cleanup  = redisLibuvCleanup;

  ac->ev.data    = p;
  p->handle.data = p;
  p->context     = ac;

  return REDIS_OK;
}
#endif
| 2,487 | 19.393443 | 71 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/adapters/libevent.h
|
/*
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_LIBEVENT_H__
#define __HIREDIS_LIBEVENT_H__
#include <event.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisLibeventEvents {
redisAsyncContext *context;
struct event rev, wev;
} redisLibeventEvents;
/* libevent read callback: forward to the hiredis async reader. */
static void redisLibeventReadEvent(int fd, short event, void *arg) {
    ((void)fd); ((void)event);
    redisLibeventEvents *e = (redisLibeventEvents*)arg;
    redisAsyncHandleRead(e->context);
}
/* libevent write callback: forward to the hiredis async writer. */
static void redisLibeventWriteEvent(int fd, short event, void *arg) {
    ((void)fd); ((void)event);
    redisLibeventEvents *e = (redisLibeventEvents*)arg;
    redisAsyncHandleWrite(e->context);
}
/* Arm the read event (no timeout). */
static void redisLibeventAddRead(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_add(&e->rev,NULL);
}
/* Disarm the read event. */
static void redisLibeventDelRead(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->rev);
}
/* Arm the write event (no timeout). */
static void redisLibeventAddWrite(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_add(&e->wev,NULL);
}
/* Disarm the write event. */
static void redisLibeventDelWrite(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->wev);
}
/* Disarm both events and release the adapter container. */
static void redisLibeventCleanup(void *privdata) {
    redisLibeventEvents *e = (redisLibeventEvents*)privdata;
    event_del(&e->rev);
    event_del(&e->wev);
    free(e);
}
/*
 * Attach an async hiredis context to a libevent event_base.
 *
 * Allocates a redisLibeventEvents container, wires the event hooks, and
 * initializes (but does not arm) the read/write events on the context's
 * fd. Returns REDIS_OK on success, REDIS_ERR if the context is already
 * attached or allocation fails (the original dereferenced an unchecked
 * malloc result).
 */
static int redisLibeventAttach(redisAsyncContext *ac, struct event_base *base) {
    redisContext *c = &(ac->c);
    redisLibeventEvents *e;

    /* Nothing should be attached when something is already attached */
    if (ac->ev.data != NULL)
        return REDIS_ERR;

    /* Create container for context and r/w events */
    e = (redisLibeventEvents*)malloc(sizeof(*e));
    if (e == NULL)
        return REDIS_ERR; /* OOM: leave the context untouched */
    e->context = ac;

    /* Register functions to start/stop listening for events */
    ac->ev.addRead = redisLibeventAddRead;
    ac->ev.delRead = redisLibeventDelRead;
    ac->ev.addWrite = redisLibeventAddWrite;
    ac->ev.delWrite = redisLibeventDelWrite;
    ac->ev.cleanup = redisLibeventCleanup;
    ac->ev.data = e;

    /* Initialize and install read/write events */
    event_set(&e->rev,c->fd,EV_READ,redisLibeventReadEvent,e);
    event_set(&e->wev,c->fd,EV_WRITE,redisLibeventWriteEvent,e);
    event_base_set(base,&e->rev);
    event_base_set(base,&e->wev);

    return REDIS_OK;
}
#endif
| 3,980 | 35.522936 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/hiredis/adapters/libev.h
|
/*
* Copyright (c) 2010-2011, Pieter Noordhuis <pcnoordhuis at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIREDIS_LIBEV_H__
#define __HIREDIS_LIBEV_H__
#include <stdlib.h>
#include <sys/types.h>
#include <ev.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisLibevEvents {
redisAsyncContext *context;
struct ev_loop *loop;
int reading, writing;
ev_io rev, wev;
} redisLibevEvents;
/* libev read watcher callback: forward to the hiredis async reader.
 * EV_P_ expands to a loop parameter only when EV_MULTIPLICITY is set. */
static void redisLibevReadEvent(EV_P_ ev_io *watcher, int revents) {
#if EV_MULTIPLICITY
    ((void)loop);
#endif
    ((void)revents);
    redisLibevEvents *e = (redisLibevEvents*)watcher->data;
    redisAsyncHandleRead(e->context);
}
/* libev write watcher callback: forward to the hiredis async writer. */
static void redisLibevWriteEvent(EV_P_ ev_io *watcher, int revents) {
#if EV_MULTIPLICITY
    ((void)loop);
#endif
    ((void)revents);
    redisLibevEvents *e = (redisLibevEvents*)watcher->data;
    redisAsyncHandleWrite(e->context);
}
/* Start the read watcher; idempotent via the 'reading' flag.
 * The local 'loop' feeds the EV_A_ macro under EV_MULTIPLICITY. */
static void redisLibevAddRead(void *privdata) {
    redisLibevEvents *e = (redisLibevEvents*)privdata;
    struct ev_loop *loop = e->loop;
    ((void)loop);
    if (!e->reading) {
        e->reading = 1;
        ev_io_start(EV_A_ &e->rev);
    }
}
/* Stop the read watcher; idempotent via the 'reading' flag. */
static void redisLibevDelRead(void *privdata) {
    redisLibevEvents *e = (redisLibevEvents*)privdata;
    struct ev_loop *loop = e->loop;
    ((void)loop);
    if (e->reading) {
        e->reading = 0;
        ev_io_stop(EV_A_ &e->rev);
    }
}
/* Start the write watcher; idempotent via the 'writing' flag. */
static void redisLibevAddWrite(void *privdata) {
    redisLibevEvents *e = (redisLibevEvents*)privdata;
    struct ev_loop *loop = e->loop;
    ((void)loop);
    if (!e->writing) {
        e->writing = 1;
        ev_io_start(EV_A_ &e->wev);
    }
}
/* Stop the write watcher; idempotent via the 'writing' flag. */
static void redisLibevDelWrite(void *privdata) {
    redisLibevEvents *e = (redisLibevEvents*)privdata;
    struct ev_loop *loop = e->loop;
    ((void)loop);
    if (e->writing) {
        e->writing = 0;
        ev_io_stop(EV_A_ &e->wev);
    }
}
/* Stop both watchers and release the adapter container. */
static void redisLibevCleanup(void *privdata) {
    redisLibevEvents *e = (redisLibevEvents*)privdata;
    redisLibevDelRead(privdata);
    redisLibevDelWrite(privdata);
    free(e);
}
/*
 * Attach an async hiredis context to a libev loop.
 *
 * Allocates a redisLibevEvents container, wires the event hooks, and
 * initializes (but does not start) the read/write watchers on the
 * context's fd. Returns REDIS_OK on success, REDIS_ERR if the context is
 * already attached or allocation fails (the original dereferenced an
 * unchecked malloc result).
 */
static int redisLibevAttach(EV_P_ redisAsyncContext *ac) {
    redisContext *c = &(ac->c);
    redisLibevEvents *e;

    /* Nothing should be attached when something is already attached */
    if (ac->ev.data != NULL)
        return REDIS_ERR;

    /* Create container for context and r/w events */
    e = (redisLibevEvents*)malloc(sizeof(*e));
    if (e == NULL)
        return REDIS_ERR; /* OOM: leave the context untouched */
    e->context = ac;
#if EV_MULTIPLICITY
    e->loop = loop;    /* 'loop' is the hidden EV_P_ parameter */
#else
    e->loop = NULL;
#endif
    e->reading = e->writing = 0;
    e->rev.data = e;
    e->wev.data = e;

    /* Register functions to start/stop listening for events */
    ac->ev.addRead = redisLibevAddRead;
    ac->ev.delRead = redisLibevDelRead;
    ac->ev.addWrite = redisLibevAddWrite;
    ac->ev.delWrite = redisLibevDelWrite;
    ac->ev.cleanup = redisLibevCleanup;
    ac->ev.data = e;

    /* Initialize read/write events */
    ev_io_init(&e->rev,redisLibevReadEvent,c->fd,EV_READ);
    ev_io_init(&e->wev,redisLibevWriteEvent,c->fd,EV_WRITE);

    return REDIS_OK;
}
#endif
| 4,587 | 30 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_config.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_config.h -- internal definitions for rpmemd config
*/
#include <stdint.h>
#include <stdbool.h>
#ifndef RPMEMD_DEFAULT_LOG_FILE
#define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log")
#endif
#ifndef RPMEMD_GLOBAL_CONFIG_FILE
#define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\
".conf")
#endif
#define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf")
#define RPMEM_DEFAULT_MAX_LANES 1024
#define RPMEM_DEFAULT_NTHREADS 0
#define HOME_ENV "HOME"
#define HOME_STR_PLACEHOLDER ("$" HOME_ENV)
/* Runtime configuration for the rpmemd daemon, populated from config
 * files and command-line options by rpmemd_config_read(). */
struct rpmemd_config {
	char *log_file;                  /* path of the log file */
	char *poolset_dir;               /* directory searched for pool sets */
	const char *rm_poolset;          /* pool set to remove, if requested */
	bool force;                      /* force the remove operation */
	bool pool_set;                   /* remove the pool set file as well */
	bool persist_apm;                /* allow Appliance Persistency Method */
	bool persist_general;            /* allow General Persistency Method */
	bool use_syslog;                 /* log to syslog instead of log_file */
	uint64_t max_lanes;              /* maximum number of lanes */
	enum rpmemd_log_level log_level; /* logging verbosity threshold */
	size_t nthreads;                 /* number of processing threads */
};
int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]);
void rpmemd_config_free(struct rpmemd_config *config);
| 2,527 | 32.706667 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_log.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_log.h -- rpmemd logging functions declarations
*/
#include <string.h>
#include "util.h"
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* The tab character is not allowed in rpmemd log,
* because it is not well handled by syslog.
* Please use RPMEMD_LOG_INDENT instead.
*/
#define RPMEMD_LOG_INDENT " "
#ifdef DEBUG
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_LOG(level, fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(RPD_LOG_##level, NULL, 0, fmt, ## arg);\
} while (0)
#endif
#ifdef DEBUG
#define RPMEMD_DBG(fmt, arg...) do {\
COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\
rpmemd_log(_RPD_LOG_DBG, __FILE__, __LINE__, fmt, ## arg);\
} while (0)
#else
#define RPMEMD_DBG(fmt, arg...) do {} while (0)
#endif
#define RPMEMD_ERR(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
} while (0)
#define RPMEMD_FATAL(fmt, arg...) do {\
RPMEMD_LOG(ERR, fmt, ## arg);\
abort();\
} while (0)
#define RPMEMD_ASSERT(cond) do {\
if (!(cond)) {\
rpmemd_log(RPD_LOG_ERR, __FILE__, __LINE__,\
"assertion fault: %s", #cond);\
abort();\
}\
} while (0)
/* Logging verbosity levels, ordered from most to least severe.
 * Messages at or above the configured level are emitted. */
enum rpmemd_log_level {
	RPD_LOG_ERR,     /* errors */
	RPD_LOG_WARN,    /* warnings */
	RPD_LOG_NOTICE,  /* normal but significant conditions */
	RPD_LOG_INFO,    /* informational messages */
	_RPD_LOG_DBG, /* disallow to use this with LOG macro */
	MAX_RPD_LOG,     /* number of levels; not a valid level itself */
};
enum rpmemd_log_level rpmemd_log_level_from_str(const char *str);
const char *rpmemd_log_level_to_str(enum rpmemd_log_level level);
extern enum rpmemd_log_level rpmemd_log_level;
int rpmemd_log_init(const char *ident, const char *fname, int use_syslog);
void rpmemd_log_close(void);
int rpmemd_prefix(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void rpmemd_log(enum rpmemd_log_level level, const char *fname,
int lineno, const char *fmt, ...) FORMAT_PRINTF(4, 5);
| 3,506 | 32.4 | 77 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_db.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_db.h -- internal definitions for rpmemd database of pool set files
*/
struct rpmemd_db;
struct rpmem_pool_attr;
/*
* struct rpmemd_db_pool -- remote pool context
*/
struct rpmemd_db_pool {
void *pool_addr;
size_t pool_size;
struct pool_set *set;
};
struct rpmemd_db *rpmemd_db_init(const char *root_dir, mode_t mode);
struct rpmemd_db_pool *rpmemd_db_pool_create(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size,
const struct rpmem_pool_attr *rattr);
struct rpmemd_db_pool *rpmemd_db_pool_open(struct rpmemd_db *db,
const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr);
int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc,
int force, int pool_set);
int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp,
const struct rpmem_pool_attr *rattr);
void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp);
void rpmemd_db_fini(struct rpmemd_db *db);
int rpmemd_db_check_dir(struct rpmemd_db *db);
int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool);
| 2,647 | 41.031746 | 76 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_util.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_util.h -- rpmemd utility functions declarations
*/
int rpmemd_pmem_persist(const void *addr, size_t len);
int rpmemd_flush_fatal(const void *addr, size_t len);
int rpmemd_apply_pm_policy(enum rpmem_persist_method *persist_method,
int (**persist)(const void *addr, size_t len),
void *(**memcpy_persist)(void *pmemdest, const void *src, size_t len),
const int is_pmem);
| 1,988 | 45.255814 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_fip.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_fip.h -- rpmemd libfabric provider module header file
*/
#include <stddef.h>
struct rpmemd_fip;
struct rpmemd_fip_attr {
void *addr;
size_t size;
unsigned nlanes;
size_t nthreads;
size_t buff_size;
enum rpmem_provider provider;
enum rpmem_persist_method persist_method;
int (*persist)(const void *addr, size_t len);
void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len);
int (*deep_persist)(const void *addr, size_t len, void *ctx);
void *ctx;
};
struct rpmemd_fip *rpmemd_fip_init(const char *node,
const char *service,
struct rpmemd_fip_attr *attr,
struct rpmem_resp_attr *resp,
enum rpmem_err *err);
void rpmemd_fip_fini(struct rpmemd_fip *fip);
int rpmemd_fip_accept(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_process_start(struct rpmemd_fip *fip);
int rpmemd_fip_process_stop(struct rpmemd_fip *fip);
int rpmemd_fip_wait_close(struct rpmemd_fip *fip, int timeout);
int rpmemd_fip_close(struct rpmemd_fip *fip);
| 2,581 | 37.537313 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd.h -- rpmemd main header file
*/
#define DAEMON_NAME "rpmemd"
| 1,673 | 43.052632 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/rpmemd/rpmemd_obc.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_obc.h -- rpmemd out-of-band connection declarations
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
struct rpmemd_obc;
struct rpmemd_obc_requests {
int (*create)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr);
int (*open)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_req_attr *req);
int (*close)(struct rpmemd_obc *obc, void *arg, int flags);
int (*set_attr)(struct rpmemd_obc *obc, void *arg,
const struct rpmem_pool_attr *pool_attr);
};
struct rpmemd_obc *rpmemd_obc_init(int fd_in, int fd_out);
void rpmemd_obc_fini(struct rpmemd_obc *obc);
int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status);
int rpmemd_obc_process(struct rpmemd_obc *obc,
struct rpmemd_obc_requests *req_cb, void *arg);
int rpmemd_obc_create_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res);
int rpmemd_obc_open_resp(struct rpmemd_obc *obc,
int status, const struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status);
int rpmemd_obc_close_resp(struct rpmemd_obc *obc,
int status);
| 2,811 | 39.753623 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/check.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* check.h -- pmempool check command header file
*/
int pmempool_check_func(const char *appname, int argc, char *argv[]);
void pmempool_check_help(const char *appname);
| 1,776 | 44.564103 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/create.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* create.h -- pmempool create command header file
*/
int pmempool_create_func(const char *appname, int argc, char *argv[]);
void pmempool_create_help(const char *appname);
| 1,780 | 44.666667 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/dump.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* dump.h -- pmempool dump command header file
*/
int pmempool_dump_func(const char *appname, int argc, char *argv[]);
void pmempool_dump_help(const char *appname);
| 1,772 | 44.461538 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/rm.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rm.h -- pmempool rm command header file
*/
void pmempool_rm_help(const char *appname);
int pmempool_rm_func(const char *appname, int argc, char *argv[]);
| 1,764 | 44.25641 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/feature.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* feature.h -- pmempool feature command header file
*/
int pmempool_feature_func(const char *appname, int argc, char *argv[]);
void pmempool_feature_help(const char *appname);
| 1,779 | 44.641026 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/convert.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* convert.h -- pmempool convert command header file
*/
#include <sys/types.h>
int pmempool_convert_func(const char *appname, int argc, char *argv[]);
void pmempool_convert_help(const char *appname);
| 1,808 | 43.121951 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/synchronize.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* synchronize.h -- pmempool sync command header file
*/
int pmempool_sync_func(const char *appname, int argc, char *argv[]);
void pmempool_sync_help(const char *appname);
| 1,779 | 44.641026 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/common.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* common.h -- declarations of common functions
*/
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <stdbool.h>
#include "queue.h"
#include "log.h"
#include "blk.h"
#include "libpmemobj.h"
#include "libpmemcto.h"
#include "cto.h"
#include "lane.h"
#include "ulog.h"
#include "memops.h"
#include "pmalloc.h"
#include "list.h"
#include "obj.h"
#include "memblock.h"
#include "heap_layout.h"
#include "tx.h"
#include "heap.h"
#include "btt_layout.h"
/* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */
#ifdef _WIN32
#include "srcversion.h"
#endif
#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
#define OPT_SHIFT 12
#define OPT_MASK (~((1 << OPT_SHIFT) - 1))
#define OPT_LOG (1 << (PMEM_POOL_TYPE_LOG + OPT_SHIFT))
#define OPT_BLK (1 << (PMEM_POOL_TYPE_BLK + OPT_SHIFT))
#define OPT_OBJ (1 << (PMEM_POOL_TYPE_OBJ + OPT_SHIFT))
#define OPT_BTT (1 << (PMEM_POOL_TYPE_BTT + OPT_SHIFT))
#define OPT_CTO (1 << (PMEM_POOL_TYPE_CTO + OPT_SHIFT))
#define OPT_ALL (OPT_LOG | OPT_BLK | OPT_OBJ | OPT_BTT | OPT_CTO)
#define OPT_REQ_SHIFT 8
#define OPT_REQ_MASK ((1 << OPT_REQ_SHIFT) - 1)
#define _OPT_REQ(c, n) ((c) << (OPT_REQ_SHIFT * (n)))
#define OPT_REQ0(c) _OPT_REQ(c, 0)
#define OPT_REQ1(c) _OPT_REQ(c, 1)
#define OPT_REQ2(c) _OPT_REQ(c, 2)
#define OPT_REQ3(c) _OPT_REQ(c, 3)
#define OPT_REQ4(c) _OPT_REQ(c, 4)
#define OPT_REQ5(c) _OPT_REQ(c, 5)
#define OPT_REQ6(c) _OPT_REQ(c, 6)
#define OPT_REQ7(c) _OPT_REQ(c, 7)
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#define FOREACH_RANGE(range, ranges)\
LIST_FOREACH(range, &(ranges)->head, next)
#define PLIST_OFF_TO_PTR(pop, off)\
((off) == 0 ? NULL : (void *)((uintptr_t)(pop) + (off) - OBJ_OOB_SIZE))
#define ENTRY_TO_ALLOC_HDR(entry)\
((void *)((uintptr_t)(entry) - sizeof(struct allocation_header)))
#define OBJH_FROM_PTR(ptr)\
((void *)((uintptr_t)(ptr) - sizeof(struct legacy_object_header)))
#define DEFAULT_HDR_SIZE 4096UL /* 4 KB */
#define DEFAULT_DESC_SIZE 4096UL /* 4 KB */
#define POOL_HDR_DESC_SIZE (DEFAULT_HDR_SIZE + DEFAULT_DESC_SIZE)
#define PTR_TO_ALLOC_HDR(ptr)\
((void *)((uintptr_t)(ptr) -\
sizeof(struct legacy_object_header)))
#define OBJH_TO_PTR(objh)\
((void *)((uintptr_t)(objh) + sizeof(struct legacy_object_header)))
/* invalid answer for ask_* functions */
#define INV_ANS '\0'
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* pmem_pool_type_t -- pool types
*/
typedef enum {
PMEM_POOL_TYPE_LOG = 0x01,
PMEM_POOL_TYPE_BLK = 0x02,
PMEM_POOL_TYPE_OBJ = 0x04,
PMEM_POOL_TYPE_BTT = 0x08,
PMEM_POOL_TYPE_CTO = 0x10,
PMEM_POOL_TYPE_ALL = 0x1f,
PMEM_POOL_TYPE_UNKNOWN = 0x80,
} pmem_pool_type_t;
struct option_requirement {
int opt;
pmem_pool_type_t type;
uint64_t req;
};
struct options {
const struct option *opts;
size_t noptions;
char *bitmap;
const struct option_requirement *req;
};
struct pmem_pool_params {
pmem_pool_type_t type;
char signature[POOL_HDR_SIG_LEN];
uint64_t size;
mode_t mode;
int is_poolset;
int is_part;
int is_checksum_ok;
union {
struct {
uint64_t bsize;
} blk;
struct {
char layout[PMEMOBJ_MAX_LAYOUT];
} obj;
struct {
char layout[PMEMCTO_MAX_LAYOUT];
} cto;
};
};
struct pool_set_file {
int fd;
char *fname;
void *addr;
size_t size;
struct pool_set *poolset;
size_t replica;
time_t mtime;
mode_t mode;
bool fileio;
};
struct pool_set_file *pool_set_file_open(const char *fname,
int rdonly, int check);
void pool_set_file_close(struct pool_set_file *file);
int pool_set_file_read(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_write(struct pool_set_file *file, void *buff,
size_t nbytes, uint64_t off);
int pool_set_file_set_replica(struct pool_set_file *file, size_t replica);
size_t pool_set_file_nreplicas(struct pool_set_file *file);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
void pool_set_file_persist(struct pool_set_file *file,
const void *addr, size_t len);
struct range {
LIST_ENTRY(range) next;
uint64_t first;
uint64_t last;
};
struct ranges {
LIST_HEAD(rangeshead, range) head;
};
pmem_pool_type_t pmem_pool_type_parse_hdr(const struct pool_hdr *hdrp);
pmem_pool_type_t pmem_pool_type(const void *base_pool_addr);
int pmem_pool_checksum(const void *base_pool_addr);
pmem_pool_type_t pmem_pool_type_parse_str(const char *str);
uint64_t pmem_pool_get_min_size(pmem_pool_type_t type);
int pmem_pool_parse_params(const char *fname, struct pmem_pool_params *paramsp,
int check);
int util_poolset_map(const char *fname, struct pool_set **poolset, int rdonly);
struct options *util_options_alloc(const struct option *options,
size_t nopts, const struct option_requirement *req);
void util_options_free(struct options *opts);
int util_options_verify(const struct options *opts, pmem_pool_type_t type);
int util_options_getopt(int argc, char *argv[], const char *optstr,
const struct options *opts);
int util_validate_checksum(void *addr, size_t len, uint64_t *csum,
uint64_t skip_off);
pmem_pool_type_t util_get_pool_type_second_page(const void *pool_base_addr);
int util_parse_mode(const char *str, mode_t *mode);
int util_parse_ranges(const char *str, struct ranges *rangesp,
struct range entire);
int util_ranges_add(struct ranges *rangesp, struct range range);
void util_ranges_clear(struct ranges *rangesp);
int util_ranges_contain(const struct ranges *rangesp, uint64_t n);
int util_ranges_empty(const struct ranges *rangesp);
int util_check_memory(const uint8_t *buff, size_t len, uint8_t val);
int util_parse_chunk_types(const char *str, uint64_t *types);
int util_parse_lane_sections(const char *str, uint64_t *types);
char ask(char op, char *answers, char def_ans, const char *fmt, va_list ap);
char ask_yn(char op, char def_ans, const char *fmt, va_list ap);
char ask_Yn(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
char ask_yN(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3);
unsigned util_heap_max_zone(size_t size);
int util_pool_clear_badblocks(const char *path, int create);
static const struct range ENTIRE_UINT64 = {
{ NULL, NULL }, /* range */
0, /* first */
UINT64_MAX /* last */
};
| 7,787 | 31.181818 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/transform.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* transform.h -- pmempool transform command header file
*/
int pmempool_transform_func(const char *appname, int argc, char *argv[]);
void pmempool_transform_help(const char *appname);
| 1,792 | 44.974359 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/info.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* info.h -- pmempool info command header file
*/
#include "vec.h"
/*
* Verbose levels used in application:
*
* VERBOSE_DEFAULT:
* Default value for application's verbosity level.
* This is also set for data structures which should be
* printed without any command line argument.
*
* VERBOSE_MAX:
* Maximum value for application's verbosity level.
* This value is used when -v command line argument passed.
*
* VERBOSE_SILENT:
* This value is higher than VERBOSE_MAX and it is used only
* for verbosity levels of data structures which should _not_ be
* printed without specified command line arguments.
*/
#define VERBOSE_SILENT 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_MAX 2
/*
* pmempool_info_args -- structure for storing command line arguments
*/
struct pmempool_info_args {
char *file; /* input file */
unsigned col_width; /* column width for printing fields */
bool human; /* sizes in human-readable formats */
bool force; /* force parsing pool */
pmem_pool_type_t type; /* forced pool type */
bool use_range; /* use range for blocks */
struct ranges ranges; /* range of block/chunks to dump */
int vlevel; /* verbosity level */
int vdata; /* verbosity level for data dump */
int vhdrdump; /* verbosity level for headers hexdump */
int vstats; /* verbosity level for statistics */
int vbadblocks; /* verbosity level for bad blocks */
struct {
size_t walk; /* data chunk size */
} log;
struct {
int vmap; /* verbosity level for BTT Map */
int vflog; /* verbosity level for BTT FLOG */
int vbackup; /* verbosity level for BTT Info backup */
bool skip_zeros; /* skip blocks marked with zero flag */
bool skip_error; /* skip blocks marked with error flag */
bool skip_no_flag; /* skip blocks not marked with any flag */
} blk;
struct {
int vlanes; /* verbosity level for lanes */
int vroot;
int vobjects;
int valloc;
int voobhdr;
int vheap;
int vzonehdr;
int vchunkhdr;
int vbitmap;
bool lanes_recovery;
bool ignore_empty_obj;
uint64_t chunk_types;
size_t replica;
struct ranges lane_ranges;
struct ranges type_ranges;
struct ranges zone_ranges;
struct ranges chunk_ranges;
} obj;
};
/*
* pmem_blk_stats -- structure with statistics for pmemblk
*/
struct pmem_blk_stats {
uint32_t total; /* number of processed blocks */
uint32_t zeros; /* number of blocks marked by zero flag */
uint32_t errors; /* number of blocks marked by error flag */
uint32_t noflag; /* number of blocks not marked with any flag */
};
/*
 * pmem_obj_class_stats -- statistics for a single pmemobj allocation class
 */
struct pmem_obj_class_stats {
	uint64_t n_units;	/* total number of units in the class */
	uint64_t n_used;	/* number of units currently in use */
	uint64_t unit_size;	/* size of a single unit, in bytes */
	uint64_t alignment;	/* alignment of the class */
	uint32_t nallocs;	/* units per allocation -- TODO confirm */
	uint16_t flags;		/* class flags */
};

/*
 * pmem_obj_zone_stats -- per-zone statistics for a pmemobj heap
 */
struct pmem_obj_zone_stats {
	uint64_t n_chunks;	/* total number of chunks in the zone */
	uint64_t n_chunks_type[MAX_CHUNK_TYPE];	/* chunk count per type */
	uint64_t size_chunks;	/* total size of chunks */
	uint64_t size_chunks_type[MAX_CHUNK_TYPE]; /* chunk size per type */
	VEC(, struct pmem_obj_class_stats) class_stats; /* per-class stats */
};

/*
 * pmem_obj_type_stats -- object statistics for a single type number,
 * linked into pmem_obj_stats.type_stats
 */
struct pmem_obj_type_stats {
	TAILQ_ENTRY(pmem_obj_type_stats) next;	/* list linkage */
	uint64_t type_num;	/* object type number */
	uint64_t n_objects;	/* number of objects with this type number */
	uint64_t n_bytes;	/* total bytes used by those objects */
};
struct pmem_obj_stats {
uint64_t n_total_objects;
uint64_t n_total_bytes;
uint64_t n_zones;
uint64_t n_zones_used;
struct pmem_obj_zone_stats *zone_stats;
TAILQ_HEAD(obj_type_stats_head, pmem_obj_type_stats) type_stats;
};
/*
* pmem_info -- context for pmeminfo application
*/
struct pmem_info {
const char *file_name; /* current file name */
struct pool_set_file *pfile;
struct pmempool_info_args args; /* arguments parsed from command line */
struct options *opts;
struct pool_set *poolset;
pmem_pool_type_t type;
struct pmem_pool_params params;
struct {
struct pmem_blk_stats stats;
} blk;
struct {
struct pmemobjpool *pop;
struct palloc_heap *heap;
struct alloc_class_collection *alloc_classes;
size_t size;
struct pmem_obj_stats stats;
uint64_t uuid_lo;
uint64_t objid;
} obj;
struct {
struct pmemcto *pcp;
size_t size;
} cto;
};
int pmempool_info_func(const char *appname, int argc, char *argv[]);
void pmempool_info_help(const char *appname);
int pmempool_info_read(struct pmem_info *pip, void *buff,
size_t nbytes, uint64_t off);
int pmempool_info_blk(struct pmem_info *pip);
int pmempool_info_log(struct pmem_info *pip);
int pmempool_info_obj(struct pmem_info *pip);
int pmempool_info_btt(struct pmem_info *pip);
int pmempool_info_cto(struct pmem_info *pip);
| 5,934 | 30.236842 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/tools/pmempool/output.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* output.h -- declarations of output printing related functions
*/
#include <time.h>
#include <stdint.h>
#include <stdio.h>
void out_set_vlevel(int vlevel);
void out_set_stream(FILE *stream);
void out_set_prefix(const char *prefix);
void out_set_col_width(unsigned col_width);
void outv_err(const char *fmt, ...) FORMAT_PRINTF(1, 2);
void out_err(const char *file, int line, const char *func,
const char *fmt, ...) FORMAT_PRINTF(4, 5);
void outv_err_vargs(const char *fmt, va_list ap);
void outv_indent(int vlevel, int i);
void outv(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_nl(int vlevel);
int outv_check(int vlevel);
void outv_title(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3);
void outv_field(int vlevel, const char *field, const char *fmt,
...) FORMAT_PRINTF(3, 4);
void outv_hexdump(int vlevel, const void *addr, size_t len, size_t offset,
int sep);
const char *out_get_uuid_str(uuid_t uuid);
const char *out_get_time_str(time_t time);
const char *out_get_size_str(uint64_t size, int human);
const char *out_get_percentage(double percentage);
const char *out_get_checksum(void *addr, size_t len, uint64_t *csump,
uint64_t skip_off);
const char *out_get_btt_map_entry(uint32_t map);
const char *out_get_pool_type_str(pmem_pool_type_t type);
const char *out_get_pool_signature(pmem_pool_type_t type);
const char *out_get_tx_state_str(uint64_t state);
const char *out_get_chunk_type_str(enum chunk_type type);
const char *out_get_chunk_flags(uint16_t flags);
const char *out_get_zone_magic_str(uint32_t magic);
const char *out_get_pmemoid_str(PMEMoid oid, uint64_t uuid_lo);
const char *out_get_arch_machine_class_str(uint8_t machine_class);
const char *out_get_arch_data_str(uint8_t data);
const char *out_get_arch_machine_str(uint16_t machine);
const char *out_get_last_shutdown_str(uint8_t dirty);
const char *out_get_alignment_desc_str(uint64_t ad, uint64_t cur_ad);
const char *out_get_incompat_features_str(uint32_t incompat);
| 3,585 | 44.974359 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/libpmemlog/log.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* log.h -- internal definitions for libpmem log module
*/
#ifndef LOG_H
#define LOG_H 1
#include <stdint.h>
#include <stddef.h>
#include <endian.h>
#include "ctl.h"
#include "util.h"
#include "os_thread.h"
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMLOG_LOG_PREFIX "libpmemlog"
#define PMEMLOG_LOG_LEVEL_VAR "PMEMLOG_LOG_LEVEL"
#define PMEMLOG_LOG_FILE_VAR "PMEMLOG_LOG_FILE"
/* attributes of the log memory pool format for the pool header */
#define LOG_HDR_SIG "PMEMLOG" /* must be 8 bytes including '\0' */
#define LOG_FORMAT_MAJOR 1
#define LOG_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define LOG_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t log_format_feat_default = LOG_FORMAT_FEAT_DEFAULT;
struct pmemlog {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint64_t start_offset; /* start offset of the usable log space */
uint64_t end_offset; /* maximum offset of the usable log space */
uint64_t write_offset; /* current write point for the log */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
os_rwlock_t *rwlockp; /* pointer to RW lock */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
};
/* data area starts at this alignment after the struct pmemlog above */
#define LOG_FORMAT_DATA_ALIGN ((uintptr_t)4096)
/*
 * log_convert2h -- byte-swap the on-media pmemlog offset fields from
 * little-endian to host byte order, in place
 */
static inline void
log_convert2h(struct pmemlog *plp)
{
	uint64_t start = le64toh(plp->start_offset);
	uint64_t end = le64toh(plp->end_offset);
	uint64_t write = le64toh(plp->write_offset);

	plp->start_offset = start;
	plp->end_offset = end;
	plp->write_offset = write;
}
/*
 * log_convert2le -- byte-swap the on-media pmemlog offset fields from
 * host byte order to little-endian, in place
 */
static inline void
log_convert2le(struct pmemlog *plp)
{
	uint64_t start = htole64(plp->start_offset);
	uint64_t end = htole64(plp->end_offset);
	uint64_t write = htole64(plp->write_offset);

	plp->start_offset = start;
	plp->end_offset = end;
	plp->write_offset = write;
}
#ifdef __cplusplus
}
#endif
#endif
| 3,867 | 31.504202 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/freebsd/include/endian.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* endian.h -- redirect for FreeBSD <sys/endian.h>
*/
#include <sys/endian.h>
| 1,680 | 43.236842 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/freebsd/include/features.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* features.h -- Empty file redirect
*/
| 1,641 | 44.611111 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/freebsd/include/sys/sysmacros.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/sysmacros.h -- Empty file redirect
*/
| 1,646 | 44.75 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/freebsd/include/linux/kdev_t.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/kdev_t.h -- Empty file redirect
*/
| 1,645 | 44.722222 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/freebsd/include/linux/limits.h
|
/*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/limits.h -- Empty file redirect
*/
| 1,645 | 44.722222 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/clo_vec.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* clo_vec.hpp -- command line options vector declarations
*/
#include "queue.h"
#include <cstdlib>
/* one set of argument values; element of clo_vec's args list */
struct clo_vec_args {
	TAILQ_ENTRY(clo_vec_args) next;	/* list linkage */
	void *args;	/* arguments structure (clo_vec.size bytes) */
};

/* heap allocation tracked by the vector -- presumably released by
 * clo_vec_free(); verify against its implementation */
struct clo_vec_alloc {
	TAILQ_ENTRY(clo_vec_alloc) next;	/* list linkage */
	void *ptr;	/* allocated pointer */
};

/* a single value on a value list */
struct clo_vec_value {
	TAILQ_ENTRY(clo_vec_value) next;	/* list linkage */
	void *ptr;	/* pointer to the value */
};

/* list of values for one command-line option */
struct clo_vec_vlist {
	TAILQ_HEAD(valueshead, clo_vec_value) head;	/* values */
	size_t nvalues;	/* number of values on the list */
};

/* vector of command-line argument combinations */
struct clo_vec {
	size_t size;	/* size of a single arguments structure */
	TAILQ_HEAD(argshead, clo_vec_args) args;	/* argument sets */
	size_t nargs;	/* number of argument sets */
	TAILQ_HEAD(allochead, clo_vec_alloc) allocs;	/* tracked allocations */
	size_t nallocs;	/* number of tracked allocations */
};
struct clo_vec *clo_vec_alloc(size_t size);
void clo_vec_free(struct clo_vec *clovec);
void *clo_vec_get_args(struct clo_vec *clovec, size_t i);
int clo_vec_add_alloc(struct clo_vec *clovec, void *ptr);
int clo_vec_memcpy(struct clo_vec *clovec, size_t off, size_t size, void *ptr);
int clo_vec_memcpy_list(struct clo_vec *clovec, size_t off, size_t size,
struct clo_vec_vlist *list);
struct clo_vec_vlist *clo_vec_vlist_alloc(void);
void clo_vec_vlist_free(struct clo_vec_vlist *list);
void clo_vec_vlist_add(struct clo_vec_vlist *list, void *ptr, size_t size);
| 2,734 | 34.986842 | 79 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmem_flush.cpp
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_flush.cpp -- benchmark implementation for pmem_persist and pmem_msync
*/
#include <cassert>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <libpmem.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#define PAGE_4K ((uintptr_t)1 << 12)
#define PAGE_2M ((uintptr_t)1 << 21)
/*
 * align_addr -- round addr down to the given boundary
 * (the mask trick requires align to be a power of two, which both
 * callers satisfy via PAGE_4K/PAGE_2M)
 */
static void *
align_addr(void *addr, uintptr_t align)
{
	uintptr_t mask = align - 1;

	return (void *)((uintptr_t)addr & ~mask);
}
/*
 * align_len -- grow len by the number of bytes gained when addr is
 * rounded down to the given boundary
 */
static size_t
align_len(size_t len, void *addr, uintptr_t align)
{
	uintptr_t gained = (uintptr_t)addr & (align - 1);

	return len + gained;
}

/*
 * roundup_len -- grow len as align_len() does, then round the result
 * up to the nearest multiple of align
 */
static size_t
roundup_len(size_t len, void *addr, uintptr_t align)
{
	size_t grown = align_len(len, addr, align);

	return (grown + align - 1) & ~(align - 1);
}
/*
* pmem_args -- benchmark specific arguments
*/
struct pmem_args {
char *operation; /* msync, dummy_msync, persist, ... */
char *mode; /* stat, seq, rand */
bool no_warmup; /* don't do warmup */
};
/*
* pmem_bench -- benchmark context
*/
struct pmem_bench {
uint64_t *offsets; /* write offsets */
size_t n_offsets; /* number of elements in offsets array */
size_t fsize; /* The size of the allocated PMEM */
struct pmem_args *pargs; /* prog_args structure */
void *pmem_addr; /* PMEM base address */
size_t pmem_len; /* length of PMEM mapping */
void *invalid_addr; /* invalid pages */
void *nondirty_addr; /* non-dirty pages */
void *pmem_addr_aligned; /* PMEM pages - 2M aligned */
void *invalid_addr_aligned; /* invalid pages - 2M aligned */
void *nondirty_addr_aligned; /* non-dirty pages - 2M aligned */
/* the actual benchmark operation */
int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len);
};
/*
 * mode_seq -- sequential mode: operation i targets chunk i
 */
static uint64_t
mode_seq(struct pmem_bench *pmb, uint64_t index)
{
	(void)pmb;

	return index;
}
/*
 * mode_stat -- static mode: every operation targets chunk 0
 */
static uint64_t
mode_stat(struct pmem_bench *pmb, uint64_t index)
{
	(void)pmb;
	(void)index;

	return 0;
}
/*
 * mode_rand -- random mode: pick a chunk index via rand(), reduced
 * modulo the number of offsets (not seeded here; seeding, if any,
 * happens elsewhere)
 */
static uint64_t
mode_rand(struct pmem_bench *pmb, uint64_t index)
{
	(void)index;

	size_t nchunks = pmb->n_offsets;

	return rand() % nchunks;
}
/*
* operation_mode -- the mode of the copy process
*
* * static - write always the same chunk,
* * sequential - write chunk by chunk,
* * random - write to chunks selected randomly.
*/
struct op_mode {
const char *mode;
uint64_t (*func_mode)(struct pmem_bench *pmb, uint64_t index);
};
static struct op_mode modes[] = {
{"stat", mode_stat}, {"seq", mode_seq}, {"rand", mode_rand},
};
#define MODES (sizeof(modes) / sizeof(modes[0]))
/*
 * parse_op_mode -- look up the "--mode" command line argument in the
 * modes[] table; returns the matching index, or -1 if not found
 */
static int
parse_op_mode(const char *arg)
{
	unsigned i = 0;

	while (i < MODES) {
		if (strcmp(arg, modes[i].mode) == 0)
			return (int)i;
		i++;
	}

	return -1;
}
/*
 * flush_noop -- baseline "flush" that does no work at all
 */
static int
flush_noop(struct pmem_bench *pmb, void *addr, size_t len)
{
	(void)pmb;
	(void)addr;
	(void)len;

	return 0;
}
/*
 * flush_persist -- flush data to persistence using pmem_persist()
 * (flushes exactly the [addr, addr+len) range)
 */
static int
flush_persist(struct pmem_bench *pmb, void *addr, size_t len)
{
	pmem_persist(addr, len);
	return 0;
}

/*
 * flush_persist_4K -- always flush entire 4K page(s) using pmem_persist()
 * (addr rounded down and len rounded up to the 4K boundary)
 */
static int
flush_persist_4K(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_4K);
	len = roundup_len(len, addr, PAGE_4K);
	pmem_persist(ptr, len);
	return 0;
}

/*
 * flush_persist_2M -- always flush entire 2M page(s) using pmem_persist()
 * (addr rounded down and len rounded up to the 2M boundary)
 */
static int
flush_persist_2M(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_2M);
	len = roundup_len(len, addr, PAGE_2M);
	pmem_persist(ptr, len);
	return 0;
}

/*
 * flush_msync -- flush data to persistence using pmem_msync()
 */
static int
flush_msync(struct pmem_bench *pmb, void *addr, size_t len)
{
	pmem_msync(addr, len);
	return 0;
}

/*
 * flush_msync_async -- emulate dummy msync() using the MS_ASYNC flag
 * (schedules the writeback but does not wait for it)
 */
static int
flush_msync_async(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_4K);
	len = align_len(len, addr, PAGE_4K);
	msync(ptr, len, MS_ASYNC);
	return 0;
}

/*
 * flush_msync_0 -- emulate dummy msync() using zero length
 * (measures bare syscall cost; nothing is actually flushed)
 */
static int
flush_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_4K);
	(void)len;
	msync(ptr, 0, MS_SYNC);
	return 0;
}

/*
 * flush_persist_4K_msync_0 -- emulate msync() that only flushes CPU cache
 *
 * Do flushing in user space (4K pages) + dummy zero-length syscall.
 */
static int
flush_persist_4K_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_4K);
	len = roundup_len(len, addr, PAGE_4K);
	pmem_persist(ptr, len);
	msync(ptr, 0, MS_SYNC);
	return 0;
}

/*
 * flush_persist_2M_msync_0 -- emulate msync() that only flushes CPU cache
 *
 * Do flushing in user space (2M pages) + dummy zero-length syscall.
 */
static int
flush_persist_2M_msync_0(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_2M);
	len = roundup_len(len, addr, PAGE_2M);
	pmem_persist(ptr, len);
	msync(ptr, 0, MS_SYNC);
	return 0;
}

/*
 * flush_msync_err -- emulate dummy msync() using invalid flags
 * (MS_SYNC | MS_ASYNC is an invalid combination per POSIX, so the
 * kernel should reject the call early -- another syscall-cost probe)
 */
static int
flush_msync_err(struct pmem_bench *pmb, void *addr, size_t len)
{
	void *ptr = align_addr(addr, PAGE_4K);
	len = align_len(len, addr, PAGE_4K);
	msync(ptr, len, MS_SYNC | MS_ASYNC);
	return 0;
}

/*
 * flush_msync_nodirty -- call msync() on non-dirty pages
 * (rebases addr from the PMEM mapping onto the corresponding offset
 * inside pmb->nondirty_addr_aligned before syncing)
 */
static int
flush_msync_nodirty(struct pmem_bench *pmb, void *addr, size_t len)
{
	uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned;
	uptr += (uintptr_t)pmb->nondirty_addr_aligned;
	void *ptr = align_addr((void *)uptr, PAGE_4K);
	len = align_len(len, (void *)uptr, PAGE_4K);
	pmem_msync(ptr, len);
	return 0;
}

/*
 * flush_msync_invalid -- emulate dummy msync() using an invalid address
 * (rebases addr onto the corresponding offset inside
 * pmb->invalid_addr_aligned, presumably an unmapped region -- confirm
 * against the setup code)
 */
static int
flush_msync_invalid(struct pmem_bench *pmb, void *addr, size_t len)
{
	uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned;
	uptr += (uintptr_t)pmb->invalid_addr_aligned;
	void *ptr = align_addr((void *)uptr, PAGE_4K);
	len = align_len(len, (void *)uptr, PAGE_4K);
	pmem_msync(ptr, len);
	return 0;
}
/*
 * struct op -- maps an "--operation" name to the flush function it selects
 */
struct op {
	const char *opname;	/* value accepted by the -o/--operation CLI arg */
	int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len);
};
/* table of all supported flush operations, keyed by name */
static struct op ops[] = {
	{"noop", flush_noop},
	{"persist", flush_persist},
	{"persist_4K", flush_persist_4K},
	{"persist_2M", flush_persist_2M},
	{"msync", flush_msync},
	{"msync_0", flush_msync_0},
	{"msync_err", flush_msync_err},
	{"persist_4K_msync_0", flush_persist_4K_msync_0},
	{"persist_2M_msync_0", flush_persist_2M_msync_0},
	{"msync_async", flush_msync_async},
	{"msync_nodirty", flush_msync_nodirty},
	{"msync_invalid", flush_msync_invalid},
};
/* number of entries in the ops table */
#define NOPS (sizeof(ops) / sizeof(ops[0]))
/*
 * parse_op_type -- parses command line "--operation" argument
 * and returns proper operation type.
 *
 * Returns the index into the ops table, or -1 when no entry matches.
 */
static int
parse_op_type(const char *arg)
{
	unsigned idx = 0;
	while (idx < NOPS) {
		if (strcmp(arg, ops[idx].opname) == 0)
			return idx;
		idx++;
	}
	return -1;
}
/*
 * pmem_flush_init -- benchmark initialization
 *
 * Parses command line arguments, allocates persistent memory, and maps it.
 * On success stores a struct pmem_bench as the benchmark's private data;
 * on failure releases everything acquired so far and returns -1.
 */
static int
pmem_flush_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	size_t file_size = 0;
	int flags = 0;
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	uint64_t (*func_mode)(struct pmem_bench * pmb, uint64_t index);
	auto *pmb = (struct pmem_bench *)malloc(sizeof(struct pmem_bench));
	assert(pmb != nullptr);
	pmb->pargs = (struct pmem_args *)args->opts;
	assert(pmb->pargs != nullptr);
	int i = parse_op_type(pmb->pargs->operation);
	if (i == -1) {
		fprintf(stderr, "wrong operation: %s\n", pmb->pargs->operation);
		goto err_free_pmb;
	}
	pmb->func_op = ops[i].func_op;
	pmb->n_offsets = args->n_ops_per_thread * args->n_threads;
	/* 2 extra 2M pages leave room for 2M-aligning the mapping */
	pmb->fsize = pmb->n_offsets * args->dsize + (2 * PAGE_2M);
	/* round up to 2M boundary */
	pmb->fsize = (pmb->fsize + PAGE_2M - 1) & ~(PAGE_2M - 1);
	i = parse_op_mode(pmb->pargs->mode);
	if (i == -1) {
		fprintf(stderr, "wrong mode: %s\n", pmb->pargs->mode);
		goto err_free_pmb;
	}
	func_mode = modes[i].func_mode;
	/* populate offsets array */
	assert(pmb->n_offsets != 0);
	pmb->offsets = (size_t *)malloc(pmb->n_offsets * sizeof(*pmb->offsets));
	assert(pmb->offsets != nullptr);
	for (size_t idx = 0; idx < pmb->n_offsets; ++idx)
		pmb->offsets[idx] = func_mode(pmb, idx);
	if (type != TYPE_DEVDAX) {
		/* Device DAX requires size 0 and no create flags */
		file_size = pmb->fsize;
		flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL;
	}
	/* create a pmem file and memory map it */
	pmb->pmem_addr = pmem_map_file(args->fname, file_size, flags,
				       args->fmode, &pmb->pmem_len, nullptr);
	if (pmb->pmem_addr == nullptr) {
		perror("pmem_map_file");
		goto err_free_offsets;
	}
	/* clean anonymous mapping used by the "msync_nodirty" operation */
	pmb->nondirty_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANON, -1, 0);
	if (pmb->nondirty_addr == MAP_FAILED) {
		perror("mmap(1)");
		goto err_unmap1;
	}
	/*
	 * Mapping used by the "msync_invalid" operation -- unmapped below on
	 * purpose so its address range is guaranteed to be invalid.
	 */
	pmb->invalid_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANON, -1, 0);
	if (pmb->invalid_addr == MAP_FAILED) {
		perror("mmap(2)");
		goto err_unmap2;
	}
	munmap(pmb->invalid_addr, pmb->fsize);
	/* 2M-align all three base addresses */
	pmb->pmem_addr_aligned =
		(void *)(((uintptr_t)pmb->pmem_addr + PAGE_2M - 1) &
			 ~(PAGE_2M - 1));
	pmb->nondirty_addr_aligned =
		(void *)(((uintptr_t)pmb->nondirty_addr + PAGE_2M - 1) &
			 ~(PAGE_2M - 1));
	pmb->invalid_addr_aligned =
		(void *)(((uintptr_t)pmb->invalid_addr + PAGE_2M - 1) &
			 ~(PAGE_2M - 1));
	pmembench_set_priv(bench, pmb);
	if (!pmb->pargs->no_warmup) {
		/* touch every 4K page once to fault the mappings in */
		size_t off;
		for (off = 0; off < pmb->fsize - PAGE_2M; off += PAGE_4K) {
			*(int *)((char *)pmb->pmem_addr_aligned + off) = 0;
			*(int *)((char *)pmb->nondirty_addr_aligned + off) = 0;
		}
	}
	return 0;
err_unmap2:
	munmap(pmb->nondirty_addr, pmb->fsize);
err_unmap1:
	pmem_unmap(pmb->pmem_addr, pmb->pmem_len);
err_free_offsets:
	/* offsets was leaked on these paths before; free it explicitly */
	free(pmb->offsets);
err_free_pmb:
	free(pmb);
	return -1;
}
/*
 * pmem_flush_exit -- benchmark cleanup
 *
 * Unmaps the mappings created in pmem_flush_init() and releases the
 * benchmark's private context.
 */
static int
pmem_flush_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench);
	/*
	 * Unmap with the length reported by pmem_map_file(); on Device DAX
	 * pmb->pmem_len may differ from the requested pmb->fsize. This also
	 * matches the init error path, which unmaps with pmem_len.
	 */
	pmem_unmap(pmb->pmem_addr, pmb->pmem_len);
	munmap(pmb->nondirty_addr, pmb->fsize);
	free(pmb);
	return 0;
}
/*
 * pmem_flush_operation -- actual benchmark operation
 *
 * Dirties an int in the chunk selected by the pre-computed offsets array,
 * then flushes it with the configured flush function.
 */
static int
pmem_flush_operation(struct benchmark *bench, struct operation_info *info)
{
	auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench);
	assert(info->index < pmb->n_offsets);
	uint64_t chunk = pmb->offsets[info->index];
	auto *dest = (int *)((char *)pmb->pmem_addr_aligned +
			     chunk * info->args->dsize);
	/* store + flush */
	*dest = *dest + 1;
	pmb->func_op(pmb, dest, info->args->dsize);
	return 0;
}
/* structure to define command line arguments */
static struct benchmark_clo pmem_flush_clo[3];
/* Stores information about benchmark. */
static struct benchmark_info pmem_flush_bench;
/*
 * pmem_flush_constructor -- fills in the benchmark's CLI options and its
 * descriptor, then registers it with the framework at program startup.
 */
CONSTRUCTOR(pmem_flush_constructor)
void
pmem_flush_constructor(void)
{
	/* -o/--operation: which entry of the ops table to benchmark */
	pmem_flush_clo[0].opt_short = 'o';
	pmem_flush_clo[0].opt_long = "operation";
	pmem_flush_clo[0].descr = "Operation type - persist,"
				  " msync, ...";
	pmem_flush_clo[0].type = CLO_TYPE_STR;
	pmem_flush_clo[0].off = clo_field_offset(struct pmem_args, operation);
	pmem_flush_clo[0].def = "noop";
	/* --mode: offset-generation mode (see the modes table) */
	pmem_flush_clo[1].opt_short = 0;
	pmem_flush_clo[1].opt_long = "mode";
	pmem_flush_clo[1].descr = "mode - stat, seq or rand";
	pmem_flush_clo[1].type = CLO_TYPE_STR;
	pmem_flush_clo[1].off = clo_field_offset(struct pmem_args, mode);
	pmem_flush_clo[1].def = "stat";
	/* -w/--no-warmup: skip pre-faulting the mappings in init */
	pmem_flush_clo[2].opt_short = 'w';
	pmem_flush_clo[2].opt_long = "no-warmup";
	pmem_flush_clo[2].descr = "Don't do warmup";
	pmem_flush_clo[2].type = CLO_TYPE_FLAG;
	pmem_flush_clo[2].off = clo_field_offset(struct pmem_args, no_warmup);
	/* benchmark descriptor */
	pmem_flush_bench.name = "pmem_flush";
	pmem_flush_bench.brief = "Benchmark for pmem_msync() "
				 "and pmem_persist()";
	pmem_flush_bench.init = pmem_flush_init;
	pmem_flush_bench.exit = pmem_flush_exit;
	pmem_flush_bench.multithread = true;
	pmem_flush_bench.multiops = true;
	pmem_flush_bench.operation = pmem_flush_operation;
	pmem_flush_bench.measure_time = true;
	pmem_flush_bench.clos = pmem_flush_clo;
	pmem_flush_bench.nclos = ARRAY_SIZE(pmem_flush_clo);
	pmem_flush_bench.opts_size = sizeof(struct pmem_args);
	pmem_flush_bench.rm_file = true;
	pmem_flush_bench.allow_poolset = false;
	REGISTER_BENCHMARK(pmem_flush_bench);
}
| 14,656 | 24.804577 | 77 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmembench.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmembench.cpp -- main source file for benchmark framework
*/
#include <cassert>
#include <cerrno>
#include <cfloat>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <dirent.h>
#include <err.h>
#include <getopt.h>
#include <linux/limits.h>
#include <sched.h>
#include <sys/wait.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "benchmark_worker.hpp"
#include "clo.hpp"
#include "clo_vec.hpp"
#include "config_reader.hpp"
#include "file.h"
#include "libpmempool.h"
#include "mmap.h"
#include "os.h"
#include "os_thread.h"
#include "queue.h"
#include "scenario.hpp"
#include "set.h"
#include "util.h"
#ifndef _WIN32
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#endif
/* average time required to get a current time from the system */
unsigned long long Get_time_avg;
#define MIN_EXE_TIME_E 0.5
/*
 * struct pmembench -- main context
 */
struct pmembench {
	int argc;		/* CLI argument count (after program name) */
	char **argv;		/* CLI argument vector */
	struct scenario *scenario;	/* current scenario, NULL for CLI mode */
	struct clo_vec *clovec;	/* parsed argument values */
	bool override_clos;	/* CLI args override config-file values */
};
/*
 * struct benchmark -- benchmark's context
 */
struct benchmark {
	LIST_ENTRY(benchmark) next;	/* link in the global benchmarks list */
	struct benchmark_info *info;	/* descriptor registered at startup */
	void *priv;			/* benchmark-private data (set in init) */
	struct benchmark_clo *clos;	/* merged common + benchmark CLOs */
	size_t nclos;			/* number of entries in clos */
	size_t args_size;		/* sizeof(benchmark_args) + opts size */
};
/*
 * struct bench_list -- list of available benchmarks
 */
struct bench_list {
	LIST_HEAD(benchmarks_head, benchmark) head;
	bool initialized;	/* lazy LIST_INIT done in pmembench_register */
};
/*
 * struct benchmark_opts -- arguments for pmembench
 */
struct benchmark_opts {
	bool help;		/* -h/--help requested */
	bool version;		/* -v/--version requested */
	const char *file_name;	/* config file name, if any */
};
/* framework version reported by -v/--version */
static struct version_s {
	unsigned major;
	unsigned minor;
} version = {1, 0};
/* benchmarks list initialization */
static struct bench_list benchmarks;
/* common arguments for benchmarks */
static struct benchmark_clo pmembench_clos[13];
/* list of arguments for pmembench */
static struct benchmark_clo pmembench_opts[2];
/*
 * pmembench_constructor -- fills in the framework-level options (-h, -v)
 * and the common per-benchmark options shared by every benchmark;
 * runs once at program startup.
 */
CONSTRUCTOR(pmembench_constructor)
void
pmembench_constructor(void)
{
	/* -h/--help (framework level) */
	pmembench_opts[0].opt_short = 'h';
	pmembench_opts[0].opt_long = "help";
	pmembench_opts[0].descr = "Print help";
	pmembench_opts[0].type = CLO_TYPE_FLAG;
	pmembench_opts[0].off = clo_field_offset(struct benchmark_opts, help);
	pmembench_opts[0].ignore_in_res = true;
	/* -v/--version (framework level) */
	pmembench_opts[1].opt_short = 'v';
	pmembench_opts[1].opt_long = "version";
	pmembench_opts[1].descr = "Print version";
	pmembench_opts[1].type = CLO_TYPE_FLAG;
	pmembench_opts[1].off =
		clo_field_offset(struct benchmark_opts, version);
	pmembench_opts[1].ignore_in_res = true;
	/* -h/--help (per benchmark) */
	pmembench_clos[0].opt_short = 'h';
	pmembench_clos[0].opt_long = "help";
	pmembench_clos[0].descr = "Print help for single benchmark";
	pmembench_clos[0].type = CLO_TYPE_FLAG;
	pmembench_clos[0].off = clo_field_offset(struct benchmark_args, help);
	pmembench_clos[0].ignore_in_res = true;
	/* -t/--threads */
	pmembench_clos[1].opt_short = 't';
	pmembench_clos[1].opt_long = "threads";
	pmembench_clos[1].type = CLO_TYPE_UINT;
	pmembench_clos[1].descr = "Number of working threads";
	pmembench_clos[1].off =
		clo_field_offset(struct benchmark_args, n_threads);
	pmembench_clos[1].def = "1";
	pmembench_clos[1].type_uint.size =
		clo_field_size(struct benchmark_args, n_threads);
	pmembench_clos[1].type_uint.base = CLO_INT_BASE_DEC;
	pmembench_clos[1].type_uint.min = 1;
	pmembench_clos[1].type_uint.max = UINT_MAX;
	/* -n/--ops-per-thread */
	pmembench_clos[2].opt_short = 'n';
	pmembench_clos[2].opt_long = "ops-per-thread";
	pmembench_clos[2].type = CLO_TYPE_UINT;
	pmembench_clos[2].descr = "Number of operations per thread";
	pmembench_clos[2].off =
		clo_field_offset(struct benchmark_args, n_ops_per_thread);
	pmembench_clos[2].def = "1";
	pmembench_clos[2].type_uint.size =
		clo_field_size(struct benchmark_args, n_ops_per_thread);
	pmembench_clos[2].type_uint.base = CLO_INT_BASE_DEC;
	pmembench_clos[2].type_uint.min = 1;
	pmembench_clos[2].type_uint.max = ULLONG_MAX;
	/* -d/--data-size */
	pmembench_clos[3].opt_short = 'd';
	pmembench_clos[3].opt_long = "data-size";
	pmembench_clos[3].type = CLO_TYPE_UINT;
	pmembench_clos[3].descr = "IO data size";
	pmembench_clos[3].off = clo_field_offset(struct benchmark_args, dsize);
	pmembench_clos[3].def = "1";
	pmembench_clos[3].type_uint.size =
		clo_field_size(struct benchmark_args, dsize);
	pmembench_clos[3].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	pmembench_clos[3].type_uint.min = 1;
	pmembench_clos[3].type_uint.max = ULONG_MAX;
	/* -f/--file */
	pmembench_clos[4].opt_short = 'f';
	pmembench_clos[4].opt_long = "file";
	pmembench_clos[4].type = CLO_TYPE_STR;
	pmembench_clos[4].descr = "File name";
	pmembench_clos[4].off = clo_field_offset(struct benchmark_args, fname);
	pmembench_clos[4].def = "/mnt/pmem/testfile";
	pmembench_clos[4].ignore_in_res = true;
	/* -m/--fmode (octal) */
	pmembench_clos[5].opt_short = 'm';
	pmembench_clos[5].opt_long = "fmode";
	pmembench_clos[5].type = CLO_TYPE_UINT;
	pmembench_clos[5].descr = "File mode";
	pmembench_clos[5].off = clo_field_offset(struct benchmark_args, fmode);
	pmembench_clos[5].def = "0666";
	pmembench_clos[5].ignore_in_res = true;
	pmembench_clos[5].type_uint.size =
		clo_field_size(struct benchmark_args, fmode);
	pmembench_clos[5].type_uint.base = CLO_INT_BASE_OCT;
	pmembench_clos[5].type_uint.min = 0;
	pmembench_clos[5].type_uint.max = ULONG_MAX;
	/* -s/--seed */
	pmembench_clos[6].opt_short = 's';
	pmembench_clos[6].opt_long = "seed";
	pmembench_clos[6].type = CLO_TYPE_UINT;
	pmembench_clos[6].descr = "PRNG seed";
	pmembench_clos[6].off = clo_field_offset(struct benchmark_args, seed);
	pmembench_clos[6].def = "0";
	pmembench_clos[6].type_uint.size =
		clo_field_size(struct benchmark_args, seed);
	pmembench_clos[6].type_uint.base = CLO_INT_BASE_DEC;
	pmembench_clos[6].type_uint.min = 0;
	pmembench_clos[6].type_uint.max = ~0;
	/* -r/--repeats */
	pmembench_clos[7].opt_short = 'r';
	pmembench_clos[7].opt_long = "repeats";
	pmembench_clos[7].type = CLO_TYPE_UINT;
	pmembench_clos[7].descr = "Number of repeats of scenario";
	pmembench_clos[7].off =
		clo_field_offset(struct benchmark_args, repeats);
	pmembench_clos[7].def = "1";
	pmembench_clos[7].type_uint.size =
		clo_field_size(struct benchmark_args, repeats);
	pmembench_clos[7].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	pmembench_clos[7].type_uint.min = 1;
	pmembench_clos[7].type_uint.max = ULONG_MAX;
	/* -F/--thread-affinity */
	pmembench_clos[8].opt_short = 'F';
	pmembench_clos[8].opt_long = "thread-affinity";
	pmembench_clos[8].descr = "Set worker threads CPU affinity mask";
	pmembench_clos[8].type = CLO_TYPE_FLAG;
	pmembench_clos[8].off =
		clo_field_offset(struct benchmark_args, thread_affinity);
	pmembench_clos[8].def = "false";
	/*
	 * XXX: add link to blog post about optimal affinity
	 * when it will be done
	 */
	/* -I/--affinity-list */
	pmembench_clos[9].opt_short = 'I';
	pmembench_clos[9].opt_long = "affinity-list";
	pmembench_clos[9].descr =
		"Set affinity mask as a list of CPUs separated by semicolon";
	pmembench_clos[9].type = CLO_TYPE_STR;
	pmembench_clos[9].off =
		clo_field_offset(struct benchmark_args, affinity_list);
	pmembench_clos[9].def = "";
	pmembench_clos[9].ignore_in_res = true;
	/* --main-affinity (-1 means "do not pin main thread") */
	pmembench_clos[10].opt_long = "main-affinity";
	pmembench_clos[10].descr = "Set affinity for main thread";
	pmembench_clos[10].type = CLO_TYPE_INT;
	pmembench_clos[10].off =
		clo_field_offset(struct benchmark_args, main_affinity);
	pmembench_clos[10].def = "-1";
	pmembench_clos[10].ignore_in_res = false;
	pmembench_clos[10].type_int.size =
		clo_field_size(struct benchmark_args, main_affinity);
	pmembench_clos[10].type_int.base = CLO_INT_BASE_DEC;
	pmembench_clos[10].type_int.min = (-1);
	pmembench_clos[10].type_int.max = LONG_MAX;
	/* -e/--min-exe-time */
	pmembench_clos[11].opt_short = 'e';
	pmembench_clos[11].opt_long = "min-exe-time";
	pmembench_clos[11].type = CLO_TYPE_UINT;
	pmembench_clos[11].descr = "Minimal execution time in seconds";
	pmembench_clos[11].off =
		clo_field_offset(struct benchmark_args, min_exe_time);
	pmembench_clos[11].def = "0";
	pmembench_clos[11].type_uint.size =
		clo_field_size(struct benchmark_args, min_exe_time);
	pmembench_clos[11].type_uint.base = CLO_INT_BASE_DEC;
	pmembench_clos[11].type_uint.min = 0;
	pmembench_clos[11].type_uint.max = ULONG_MAX;
	/* -p/--dynamic-poolset */
	pmembench_clos[12].opt_short = 'p';
	pmembench_clos[12].opt_long = "dynamic-poolset";
	pmembench_clos[12].type = CLO_TYPE_FLAG;
	pmembench_clos[12].descr =
		"Allow benchmark to create poolset and reuse files";
	pmembench_clos[12].off =
		clo_field_offset(struct benchmark_args, is_dynamic_poolset);
	pmembench_clos[12].ignore_in_res = true;
}
/*
 * pmembench_get_priv -- return private structure of benchmark
 * (whatever the benchmark's init callback stored via pmembench_set_priv)
 */
void *
pmembench_get_priv(struct benchmark *bench)
{
	return bench->priv;
}
/*
 * pmembench_set_priv -- set private structure of benchmark
 * (ownership stays with the benchmark; freed in its exit callback)
 */
void
pmembench_set_priv(struct benchmark *bench, void *priv)
{
	bench->priv = priv;
}
/*
 * pmembench_register -- register benchmark
 *
 * Called from benchmark constructors at program startup; lazily
 * initializes the global list on first use and prepends the new entry.
 * Always returns 0.
 */
int
pmembench_register(struct benchmark_info *bench_info)
{
	struct benchmark *bench = (struct benchmark *)calloc(1, sizeof(*bench));
	assert(bench != nullptr);
	bench->info = bench_info;
	if (!benchmarks.initialized) {
		LIST_INIT(&benchmarks.head);
		benchmarks.initialized = true;
	}
	LIST_INSERT_HEAD(&benchmarks.head, bench, next);
	return 0;
}
/*
 * pmembench_get_info -- return structure with information about benchmark
 */
struct benchmark_info *
pmembench_get_info(struct benchmark *bench)
{
	return bench->info;
}
/*
 * pmembench_release_clos -- release CLO structure
 * (frees the merged array allocated by pmembench_merge_clos)
 */
static void
pmembench_release_clos(struct benchmark *bench)
{
	free(bench->clos);
}
/*
 * pmembench_merge_clos -- merge benchmark's CLOs with common CLOs
 *
 * Builds one array: the common options first, then the benchmark-specific
 * ones. Benchmark-specific offsets are rebased past struct benchmark_args
 * because the parsed values are stored in one contiguous buffer of
 * args_size bytes (benchmark_args followed by the benchmark's own opts).
 */
static void
pmembench_merge_clos(struct benchmark *bench)
{
	size_t size = sizeof(struct benchmark_args);
	size_t pb_nclos = ARRAY_SIZE(pmembench_clos);
	size_t nclos = pb_nclos;
	size_t i;
	if (bench->info->clos) {
		size += bench->info->opts_size;
		nclos += bench->info->nclos;
	}
	auto *clos = (struct benchmark_clo *)calloc(
		nclos, sizeof(struct benchmark_clo));
	assert(clos != nullptr);
	memcpy(clos, pmembench_clos, pb_nclos * sizeof(struct benchmark_clo));
	if (bench->info->clos) {
		memcpy(&clos[pb_nclos], bench->info->clos,
		       bench->info->nclos * sizeof(struct benchmark_clo));
		for (i = 0; i < bench->info->nclos; i++) {
			/* rebase offset into the combined args buffer */
			clos[pb_nclos + i].off += sizeof(struct benchmark_args);
		}
	}
	bench->clos = clos;
	bench->nclos = nclos;
	bench->args_size = size;
}
/*
 * pmembench_run_worker -- run worker with benchmark operation
 *
 * Timestamps the whole run and every single operation; aborts on the
 * first operation that reports failure.
 */
static int
pmembench_run_worker(struct benchmark *bench, struct worker_info *winfo)
{
	benchmark_time_get(&winfo->beg);
	for (size_t i = 0; i < winfo->nops; i++) {
		if (bench->info->operation(bench, &winfo->opinfo[i]))
			return -1;
		/* per-operation end timestamp, consumed by latency stats */
		benchmark_time_get(&winfo->opinfo[i].end);
	}
	benchmark_time_get(&winfo->end);
	return 0;
}
/*
 * pmembench_print_header -- print header of benchmark's results
 *
 * First line identifies the benchmark (and scenario/group when run from a
 * config file); second line is the semicolon-separated CSV column header
 * matching pmembench_print_results().
 */
static void
pmembench_print_header(struct pmembench *pb, struct benchmark *bench,
		       struct clo_vec *clovec)
{
	if (pb->scenario) {
		printf("%s: %s [%" PRIu64 "]%s%s%s\n", pb->scenario->name,
		       bench->info->name, clovec->nargs,
		       pb->scenario->group ? " [group: " : "",
		       pb->scenario->group ? pb->scenario->group : "",
		       pb->scenario->group ? "]" : "");
	} else {
		printf("%s [%" PRIu64 "]\n", bench->info->name, clovec->nargs);
	}
	printf("total-avg[sec];"
	       "ops-per-second[1/sec];"
	       "total-max[sec];"
	       "total-min[sec];"
	       "total-median[sec];"
	       "total-std-dev[sec];"
	       "latency-avg[nsec];"
	       "latency-min[nsec];"
	       "latency-max[nsec];"
	       "latency-std-dev[nsec];"
	       "latency-pctl-50.0%%[nsec];"
	       "latency-pctl-99.0%%[nsec];"
	       "latency-pctl-99.9%%[nsec]");
	size_t i;
	/* one extra column per CLO that is not marked ignore_in_res */
	for (i = 0; i < bench->nclos; i++) {
		if (!bench->clos[i].ignore_in_res) {
			printf(";%s", bench->clos[i].opt_long);
		}
	}
	if (bench->info->print_bandwidth)
		printf(";bandwidth[MiB/s]");
	if (bench->info->print_extra_headers)
		bench->info->print_extra_headers();
	printf("\n");
}
/*
 * pmembench_print_results -- print benchmark's results
 *
 * One semicolon-separated CSV row; column order must stay in sync with
 * pmembench_print_header().
 */
static void
pmembench_print_results(struct benchmark *bench, struct benchmark_args *args,
			struct total_results *res)
{
	printf("%f;%f;%f;%f;%f;%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64
	       ";%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64,
	       res->total.avg, res->nopsps, res->total.max, res->total.min,
	       res->total.med, res->total.std_dev, res->latency.avg,
	       res->latency.min, res->latency.max, res->latency.std_dev,
	       res->latency.pctl50_0p, res->latency.pctl99_0p,
	       res->latency.pctl99_9p);
	size_t i;
	for (i = 0; i < bench->nclos; i++) {
		if (!bench->clos[i].ignore_in_res)
			printf(";%s", benchmark_clo_str(&bench->clos[i], args,
							bench->args_size));
	}
	/* bandwidth derived from ops/s and the per-op data size */
	if (bench->info->print_bandwidth)
		printf(";%f", res->nopsps * args->dsize / 1024 / 1024);
	if (bench->info->print_extra_values)
		bench->info->print_extra_values(bench, args, res);
	printf("\n");
}
/*
 * pmembench_parse_clo -- parse command line arguments for benchmark
 *
 * CLI mode parses argc/argv directly; scenario mode takes values from the
 * config file, optionally overridden by common CLI options first.
 */
static int
pmembench_parse_clo(struct pmembench *pb, struct benchmark *bench,
		    struct clo_vec *clovec)
{
	if (!pb->scenario) {
		return benchmark_clo_parse(pb->argc, pb->argv, bench->clos,
					   bench->nclos, clovec);
	}
	if (pb->override_clos) {
		/*
		 * Use only ARRAY_SIZE(pmembench_clos) clos - these are the
		 * general clos and are placed at the beginning of the
		 * clos array.
		 */
		int ret = benchmark_override_clos_in_scenario(
			pb->scenario, pb->argc, pb->argv, bench->clos,
			ARRAY_SIZE(pmembench_clos));
		/* reset for the next benchmark in the config file */
		optind = 1;
		if (ret)
			return ret;
	}
	return benchmark_clo_parse_scenario(pb->scenario, bench->clos,
					    bench->nclos, clovec);
}
/*
 * pmembench_parse_affinity -- parse next CPU number from a
 * semicolon-separated affinity list
 *
 * Iteration state lives in *saveptr (a strdup'd copy of the list that
 * strtok consumes); when the list is exhausted the iteration wraps
 * around to the beginning. Returns the CPU number (>= 0) or -1 on
 * error with errno set.
 */
static int
pmembench_parse_affinity(const char *list, char **saveptr)
{
	char *str = NULL;
	char *end;
	long cpu;
	if (*saveptr) {
		/* continue tokenizing the saved copy */
		str = strtok(NULL, ";");
		if (str == NULL) {
			/* end of list - we have to start over */
			free(*saveptr);
			*saveptr = NULL;
		}
	}
	if (!*saveptr) {
		*saveptr = strdup(list);
		if (*saveptr == NULL) {
			perror("strdup");
			return -1;
		}
		str = strtok(*saveptr, ";");
		if (str == NULL)
			goto err;
	}
	if ((str == NULL) || (*str == '\0'))
		goto err;
	errno = 0;
	cpu = strtol(str, &end, 10);
	/*
	 * Reject trailing garbage, out-of-range values and negative CPU
	 * numbers (a negative value would later corrupt cpu_set indexing).
	 */
	if (*end != '\0' || errno == ERANGE || cpu < 0 || cpu > INT_MAX)
		goto err;
	return (int)cpu;
err:
	errno = EINVAL;
	perror("pmembench_parse_affinity");
	free(*saveptr);
	*saveptr = NULL;
	return -1;
}
/*
 * pmembench_init_workers -- init benchmark's workers
 *
 * Allocates one benchmark_worker per thread, optionally pins each worker
 * to a CPU (round-robin by index, or from the --affinity-list argument),
 * and pre-allocates per-operation info records. Returns 0 on success,
 * -1 on failure (already-created workers are left for the caller's
 * cleanup path).
 */
static int
pmembench_init_workers(struct benchmark_worker **workers, size_t nworkers,
		       size_t n_ops, struct benchmark *bench,
		       struct benchmark_args *args)
{
	size_t i;
	int ncpus = 0;
	char *saveptr = nullptr;	/* iteration state for the affinity list */
	int ret = 0;
	if (args->thread_affinity) {
		ncpus = sysconf(_SC_NPROCESSORS_ONLN);
		if (ncpus <= 0)
			return -1;
	}
	for (i = 0; i < nworkers; i++) {
		workers[i] = benchmark_worker_alloc();
		if (args->thread_affinity) {
			int cpu;
			os_cpu_set_t cpuset;
			if (*args->affinity_list != '\0') {
				/* explicit CPU list takes precedence */
				cpu = pmembench_parse_affinity(
					args->affinity_list, &saveptr);
				if (cpu == -1) {
					ret = -1;
					goto end;
				}
			} else {
				/* default: pin worker i to CPU i (mod ncpus) */
				cpu = (int)i;
			}
			assert(ncpus > 0);
			cpu %= ncpus;
			os_cpu_zero(&cpuset);
			os_cpu_set(cpu, &cpuset);
			errno = os_thread_setaffinity_np(&workers[i]->thread,
							 sizeof(os_cpu_set_t),
							 &cpuset);
			if (errno) {
				perror("os_thread_setaffinity_np");
				ret = -1;
				goto end;
			}
		}
		workers[i]->info.index = i;
		workers[i]->info.nops = n_ops;
		/* NOTE(review): calloc result is not checked here */
		workers[i]->info.opinfo = (struct operation_info *)calloc(
			n_ops, sizeof(struct operation_info));
		size_t j;
		for (j = 0; j < n_ops; j++) {
			workers[i]->info.opinfo[j].worker = &workers[i]->info;
			workers[i]->info.opinfo[j].args = args;
			workers[i]->info.opinfo[j].index = j;
		}
		workers[i]->bench = bench;
		workers[i]->args = args;
		workers[i]->func = pmembench_run_worker;
		workers[i]->init = bench->info->init_worker;
		workers[i]->exit = bench->info->free_worker;
		benchmark_worker_init(workers[i]);
	}
end:
	free(saveptr);
	return ret;
}
/*
 * results_store -- store results of a single repeat
 *
 * Copies each worker's begin/end timestamps and every per-operation end
 * timestamp into the pre-allocated bench_results slot.
 */
static void
results_store(struct bench_results *res, struct benchmark_worker **workers,
	      unsigned nthreads, size_t nops)
{
	for (unsigned i = 0; i < nthreads; i++) {
		res->thres[i]->beg = workers[i]->info.beg;
		res->thres[i]->end = workers[i]->info.end;
		for (size_t j = 0; j < nops; j++) {
			res->thres[i]->end_op[j] =
				workers[i]->info.opinfo[j].end;
		}
	}
}
/*
 * compare_time -- compare time values (qsort comparator over
 * benchmark_time_t; delegates to benchmark_time_compare)
 */
static int
compare_time(const void *p1, const void *p2)
{
	return benchmark_time_compare((const benchmark_time_t *)p1,
				      (const benchmark_time_t *)p2);
}
/*
 * compare_doubles -- comparing function used for sorting
 * (qsort comparator; returns -1/0/1, treats NaN pairs as equal)
 */
static int
compare_doubles(const void *a1, const void *b1)
{
	double x = *(const double *)a1;
	double y = *(const double *)b1;
	if (x < y)
		return -1;
	if (y < x)
		return 1;
	return 0;
}
/*
 * compare_uint64t -- comparing function used for sorting
 * (qsort comparator over uint64_t; returns -1/0/1)
 */
static int
compare_uint64t(const void *a1, const void *b1)
{
	uint64_t x = *(const uint64_t *)a1;
	uint64_t y = *(const uint64_t *)b1;
	if (x < y)
		return -1;
	if (y < x)
		return 1;
	return 0;
}
/*
 * results_alloc -- prepare structure to store all benchmark results
 *
 * Allocates the full nrepeats x nthreads result matrix; each
 * thread_results carries a trailing flexible array of nops end-of-op
 * timestamps. Allocation failures abort via assert. Freed by
 * results_free().
 */
static struct total_results *
results_alloc(size_t nrepeats, size_t nthreads, size_t nops)
{
	struct total_results *total =
		(struct total_results *)malloc(sizeof(*total));
	assert(total != nullptr);
	total->nrepeats = nrepeats;
	total->nthreads = nthreads;
	total->nops = nops;
	total->res =
		(struct bench_results *)malloc(nrepeats * sizeof(*total->res));
	assert(total->res != nullptr);
	for (size_t i = 0; i < nrepeats; i++) {
		struct bench_results *res = &total->res[i];
		assert(nthreads != 0);
		res->thres = (struct thread_results **)malloc(
			nthreads * sizeof(*res->thres));
		assert(res->thres != nullptr);
		for (size_t j = 0; j < nthreads; j++) {
			/* header + nops trailing timestamps */
			res->thres[j] = (struct thread_results *)malloc(
				sizeof(*res->thres[j]) +
				nops * sizeof(benchmark_time_t));
			assert(res->thres[j] != nullptr);
		}
	}
	return total;
}
/*
 * results_free -- release results structure
 * (inverse of results_alloc: frees per-thread records, per-repeat
 * arrays, then the container)
 */
static void
results_free(struct total_results *total)
{
	for (size_t i = 0; i < total->nrepeats; i++) {
		for (size_t j = 0; j < total->nthreads; j++)
			free(total->res[i].thres[j]);
		free(total->res[i].thres);
	}
	free(total->res);
	free(total);
}
/*
 * get_total_results -- return results of all repeats of scenario
 *
 * Aggregates raw timestamps into the summary printed later:
 * per-repeat wall time (min/max/avg/median/std-dev), ops/s, and
 * per-operation latency (min/max/avg/std-dev + 50/99/99.9 percentiles).
 * Latency of op o is measured from the previous op's end timestamp,
 * and total time is corrected by the measured clock-read overhead.
 */
static void
get_total_results(struct total_results *tres)
{
	assert(tres->nrepeats != 0);
	assert(tres->nthreads != 0);
	assert(tres->nops != 0);
	/* reset results */
	memset(&tres->total, 0, sizeof(tres->total));
	memset(&tres->latency, 0, sizeof(tres->latency));
	tres->total.min = DBL_MAX;
	tres->total.max = DBL_MIN;
	tres->latency.min = UINT64_MAX;
	tres->latency.max = 0;
	/* allocate helper arrays */
	benchmark_time_t *tbeg =
		(benchmark_time_t *)malloc(tres->nthreads * sizeof(*tbeg));
	assert(tbeg != nullptr);
	benchmark_time_t *tend =
		(benchmark_time_t *)malloc(tres->nthreads * sizeof(*tend));
	assert(tend != nullptr);
	auto *totals = (double *)malloc(tres->nrepeats * sizeof(double));
	assert(totals != nullptr);
	/* estimate total penalty of getting time from the system */
	benchmark_time_t Tget;
	/* NOTE: this nsecs is shadowed by the uint64_t nsecs further down */
	unsigned long long nsecs = tres->nops * Get_time_avg;
	benchmark_time_set(&Tget, nsecs);
	for (size_t i = 0; i < tres->nrepeats; i++) {
		struct bench_results *res = &tres->res[i];
		/* get start and end timestamps of each worker */
		for (size_t j = 0; j < tres->nthreads; j++) {
			tbeg[j] = res->thres[j]->beg;
			tend[j] = res->thres[j]->end;
		}
		/* sort start and end timestamps */
		qsort(tbeg, tres->nthreads, sizeof(benchmark_time_t),
		      compare_time);
		qsort(tend, tres->nthreads, sizeof(benchmark_time_t),
		      compare_time);
		/* calculating time interval between start and end time */
		benchmark_time_t Tbeg = tbeg[0];
		benchmark_time_t Tend = tend[tres->nthreads - 1];
		benchmark_time_t Ttot_ove;
		benchmark_time_diff(&Ttot_ove, &Tbeg, &Tend);
		/*
		 * subtract time used for getting the current time from the
		 * system
		 */
		benchmark_time_t Ttot;
		benchmark_time_diff(&Ttot, &Tget, &Ttot_ove);
		double Stot = benchmark_time_get_secs(&Ttot);
		if (Stot > tres->total.max)
			tres->total.max = Stot;
		if (Stot < tres->total.min)
			tres->total.min = Stot;
		tres->total.avg += Stot;
		totals[i] = Stot;
	}
	/* median */
	qsort(totals, tres->nrepeats, sizeof(double), compare_doubles);
	if (tres->nrepeats % 2) {
		tres->total.med = totals[tres->nrepeats / 2];
	} else {
		double m1 = totals[tres->nrepeats / 2];
		double m2 = totals[tres->nrepeats / 2 - 1];
		tres->total.med = (m1 + m2) / 2.0;
	}
	/* total average time */
	tres->total.avg /= (double)tres->nrepeats;
	/* number of operations per second */
	tres->nopsps =
		(double)tres->nops * (double)tres->nthreads / tres->total.avg;
	/* std deviation of total time */
	for (size_t i = 0; i < tres->nrepeats; i++) {
		double dev = (totals[i] - tres->total.avg);
		dev *= dev;
		tres->total.std_dev += dev;
	}
	tres->total.std_dev = sqrt(tres->total.std_dev / tres->nrepeats);
	/* latency: first pass computes min/max and the sum for the average */
	for (size_t i = 0; i < tres->nrepeats; i++) {
		struct bench_results *res = &tres->res[i];
		for (size_t j = 0; j < tres->nthreads; j++) {
			struct thread_results *thres = res->thres[j];
			benchmark_time_t *beg = &thres->beg;
			for (size_t o = 0; o < tres->nops; o++) {
				benchmark_time_t lat;
				benchmark_time_diff(&lat, beg,
						    &thres->end_op[o]);
				uint64_t nsecs = benchmark_time_get_nsecs(&lat);
				/* min, max latency */
				if (nsecs > tres->latency.max)
					tres->latency.max = nsecs;
				if (nsecs < tres->latency.min)
					tres->latency.min = nsecs;
				tres->latency.avg += nsecs;
				beg = &thres->end_op[o];
			}
		}
	}
	/* average latency */
	size_t count = tres->nrepeats * tres->nthreads * tres->nops;
	assert(count > 0);
	tres->latency.avg /= count;
	auto *ntotals = (uint64_t *)calloc(count, sizeof(uint64_t));
	assert(ntotals != nullptr);
	count = 0;
	/* std deviation of latency and percentiles (second pass) */
	for (size_t i = 0; i < tres->nrepeats; i++) {
		struct bench_results *res = &tres->res[i];
		for (size_t j = 0; j < tres->nthreads; j++) {
			struct thread_results *thres = res->thres[j];
			benchmark_time_t *beg = &thres->beg;
			for (size_t o = 0; o < tres->nops; o++) {
				benchmark_time_t lat;
				benchmark_time_diff(&lat, beg,
						    &thres->end_op[o]);
				uint64_t nsecs = benchmark_time_get_nsecs(&lat);
				uint64_t dev = (nsecs - tres->latency.avg);
				dev *= dev;
				tres->latency.std_dev += dev;
				beg = &thres->end_op[o];
				ntotals[count] = nsecs;
				++count;
			}
		}
	}
	tres->latency.std_dev = sqrt(tres->latency.std_dev / count);
	/* find 50%, 99.0% and 99.9% percentiles */
	qsort(ntotals, count, sizeof(uint64_t), compare_uint64t);
	uint64_t p50_0 = count * 50 / 100;
	uint64_t p99_0 = count * 99 / 100;
	uint64_t p99_9 = count * 999 / 1000;
	tres->latency.pctl50_0p = ntotals[p50_0];
	tres->latency.pctl99_0p = ntotals[p99_0];
	tres->latency.pctl99_9p = ntotals[p99_9];
	free(ntotals);
	free(totals);
	free(tend);
	free(tbeg);
}
/*
 * pmembench_print_args -- print arguments for one benchmark
 *
 * For each CLO prints its short/long option, description, default value
 * and (for integer types) any non-trivial min/max bounds.
 */
static void
pmembench_print_args(struct benchmark_clo *clos, size_t nclos)
{
	struct benchmark_clo clo;
	for (size_t i = 0; i < nclos; i++) {
		clo = clos[i];
		if (clo.opt_short != 0)
			printf("\t-%c,", clo.opt_short);
		else
			printf("\t");
		printf("\t--%-15s\t\t%s", clo.opt_long, clo.descr);
		if (clo.type != CLO_TYPE_FLAG)
			printf(" [default: %s]", clo.def);
		if (clo.type == CLO_TYPE_INT) {
			if (clo.type_int.min != LONG_MIN)
				printf(" [min: %" PRId64 "]", clo.type_int.min);
			if (clo.type_int.max != LONG_MAX)
				printf(" [max: %" PRId64 "]", clo.type_int.max);
		} else if (clo.type == CLO_TYPE_UINT) {
			if (clo.type_uint.min != 0)
				printf(" [min: %" PRIu64 "]",
				       clo.type_uint.min);
			if (clo.type_uint.max != ULONG_MAX)
				printf(" [max: %" PRIu64 "]",
				       clo.type_uint.max);
		}
		printf("\n");
	}
}
/*
 * pmembench_print_help_single -- prints help for single benchmark
 * (name, brief description, then common and benchmark-specific arguments)
 */
static void
pmembench_print_help_single(struct benchmark *bench)
{
	struct benchmark_info *info = bench->info;
	printf("%s\n%s\n", info->name, info->brief);
	printf("\nArguments:\n");
	size_t nclos = sizeof(pmembench_clos) / sizeof(struct benchmark_clo);
	pmembench_print_args(pmembench_clos, nclos);
	if (info->clos == nullptr)
		return;
	pmembench_print_args(info->clos, info->nclos);
}
/*
 * pmembench_print_usage -- print usage of framework
 */
static void
pmembench_print_usage()
{
	printf("Usage: $ pmembench [-h|--help] [-v|--version]"
	       "\t[<benchmark>[<args>]]\n");
	printf("\t\t\t\t\t\t[<config>[<scenario>]]\n");
	printf("\t\t\t\t\t\t[<config>[<scenario>[<common_args>]]]\n");
}
/*
 * pmembench_print_version -- print version of framework
 */
static void
pmembench_print_version()
{
	/* 'version' is a global framework version descriptor (major.minor) */
	printf("Benchmark framework - version %u.%u\n", version.major,
	       version.minor);
}
/*
 * pmembench_print_examples() -- print examples of using framework
 */
static void
pmembench_print_examples()
{
	printf("\nExamples:\n");
	printf("$ pmembench <benchmark_name> <args>\n");
	printf(" # runs benchmark of name <benchmark> with arguments <args>\n");
	printf("or\n");
	printf("$ pmembench <config_file>\n");
	printf(" # runs all scenarios from config file\n");
	printf("or\n");
	/* fixed unbalanced bracket: "[-h|--help" was never closed */
	printf("$ pmembench [<benchmark_name>] [-h|--help] [-v|--version]\n");
	printf(" # prints help\n");
	printf("or\n");
	printf("$ pmembench <config_file> <name_of_scenario>\n");
	printf(" # runs the specified scenario from config file\n");
	printf("$ pmembench <config_file> <name_of_scenario_1> "
	       "<name_of_scenario_2> <common_args>\n");
	printf(" # runs the specified scenarios from config file and overwrites"
	       " the given common_args from the config file\n");
}
/*
 * pmembench_print_help -- print help for framework
 */
static void
pmembench_print_help()
{
	pmembench_print_version();
	pmembench_print_usage();

	/* common (framework-level) options first */
	printf("\nCommon arguments:\n");
	size_t nclos = sizeof(pmembench_opts) / sizeof(struct benchmark_clo);
	pmembench_print_args(pmembench_opts, nclos);

	/* list of registered benchmarks ("Avaliable" typo fixed) */
	printf("\nAvailable benchmarks:\n");
	struct benchmark *bench = nullptr;
	LIST_FOREACH(bench, &benchmarks.head, next)
	printf("\t%-20s\t\t%s\n", bench->info->name, bench->info->brief);
	printf("\n$ pmembench <benchmark> --help to print detailed information"
	       " about benchmark arguments\n");
	pmembench_print_examples();
}
/*
 * pmembench_get_bench -- searching benchmarks by name
 */
static struct benchmark *
pmembench_get_bench(const char *name)
{
	struct benchmark *it;

	/* linear scan over the registered benchmark list */
	LIST_FOREACH(it, &benchmarks.head, next)
	{
		if (strcmp(it->info->name, name) == 0)
			return it;
	}

	/* no benchmark registered under this name */
	return nullptr;
}
/*
 * pmembench_parse_opts -- parse arguments for framework
 *
 * Handles the framework-level options only (-h/--help, -v/--version).
 * Returns 0 on success, -1 on parse failure.
 */
static int
pmembench_parse_opts(struct pmembench *pb)
{
	int ret = 0;
	/*
	 * Undo the argc/argv shift done in main() so that the parser sees
	 * the program name again at argv[0]. Note: this mutates pb.
	 */
	int argc = ++pb->argc;
	char **argv = --pb->argv;
	struct benchmark_opts *opts = nullptr;
	struct clo_vec *clovec;
	size_t size, n_clos;

	size = sizeof(struct benchmark_opts);
	n_clos = ARRAY_SIZE(pmembench_opts);
	clovec = clo_vec_alloc(size);
	assert(clovec != nullptr);

	if (benchmark_clo_parse(argc, argv, pmembench_opts, n_clos, clovec)) {
		ret = -1;
		goto out;
	}

	/* parsed options are materialized into a benchmark_opts struct */
	opts = (struct benchmark_opts *)clo_vec_get_args(clovec, 0);
	if (opts == nullptr) {
		ret = -1;
		goto out;
	}

	if (opts->help)
		pmembench_print_help();

	if (opts->version)
		pmembench_print_version();

out:
	clo_vec_free(clovec);
	return ret;
}
/*
 * pmembench_remove_file -- remove file or directory if exists
 *
 * Plain files (and poolsets) are removed with pmempool_rm(); directories
 * are emptied recursively and then removed. Returns 0 on success (also
 * when the path does not exist), non-zero on failure.
 */
static int
pmembench_remove_file(const char *path)
{
	int ret = 0;
	os_stat_t status;
	char *tmp;

	int exists = util_file_exists(path);
	if (exists < 0)
		return -1;

	if (!exists)
		return 0;

	if (os_stat(path, &status) != 0)
		return 0;

	if (!(status.st_mode & S_IFDIR))
		return pmempool_rm(path, 0);

	struct dir_handle it;
	struct file_info info;

	if (util_file_dir_open(&it, path)) {
		return -1;
	}

	while (util_file_dir_next(&it, &info) == 0) {
		if (strcmp(info.filename, ".") == 0 ||
		    strcmp(info.filename, "..") == 0)
			continue;
		/* "<path><separator><filename>" + terminating NUL */
		tmp = (char *)malloc(strlen(path) + strlen(info.filename) + 2);
		if (tmp == nullptr) {
			/* fixed: previously returned without closing 'it' */
			util_file_dir_close(&it);
			return -1;
		}
		sprintf(tmp, "%s" OS_DIR_SEP_STR "%s", path, info.filename);
		ret = info.is_dir ? pmembench_remove_file(tmp)
				  : util_unlink(tmp);
		free(tmp);
		if (ret != 0) {
			util_file_dir_close(&it);
			return ret;
		}
	}
	util_file_dir_close(&it);
	return util_file_dir_remove(path);
}
/*
 * pmembench_single_repeat -- runs the benchmark once (a single repeat)
 */
static int
pmembench_single_repeat(struct benchmark *bench, struct benchmark_args *args,
			size_t n_threads, size_t n_ops,
			struct bench_results *res)
{
	int ret = 0;

	/* optionally pin the main thread to the requested CPU */
	if (args->main_affinity != -1) {
		os_cpu_set_t cpuset;
		os_cpu_zero(&cpuset);
		os_thread_t self;
		os_thread_self(&self);
		os_cpu_set(args->main_affinity, &cpuset);
		errno = os_thread_setaffinity_np(&self, sizeof(os_cpu_set_t),
						 &cpuset);
		if (errno) {
			perror("os_thread_setaffinity_np");
			return -1;
		}
		sched_yield();
	}

	/* start from a fresh pool file unless a dynamic poolset is used */
	if (bench->info->rm_file && !args->is_dynamic_poolset) {
		ret = pmembench_remove_file(args->fname);
		if (ret != 0) {
			perror("removing file failed");
			return ret;
		}
	}

	if (bench->info->init) {
		if (bench->info->init(bench, args)) {
			warn("%s: initialization failed", bench->info->name);
			return -1;
		}
	}

	assert(bench->info->operation != nullptr);
	assert(args->n_threads != 0);

	/*
	 * NOTE(review): workers are initialized for 'n_threads' entries but
	 * the loops below iterate over 'args->n_threads'; confirm these are
	 * always equal when this path is reached (they differ only for
	 * non-multithread benchmarks).
	 */
	struct benchmark_worker **workers;
	workers = (struct benchmark_worker **)malloc(
		args->n_threads * sizeof(struct benchmark_worker *));
	assert(workers != nullptr);

	if ((ret = pmembench_init_workers(workers, n_threads, n_ops, bench,
					  args)) != 0) {
		goto out;
	}

	unsigned j;
	/* launch every worker thread, then join them all */
	for (j = 0; j < args->n_threads; j++) {
		benchmark_worker_run(workers[j]);
	}

	for (j = 0; j < args->n_threads; j++) {
		benchmark_worker_join(workers[j]);
		if (workers[j]->ret != 0) {
			/* remember the failure but still join the rest */
			ret = workers[j]->ret;
			fprintf(stderr, "thread number %u failed\n", j);
		}
	}

	/* collect per-worker timings into this repeat's results */
	results_store(res, workers, args->n_threads, args->n_ops_per_thread);

	for (j = 0; j < args->n_threads; j++) {
		benchmark_worker_exit(workers[j]);

		free(workers[j]->info.opinfo);
		benchmark_worker_free(workers[j]);
	}
out:
	free(workers);

	if (bench->info->exit)
		bench->info->exit(bench, args);

	return ret;
}
/*
 * scale_up_min_exe_time -- scale up the number of operations to obtain an
 * execution time not smaller than the assumed minimal execution time
 *
 * Repeatedly probes one benchmark run and grows args->n_ops_per_thread
 * proportionally until the measured minimum reaches args->min_exe_time.
 * Returns 0 on success, 1 when the probe run fails.
 */
int
scale_up_min_exe_time(struct benchmark *bench, struct benchmark_args *args,
		      struct total_results **total_results, size_t n_threads,
		      size_t n_ops)
{
	const double min_exe_time = args->min_exe_time;
	struct total_results *total_res = *total_results;
	/* probe runs use a single repeat only */
	total_res->nrepeats = 1;

	do {
		/*
		 * run single benchmark repeat to probe execution time
		 */
		int ret = pmembench_single_repeat(bench, args, n_threads, n_ops,
						  &total_res->res[0]);
		if (ret != 0)
			return 1;

		get_total_results(total_res);

		/* within MIN_EXE_TIME_E of the target -- stop scaling */
		if (min_exe_time < total_res->total.min + MIN_EXE_TIME_E)
			break;

		/*
		 * scale up number of operations to get assumed minimal
		 * execution time
		 */
		n_ops = (size_t)((double)n_ops *
				 (min_exe_time + MIN_EXE_TIME_E) /
				 total_res->total.min);
		args->n_ops_per_thread = n_ops;

		/* results arrays are sized by n_ops -- reallocate them */
		results_free(total_res);
		*total_results = results_alloc(args->repeats, args->n_threads,
					       args->n_ops_per_thread);
		assert(*total_results != nullptr);
		total_res = *total_results;
		total_res->nrepeats = 1;
	} while (1);

	/* restore the real repeat count; the probe counts as repeat 0 */
	total_res->nrepeats = args->repeats;
	return 0;
}
/*
* is_absolute_path_to_directory -- checks if passed argument is absolute
* path to directory
*/
static bool
is_absolute_path_to_directory(const char *path)
{
os_stat_t sb;
return util_is_absolute_path(path) && os_stat(path, &sb) == 0 &&
S_ISDIR(sb.st_mode);
}
/*
 * pmembench_run -- runs one benchmark. Parses arguments and performs
 * specific functions.
 *
 * Fixes in this revision:
 *  - the OTHER_ERROR file-type check returned -1 directly, skipping the
 *    clo-vector release, working-directory restore and clos release; it
 *    now goes through the normal cleanup path,
 *  - error paths inside the per-args loop did 'goto out' without setting
 *    'ret', so failures were reported as success; they now set ret = -1.
 */
static int
pmembench_run(struct pmembench *pb, struct benchmark *bench)
{
	enum file_type type;
	char old_wd[PATH_MAX];
	int ret = 0;

	struct benchmark_args *args = nullptr;
	struct total_results *total_res = nullptr;
	struct latency *stats = nullptr;
	double *workers_times = nullptr;

	struct clo_vec *clovec = nullptr;

	assert(bench->info != nullptr);
	pmembench_merge_clos(bench);

	/*
	 * Check if PMEMBENCH_DIR env var is set and change
	 * the working directory accordingly.
	 */
	char *wd = os_getenv("PMEMBENCH_DIR");
	if (wd != nullptr) {
		/* get current dir name so it can be restored on exit */
		if (getcwd(old_wd, PATH_MAX) == nullptr) {
			perror("getcwd");
			ret = -1;
			goto out_release_clos;
		}

		os_stat_t stat_buf;
		if (os_stat(wd, &stat_buf) != 0) {
			perror("os_stat");
			ret = -1;
			goto out_release_clos;
		}

		if (!S_ISDIR(stat_buf.st_mode)) {
			warn("PMEMBENCH_DIR is not a directory: %s", wd);
			ret = -1;
			goto out_release_clos;
		}

		if (chdir(wd)) {
			perror("chdir(wd)");
			ret = -1;
			goto out_release_clos;
		}
	}

	if (bench->info->pre_init) {
		if (bench->info->pre_init(bench)) {
			warn("%s: pre-init failed", bench->info->name);
			ret = -1;
			goto out_old_wd;
		}
	}

	clovec = clo_vec_alloc(bench->args_size);
	assert(clovec != nullptr);

	if (pmembench_parse_clo(pb, bench, clovec)) {
		warn("%s: parsing command line arguments failed",
		     bench->info->name);
		ret = -1;
		goto out_release_args;
	}

	args = (struct benchmark_args *)clo_vec_get_args(clovec, 0);
	if (args == nullptr) {
		warn("%s: parsing command line arguments failed",
		     bench->info->name);
		ret = -1;
		goto out_release_args;
	}

	if (args->help) {
		pmembench_print_help_single(bench);
		goto out;
	}

	if (strlen(args->fname) > PATH_MAX) {
		warn("Filename too long");
		ret = -1;
		goto out;
	}

	type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		/* was 'return -1', which skipped all cleanup below */
		ret = -1;
		goto out;
	}

	pmembench_print_header(pb, bench, clovec);

	/* one full benchmark run per parsed argument set */
	size_t args_i;
	for (args_i = 0; args_i < clovec->nargs; args_i++) {
		args = (struct benchmark_args *)clo_vec_get_args(clovec,
								 args_i);
		if (args == nullptr) {
			warn("%s: parsing command line arguments failed",
			     bench->info->name);
			ret = -1;
			goto out;
		}

		/* benchmark-specific options live right behind the args */
		args->opts = (void *)((uintptr_t)args +
				      sizeof(struct benchmark_args));

		if (args->is_dynamic_poolset) {
			if (!bench->info->allow_poolset) {
				fprintf(stderr,
					"dynamic poolset not supported\n");
				ret = -1;
				goto out;
			}

			if (!is_absolute_path_to_directory(args->fname)) {
				fprintf(stderr, "path must be absolute and "
						"point to a directory\n");
				ret = -1;
				goto out;
			}
		} else {
			args->is_poolset =
				util_is_poolset_file(args->fname) == 1;
			if (args->is_poolset) {
				if (!bench->info->allow_poolset) {
					fprintf(stderr, "poolset files not "
							"supported\n");
					ret = -1;
					goto out;
				}
				args->fsize = util_poolset_size(args->fname);
				if (!args->fsize) {
					fprintf(stderr,
						"invalid size of poolset\n");
					ret = -1;
					goto out;
				}
			} else if (type == TYPE_DEVDAX) {
				args->fsize = util_file_get_size(args->fname);
				if (!args->fsize) {
					fprintf(stderr,
						"invalid size of device dax\n");
					ret = -1;
					goto out;
				}
			}
		}

		/* single-threaded/single-op benchmarks ignore the knobs */
		size_t n_threads =
			!bench->info->multithread ? 1 : args->n_threads;
		size_t n_ops =
			!bench->info->multiops ? 1 : args->n_ops_per_thread;
		size_t n_ops_per_thread_copy = args->n_ops_per_thread;

		stats = (struct latency *)calloc(args->repeats,
						 sizeof(struct latency));
		assert(stats != nullptr);
		workers_times = (double *)calloc(n_threads * args->repeats,
						 sizeof(double));
		assert(workers_times != nullptr);
		total_res = results_alloc(args->repeats, args->n_threads,
					  args->n_ops_per_thread);
		assert(total_res != nullptr);

		unsigned i = 0;
		if (args->min_exe_time != 0 && bench->info->multiops) {
			/* the scaling probe doubles as the first repeat */
			ret = scale_up_min_exe_time(bench, args, &total_res,
						    n_threads, n_ops);
			if (ret != 0)
				goto out;
			n_ops = args->n_ops_per_thread;
			i = 1;
		}

		for (; i < args->repeats; i++) {
			ret = pmembench_single_repeat(bench, args, n_threads,
						      n_ops,
						      &total_res->res[i]);
			if (ret != 0)
				goto out;
		}

		get_total_results(total_res);
		pmembench_print_results(bench, args, total_res);

		/* n_ops_per_thread may have been scaled up -- restore it */
		args->n_ops_per_thread = n_ops_per_thread_copy;

		results_free(total_res);
		free(stats);
		free(workers_times);
		total_res = nullptr;
		stats = nullptr;
		workers_times = nullptr;
	}

out:
	if (total_res)
		results_free(total_res);
	if (stats)
		free(stats);
	if (workers_times)
		free(workers_times);

out_release_args:
	clo_vec_free(clovec);

out_old_wd:
	/* restore the original working directory */
	if (wd != nullptr) { /* Only if PMEMBENCH_DIR env var was defined */
		if (chdir(old_wd)) {
			perror("chdir(old_wd)");
			ret = -1;
		}
	}

out_release_clos:
	pmembench_release_clos(bench);
	return ret;
}
/*
* pmembench_free_benchmarks -- release all benchmarks
*/
static void __attribute__((destructor)) pmembench_free_benchmarks(void)
{
while (!LIST_EMPTY(&benchmarks.head)) {
struct benchmark *bench = LIST_FIRST(&benchmarks.head);
LIST_REMOVE(bench, next);
free(bench);
}
}
/*
 * pmembench_run_scenario -- run single benchmark's scenario
 */
static int
pmembench_run_scenario(struct pmembench *pb, struct scenario *scenario)
{
	/* the scenario names the benchmark it belongs to */
	struct benchmark *bench = pmembench_get_bench(scenario->benchmark);
	if (bench == nullptr) {
		fprintf(stderr, "unknown benchmark: %s\n", scenario->benchmark);
		return -1;
	}
	pb->scenario = scenario;
	return pmembench_run(pb, bench);
}
/*
 * pmembench_run_scenarios -- run all scenarios
 */
static int
pmembench_run_scenarios(struct pmembench *pb, struct scenarios *ss)
{
	struct scenario *s;

	/* run in list order; abort on the first failing scenario */
	FOREACH_SCENARIO(s, ss)
	{
		if (pmembench_run_scenario(pb, s) != 0)
			return -1;
	}
	return 0;
}
/*
 * pmembench_run_config -- run one or all scenarios from config file
 *
 * With no extra cmd line args every scenario in the file is run; extra
 * args may either select a subset of scenarios by name and/or override
 * the common arguments from the config file.
 */
static int
pmembench_run_config(struct pmembench *pb, const char *config)
{
	struct scenarios *ss = nullptr;
	struct config_reader *cr = config_reader_alloc();
	assert(cr != nullptr);

	int ret = 0;

	if ((ret = config_reader_read(cr, config)))
		goto out;

	if ((ret = config_reader_get_scenarios(cr, &ss)))
		goto out;

	assert(ss != nullptr);

	if (pb->argc == 1) {
		/* only the config file name given -- run everything */
		if ((ret = pmembench_run_scenarios(pb, ss)) != 0)
			goto out_scenarios;
	} else {
		/* Skip the config file name in cmd line params */
		int tmp_argc = pb->argc - 1;
		char **tmp_argv = pb->argv + 1;

		if (!contains_scenarios(tmp_argc, tmp_argv, ss)) {
			/* no scenarios in cmd line arguments - parse params */
			pb->override_clos = true;
			if ((ret = pmembench_run_scenarios(pb, ss)) != 0)
				goto out_scenarios;
		} else { /* scenarios in cmd line */
			struct scenarios *cmd_ss = scenarios_alloc();
			assert(cmd_ss != nullptr);

			/* extract the named scenarios into cmd_ss */
			int parsed_scenarios = clo_get_scenarios(
				tmp_argc, tmp_argv, ss, cmd_ss);
			if (parsed_scenarios < 0)
				goto out_cmd;

			/*
			 * If there are any cmd line args left, treat
			 * them as config file params override.
			 */
			if (tmp_argc - parsed_scenarios)
				pb->override_clos = true;

			/*
			 * Skip the scenarios in the cmd line,
			 * pmembench_run_scenarios does not expect them and will
			 * fail otherwise.
			 */
			pb->argc -= parsed_scenarios;
			pb->argv += parsed_scenarios;
			ret = pmembench_run_scenarios(pb, cmd_ss);

		out_cmd:
			scenarios_free(cmd_ss);
		}
	}

out_scenarios:
	scenarios_free(ss);

out:
	config_reader_free(cr);
	return ret;
}
int
main(int argc, char *argv[])
{
	util_init();
	util_mmap_init();

	/*
	 * Parse common command line arguments and
	 * benchmark's specific ones.
	 */
	if (argc < 2) {
		pmembench_print_usage();
		exit(EXIT_FAILURE);
	}

	int ret = 0;
	int fexists;
	struct benchmark *bench;
	struct pmembench *pb = (struct pmembench *)calloc(1, sizeof(*pb));
	assert(pb != nullptr);

	/* calibrate the cost of time measurement itself */
	Get_time_avg = benchmark_get_avg_get_time();

	/* drop the program name; argv[0] is now the benchmark/config name */
	pb->argc = --argc;
	pb->argv = ++argv;

	char *bench_name = pb->argv[0];
	if (nullptr == bench_name) {
		ret = -1;
		goto out;
	}

	/*
	 * The first argument may be a registered benchmark name, a readable
	 * config file, or a framework option (-h/-v) -- tried in that order.
	 */
	fexists = os_access(bench_name, R_OK) == 0;
	bench = pmembench_get_bench(bench_name);
	if (nullptr != bench)
		ret = pmembench_run(pb, bench);
	else if (fexists)
		ret = pmembench_run_config(pb, bench_name);
	else if ((ret = pmembench_parse_opts(pb)) != 0) {
		pmembench_print_usage();
		goto out;
	}

out:
	free(pb);

	util_mmap_fini();
	return ret;
}
#ifdef _MSC_VER
extern "C" {
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
}
#endif
| 42,509 | 24.531532 | 77 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmem_memcpy.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_memcpy.cpp -- benchmark implementation for pmem_memcpy
*/
#include <cassert>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <libpmem.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#define FLUSH_ALIGN 64
#define MAX_OFFSET (FLUSH_ALIGN - 1)
struct pmem_bench;
typedef size_t (*offset_fn)(struct pmem_bench *pmb,
struct operation_info *info);
/*
* pmem_args -- benchmark specific arguments
*/
struct pmem_args {
/*
* Defines the copy operation direction. Whether it is
* writing from RAM to PMEM (for argument value "write")
* or PMEM to RAM (for argument value "read").
*/
char *operation;
/*
* The source address offset used to test pmem_memcpy()
* performance when source address is not aligned.
*/
size_t src_off;
/*
* The destination address offset used to test
* pmem_memcpy() performance when destination address
* is not aligned.
*/
size_t dest_off;
/* The size of data chunk. */
size_t chunk_size;
/*
* Specifies the order in which data chunks are selected
* to be copied. There are three modes supported:
* stat, seq, rand.
*/
char *src_mode;
/*
* Specifies the order in which data chunks are written
* to the destination address. There are three modes
* supported: stat, seq, rand.
*/
char *dest_mode;
/*
* When this flag is set to true, PMEM is not used.
* This option is useful, when comparing performance
* of pmem_memcpy() function to regular memcpy().
*/
bool memcpy;
/*
* When this flag is set to true, pmem_persist()
* function is used, otherwise pmem_flush() is performed.
*/
bool persist;
/* do not do warmup */
bool no_warmup;
};
/*
* pmem_bench -- benchmark context
*/
struct pmem_bench {
/* random offsets */
unsigned *rand_offsets;
/* number of elements in randoms array */
size_t n_rand_offsets;
/* The size of the allocated PMEM */
size_t fsize;
/* The size of the allocated buffer */
size_t bsize;
/* Pointer to the allocated volatile memory */
unsigned char *buf;
/* Pointer to the allocated PMEM */
unsigned char *pmem_addr;
/*
* This field gets 'buf' or 'pmem_addr' fields assigned,
* depending on the prog_args operation direction.
*/
unsigned char *src_addr;
/*
* This field gets 'buf' or 'pmem_addr' fields assigned,
* depending on the prog_args operation direction.
*/
unsigned char *dest_addr;
/* Stores prog_args structure */
struct pmem_args *pargs;
/*
* Function which returns src offset. Matches src_mode.
*/
offset_fn func_src;
/*
* Function which returns dst offset. Matches dst_mode.
*/
offset_fn func_dest;
/*
* The actual operation performed based on benchmark specific
* arguments.
*/
int (*func_op)(void *dest, void *source, size_t len);
};
/*
 * operation_type -- type of operation relative to persistent memory
 *
 * READ copies from PMEM to DRAM, WRITE copies from DRAM to PMEM
 * (see the src/dest assignment in pmem_memcpy_init()).
 */
enum operation_type { OP_TYPE_UNKNOWN, OP_TYPE_READ, OP_TYPE_WRITE };
/*
* operation_mode -- the mode of the copy process
*
* * static - read/write always the same chunk,
* * sequential - read/write chunk by chunk,
* * random - read/write to chunks selected randomly.
*
* It is used to determine source mode as well as the destination mode.
*/
enum operation_mode {
OP_MODE_UNKNOWN,
OP_MODE_STAT,
OP_MODE_SEQ,
OP_MODE_RAND
};
/*
 * parse_op_type -- parses command line "--operation" argument
 * and returns proper operation type.
 */
static enum operation_type
parse_op_type(const char *arg)
{
	/* map the option string onto the enum; anything else is unknown */
	if (strcmp(arg, "read") == 0)
		return OP_TYPE_READ;
	if (strcmp(arg, "write") == 0)
		return OP_TYPE_WRITE;
	return OP_TYPE_UNKNOWN;
}
/*
 * parse_op_mode -- parses command line "--src-mode" or "--dest-mode"
 * and returns proper operation mode.
 */
static enum operation_mode
parse_op_mode(const char *arg)
{
	/* map the mode string onto the enum; anything else is unknown */
	if (strcmp(arg, "stat") == 0)
		return OP_MODE_STAT;
	if (strcmp(arg, "seq") == 0)
		return OP_MODE_SEQ;
	if (strcmp(arg, "rand") == 0)
		return OP_MODE_RAND;
	return OP_MODE_UNKNOWN;
}
/*
 * mode_seq -- if copy mode is sequential mode_seq() returns
 * index of a chunk.
 */
static uint64_t
mode_seq(struct pmem_bench *pmb, struct operation_info *info)
{
	/* each worker owns a contiguous run of n_ops_per_thread chunks */
	uint64_t base = info->args->n_ops_per_thread * info->worker->index;
	return base + info->index;
}
/*
 * mode_stat -- if mode is static, the offset is always 0,
 * as only one block is used.
 */
static uint64_t
mode_stat(struct pmem_bench *pmb, struct operation_info *info)
{
	/* both parameters are deliberately unused in static mode */
	(void)pmb;
	(void)info;
	return 0;
}
/*
 * mode_rand -- if mode is random returns index of a random chunk
 */
static uint64_t
mode_rand(struct pmem_bench *pmb, struct operation_info *info)
{
	assert(info->index < pmb->n_rand_offsets);

	/* pre-generated random chunk within this worker's own run */
	uint64_t base = info->args->n_ops_per_thread * info->worker->index;
	return base + pmb->rand_offsets[info->index];
}
/*
 * assign_mode_func -- parses "--src-mode" and "--dest-mode" command line
 * arguments and returns one of the above mode functions.
 */
static offset_fn
assign_mode_func(char *option)
{
	enum operation_mode mode = parse_op_mode(option);

	/* nullptr signals an unrecognized mode to the caller */
	if (mode == OP_MODE_STAT)
		return mode_stat;
	if (mode == OP_MODE_SEQ)
		return mode_seq;
	if (mode == OP_MODE_RAND)
		return mode_rand;
	return nullptr;
}
/*
 * libc_memcpy -- copy using libc memcpy() function
 * followed by pmem_flush().
 *
 * Flushes without draining; always returns 0 (func_op contract).
 */
static int
libc_memcpy(void *dest, void *source, size_t len)
{
	memcpy(dest, source, len);
	pmem_flush(dest, len);
	return 0;
}
/*
 * libc_memcpy_persist -- copy using libc memcpy() function
 * followed by pmem_persist().
 *
 * Always returns 0 (func_op contract).
 */
static int
libc_memcpy_persist(void *dest, void *source, size_t len)
{
	memcpy(dest, source, len);
	pmem_persist(dest, len);
	return 0;
}
/*
 * libpmem_memcpy_nodrain -- copy using libpmem pmem_memcpy_nodrain()
 * function, i.e. without the final drain step of pmem_persist().
 * (comment fixed: previously misnamed "lipmem"/"pmem_memcpy_no_drain()")
 */
static int
libpmem_memcpy_nodrain(void *dest, void *source, size_t len)
{
	pmem_memcpy_nodrain(dest, source, len);
	return 0;
}
/*
 * libpmem_memcpy_persist -- copy using libpmem pmem_memcpy_persist() function.
 *
 * Always returns 0 (func_op contract).
 */
static int
libpmem_memcpy_persist(void *dest, void *source, size_t len)
{
	pmem_memcpy_persist(dest, source, len);
	return 0;
}
/*
 * assign_size -- assigns file and buffer size
 * depending on the operation mode and type.
 *
 * Validates the operation/mode strings, then sizes the DRAM buffer
 * (bsize) and the PMEM file (fsize). Returns 0 on success, -1 on
 * invalid arguments; *op_type receives the parsed operation type.
 */
static int
assign_size(struct pmem_bench *pmb, struct benchmark_args *args,
	    enum operation_type *op_type)
{
	*op_type = parse_op_type(pmb->pargs->operation);
	if (*op_type == OP_TYPE_UNKNOWN) {
		fprintf(stderr, "Invalid operation argument '%s'",
			pmb->pargs->operation);
		return -1;
	}

	enum operation_mode op_mode_src = parse_op_mode(pmb->pargs->src_mode);
	if (op_mode_src == OP_MODE_UNKNOWN) {
		fprintf(stderr, "Invalid source mode argument '%s'",
			pmb->pargs->src_mode);
		return -1;
	}

	enum operation_mode op_mode_dest = parse_op_mode(pmb->pargs->dest_mode);
	if (op_mode_dest == OP_MODE_UNKNOWN) {
		fprintf(stderr, "Invalid destination mode argument '%s'",
			pmb->pargs->dest_mode);
		return -1;
	}

	/*
	 * "stat" mode reuses one chunk, so that side needs a single chunk;
	 * "seq"/"rand" need one chunk per operation per thread.
	 */
	size_t large = args->n_ops_per_thread * pmb->pargs->chunk_size *
		args->n_threads;
	size_t little = pmb->pargs->chunk_size;

	/* on WRITE the buffer is the source and the file the destination */
	if (*op_type == OP_TYPE_WRITE) {
		pmb->bsize = op_mode_src == OP_MODE_STAT ? little : large;
		pmb->fsize = op_mode_dest == OP_MODE_STAT ? little : large;

		/* leave room for the requested misalignment offsets */
		if (pmb->pargs->src_off != 0)
			pmb->bsize += MAX_OFFSET;

		if (pmb->pargs->dest_off != 0)
			pmb->fsize += MAX_OFFSET;
	} else {
		/* on READ the roles are swapped */
		pmb->fsize = op_mode_src == OP_MODE_STAT ? little : large;
		pmb->bsize = op_mode_dest == OP_MODE_STAT ? little : large;

		if (pmb->pargs->src_off != 0)
			pmb->fsize += MAX_OFFSET;

		if (pmb->pargs->dest_off != 0)
			pmb->bsize += MAX_OFFSET;
	}

	return 0;
}
/*
 * pmem_memcpy_init -- benchmark initialization
 *
 * Parses command line arguments, allocates persistent memory, and maps it.
 *
 * Fixes in this revision: a failure to allocate rand_offsets leaked
 * pmb->buf, and failures after the rand_offsets allocation leaked
 * rand_offsets; a dedicated err_free_offsets label now unwinds all
 * resources in reverse acquisition order.
 */
static int
pmem_memcpy_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);

	int ret = 0;
	size_t file_size = 0;
	int flags = 0;
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}

	auto *pmb = (struct pmem_bench *)malloc(sizeof(struct pmem_bench));
	assert(pmb != nullptr);

	pmb->pargs = (struct pmem_args *)args->opts;
	assert(pmb->pargs != nullptr);

	pmb->pargs->chunk_size = args->dsize;

	enum operation_type op_type;
	/*
	 * Assign file and buffer size depending on the operation type
	 * (READ from PMEM or WRITE to PMEM)
	 */
	if (assign_size(pmb, args, &op_type) != 0) {
		ret = -1;
		goto err_free_pmb;
	}

	/* DRAM-side buffer, cache-line aligned like the PMEM mapping */
	pmb->buf =
		(unsigned char *)util_aligned_malloc(FLUSH_ALIGN, pmb->bsize);
	if (pmb->buf == nullptr) {
		perror("posix_memalign");
		ret = -1;
		goto err_free_pmb;
	}

	/* one pre-generated random chunk index per operation */
	pmb->n_rand_offsets = args->n_ops_per_thread * args->n_threads;
	assert(pmb->n_rand_offsets != 0);
	pmb->rand_offsets = (unsigned *)malloc(pmb->n_rand_offsets *
					       sizeof(*pmb->rand_offsets));
	if (pmb->rand_offsets == nullptr) {
		perror("malloc");
		ret = -1;
		goto err_free_buf; /* was err_free_pmb: leaked pmb->buf */
	}

	for (size_t i = 0; i < pmb->n_rand_offsets; ++i)
		pmb->rand_offsets[i] = rand() % args->n_ops_per_thread;

	/* Device DAX is mapped as-is; a regular file must be created */
	if (type != TYPE_DEVDAX) {
		file_size = pmb->fsize;
		flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL;
	}

	/* create a pmem file and memory map it */
	pmb->pmem_addr = (unsigned char *)pmem_map_file(
		args->fname, file_size, flags, args->fmode, nullptr, nullptr);
	if (pmb->pmem_addr == nullptr) {
		perror(args->fname);
		ret = -1;
		goto err_free_offsets; /* was err_free_buf: leaked offsets */
	}

	/* READ copies PMEM -> DRAM; WRITE copies DRAM -> PMEM */
	if (op_type == OP_TYPE_READ) {
		pmb->src_addr = pmb->pmem_addr;
		pmb->dest_addr = pmb->buf;
	} else {
		pmb->src_addr = pmb->buf;
		pmb->dest_addr = pmb->pmem_addr;
	}

	/* set proper func_src() and func_dest() depending on benchmark args */
	if ((pmb->func_src = assign_mode_func(pmb->pargs->src_mode)) ==
	    nullptr) {
		fprintf(stderr, "wrong src_mode parameter -- '%s'",
			pmb->pargs->src_mode);
		ret = -1;
		goto err_unmap;
	}

	if ((pmb->func_dest = assign_mode_func(pmb->pargs->dest_mode)) ==
	    nullptr) {
		fprintf(stderr, "wrong dest_mode parameter -- '%s'",
			pmb->pargs->dest_mode);
		ret = -1;
		goto err_unmap;
	}

	/* choose the copy primitive: libc vs libpmem, persist vs flush */
	if (pmb->pargs->memcpy) {
		pmb->func_op =
			pmb->pargs->persist ? libc_memcpy_persist : libc_memcpy;
	} else {
		pmb->func_op = pmb->pargs->persist ? libpmem_memcpy_persist
						   : libpmem_memcpy_nodrain;
	}

	if (!pmb->pargs->no_warmup) {
		/* touch every page of both buffers before measuring */
		memset(pmb->buf, 0, pmb->bsize);
		pmem_memset_persist(pmb->pmem_addr, 0, pmb->fsize);
	}

	pmembench_set_priv(bench, pmb);

	return 0;

	/* unwind in reverse order of acquisition */
err_unmap:
	pmem_unmap(pmb->pmem_addr, pmb->fsize);
err_free_offsets:
	free(pmb->rand_offsets);
err_free_buf:
	util_aligned_free(pmb->buf);
err_free_pmb:
	free(pmb);

	return ret;
}
/*
 * pmem_memcpy_operation -- actual benchmark operation
 *
 * Depending on the memcpy flag "-m" tested operation will be memcpy()
 * or pmem_memcpy_persist().
 */
static int
pmem_memcpy_operation(struct benchmark *bench, struct operation_info *info)
{
	auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench);

	/* chunk indices chosen by the configured src/dest mode functions */
	size_t src_index = pmb->func_src(pmb, info);
	size_t dest_index = pmb->func_dest(pmb, info);

	/* chunk base plus the optional cache-line misalignment offset */
	void *source = pmb->src_addr + src_index * pmb->pargs->chunk_size +
		pmb->pargs->src_off;

	void *dest = pmb->dest_addr + dest_index * pmb->pargs->chunk_size +
		pmb->pargs->dest_off;

	size_t len = pmb->pargs->chunk_size;

	/* the copy primitive selected in pmem_memcpy_init() */
	pmb->func_op(dest, source, len);

	return 0;
}
/*
 * pmem_memcpy_exit -- benchmark cleanup
 */
static int
pmem_memcpy_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *ctx = (struct pmem_bench *)pmembench_get_priv(bench);

	/* the resources are independent; release them all, then the context */
	free(ctx->rand_offsets);
	util_aligned_free(ctx->buf);
	pmem_unmap(ctx->pmem_addr, ctx->fsize);
	free(ctx);
	return 0;
}
/* structure to define command line arguments */
static struct benchmark_clo pmem_memcpy_clo[8];
/* Stores information about benchmark. */
static struct benchmark_info pmem_memcpy_bench;
CONSTRUCTOR(pmem_memcpy_constructor)
void
pmem_memcpy_constructor(void)
{
	/* fill in the command-line option descriptors */
	pmem_memcpy_clo[0].opt_short = 'o';
	pmem_memcpy_clo[0].opt_long = "operation";
	pmem_memcpy_clo[0].descr = "Operation type - write, read";
	pmem_memcpy_clo[0].type = CLO_TYPE_STR;
	pmem_memcpy_clo[0].off = clo_field_offset(struct pmem_args, operation);
	pmem_memcpy_clo[0].def = "write";

	pmem_memcpy_clo[1].opt_short = 'S';
	pmem_memcpy_clo[1].opt_long = "src-offset";
	pmem_memcpy_clo[1].descr = "Source cache line alignment"
				   " offset";
	pmem_memcpy_clo[1].type = CLO_TYPE_UINT;
	pmem_memcpy_clo[1].off = clo_field_offset(struct pmem_args, src_off);
	pmem_memcpy_clo[1].def = "0";
	pmem_memcpy_clo[1].type_uint.size =
		clo_field_size(struct pmem_args, src_off);
	pmem_memcpy_clo[1].type_uint.base = CLO_INT_BASE_DEC;
	pmem_memcpy_clo[1].type_uint.min = 0;
	pmem_memcpy_clo[1].type_uint.max = MAX_OFFSET;

	pmem_memcpy_clo[2].opt_short = 'D';
	pmem_memcpy_clo[2].opt_long = "dest-offset";
	pmem_memcpy_clo[2].descr = "Destination cache line "
				   "alignment offset";
	pmem_memcpy_clo[2].type = CLO_TYPE_UINT;
	pmem_memcpy_clo[2].off = clo_field_offset(struct pmem_args, dest_off);
	pmem_memcpy_clo[2].def = "0";
	pmem_memcpy_clo[2].type_uint.size =
		clo_field_size(struct pmem_args, dest_off);
	pmem_memcpy_clo[2].type_uint.base = CLO_INT_BASE_DEC;
	pmem_memcpy_clo[2].type_uint.min = 0;
	pmem_memcpy_clo[2].type_uint.max = MAX_OFFSET;

	pmem_memcpy_clo[3].opt_short = 0;
	pmem_memcpy_clo[3].opt_long = "src-mode";
	pmem_memcpy_clo[3].descr = "Source reading mode";
	pmem_memcpy_clo[3].type = CLO_TYPE_STR;
	pmem_memcpy_clo[3].off = clo_field_offset(struct pmem_args, src_mode);
	pmem_memcpy_clo[3].def = "seq";

	pmem_memcpy_clo[4].opt_short = 0;
	pmem_memcpy_clo[4].opt_long = "dest-mode";
	pmem_memcpy_clo[4].descr = "Destination writing mode";
	pmem_memcpy_clo[4].type = CLO_TYPE_STR;
	pmem_memcpy_clo[4].off = clo_field_offset(struct pmem_args, dest_mode);
	pmem_memcpy_clo[4].def = "seq";

	pmem_memcpy_clo[5].opt_short = 'm';
	pmem_memcpy_clo[5].opt_long = "libc-memcpy";
	pmem_memcpy_clo[5].descr = "Use libc memcpy()";
	pmem_memcpy_clo[5].type = CLO_TYPE_FLAG;
	pmem_memcpy_clo[5].off = clo_field_offset(struct pmem_args, memcpy);
	pmem_memcpy_clo[5].def = "false";

	pmem_memcpy_clo[6].opt_short = 'p';
	pmem_memcpy_clo[6].opt_long = "persist";
	pmem_memcpy_clo[6].descr = "Use pmem_persist()";
	pmem_memcpy_clo[6].type = CLO_TYPE_FLAG;
	pmem_memcpy_clo[6].off = clo_field_offset(struct pmem_args, persist);
	pmem_memcpy_clo[6].def = "true";

	pmem_memcpy_clo[7].opt_short = 'w';
	pmem_memcpy_clo[7].opt_long = "no-warmup";
	pmem_memcpy_clo[7].descr = "Don't do warmup";
	pmem_memcpy_clo[7].def = "false";
	pmem_memcpy_clo[7].type = CLO_TYPE_FLAG;
	pmem_memcpy_clo[7].off = clo_field_offset(struct pmem_args, no_warmup);

	/* describe and register the benchmark itself */
	pmem_memcpy_bench.name = "pmem_memcpy";
	/* fixed: the concatenated literals previously lacked spaces
	 * ("Benchmark forpmem_memcpy_persist()...nodrain()operations") */
	pmem_memcpy_bench.brief = "Benchmark for "
				  "pmem_memcpy_persist() and "
				  "pmem_memcpy_nodrain() "
				  "operations";
	pmem_memcpy_bench.init = pmem_memcpy_init;
	pmem_memcpy_bench.exit = pmem_memcpy_exit;
	pmem_memcpy_bench.multithread = true;
	pmem_memcpy_bench.multiops = true;
	pmem_memcpy_bench.operation = pmem_memcpy_operation;
	pmem_memcpy_bench.measure_time = true;
	pmem_memcpy_bench.clos = pmem_memcpy_clo;
	pmem_memcpy_bench.nclos = ARRAY_SIZE(pmem_memcpy_clo);
	pmem_memcpy_bench.opts_size = sizeof(struct pmem_args);
	pmem_memcpy_bench.rm_file = true;
	pmem_memcpy_bench.allow_poolset = false;
	pmem_memcpy_bench.print_bandwidth = true;
	REGISTER_BENCHMARK(pmem_memcpy_bench);
}
| 17,041 | 25.628125 | 79 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmemobj_persist.cpp
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobj_persist.cpp -- pmemobj persist benchmarks definition
*/
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
#include "util.h"
/*
* The factor used for PMEM pool size calculation, accounts for metadata,
* fragmentation and etc.
*/
#define FACTOR 3
/* The minimum allocation size that pmalloc can perform */
#define ALLOC_MIN_SIZE 64
/* OOB and allocation header size */
#define OOB_HEADER_SIZE 64
#define CONST_B 0xFF
/*
* prog_args -- benchmark specific command line options
*/
struct prog_args {
size_t minsize; /* minimum size for random allocation size */
bool use_random_size; /* if set, use random size allocations */
bool no_warmup; /* do not do warmup */
unsigned seed; /* seed for random numbers */
};
/*
* obj_bench -- benchmark context
*/
struct obj_bench {
PMEMobjpool *pop; /* persistent pool handle */
struct prog_args *pa; /* prog_args structure */
PMEMoid *oids; /* vector of allocated objects */
void **ptrs; /* pointers to allocated objects */
uint64_t nobjs; /* number of allocated objects */
size_t obj_size; /* size of each allocated objects */
int const_b; /* memset() value */
};
/*
 * init_objects -- allocate persistent objects and obtain direct pointers
 *
 * Allocates ob->nobjs objects of ob->obj_size bytes each from the pool and
 * caches both the OIDs and their direct pointers in ob->oids / ob->ptrs.
 * Returns 0 on success, -1 on failure (the helper arrays are released on
 * error; NOTE(review): objects already allocated in the pool are not freed
 * on the error path — presumably acceptable because the pool file is
 * removed after the run; confirm against rm_file in the benchmark info).
 */
static int
init_objects(struct obj_bench *ob)
{
	assert(ob->nobjs != 0);
	ob->oids = (PMEMoid *)malloc(ob->nobjs * sizeof(*ob->oids));
	if (!ob->oids) {
		perror("malloc");
		return -1;
	}
	ob->ptrs = (void **)malloc(ob->nobjs * sizeof(*ob->ptrs));
	if (!ob->ptrs) {
		perror("malloc");
		goto err_malloc;
	}
	for (uint64_t i = 0; i < ob->nobjs; i++) {
		PMEMoid oid;
		void *ptr;
		if (pmemobj_alloc(ob->pop, &oid, ob->obj_size, 0, nullptr,
				  nullptr)) {
			perror("pmemobj_alloc");
			goto err_palloc;
		}
		ptr = pmemobj_direct(oid);
		if (!ptr) {
			perror("pmemobj_direct");
			goto err_palloc;
		}
		ob->oids[i] = oid;
		ob->ptrs[i] = ptr;
	}
	return 0;
err_palloc:
	free(ob->ptrs);
err_malloc:
	free(ob->oids);
	return -1;
}
/*
 * do_warmup -- does the warmup by zero-filling and persisting every
 * pre-allocated object, touching the whole pool area once
 */
static void
do_warmup(struct obj_bench *ob)
{
	uint64_t count = ob->nobjs;
	for (uint64_t obj = 0; obj < count; ++obj) {
		void *dest = ob->ptrs[obj];
		memset(dest, 0, ob->obj_size);
		pmemobj_persist(ob->pop, dest, ob->obj_size);
	}
}
/*
 * obj_persist_op -- actual benchmark operation
 *
 * Fills one pre-allocated object with the constant byte and persists it.
 * The object index is unique per (worker, operation) pair — each worker
 * owns a disjoint contiguous slice of the object array — so no two calls
 * ever touch the same object. Always returns 0.
 */
static int
obj_persist_op(struct benchmark *bench, struct operation_info *info)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	/* worker's slice starts at index * n_ops_per_thread */
	uint64_t idx = info->worker->index * info->args->n_ops_per_thread +
		info->index;
	assert(idx < ob->nobjs);
	void *ptr = ob->ptrs[idx];
	memset(ptr, ob->const_b, ob->obj_size);
	pmemobj_persist(ob->pop, ptr, ob->obj_size);
	return 0;
}
/*
 * obj_persist_init -- initialization function
 *
 * Parses the benchmark arguments, creates (or adopts) a pmemobj pool large
 * enough for n_ops_per_thread * n_threads objects, allocates the objects
 * and optionally performs a warmup pass over all of them.
 * Returns 0 on success, -1 on error.
 *
 * Fix: the error message for an undersized poolset/DAX device said
 * "file size too large" although the condition (args->fsize < poolsize)
 * means the provided file is too SMALL for the requested workload.
 */
static int
obj_persist_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	auto *pa = (struct prog_args *)args->opts;
	size_t poolsize;
	if (pa->minsize >= args->dsize) {
		fprintf(stderr, "Wrong params - allocation size\n");
		return -1;
	}
	auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench));
	if (ob == nullptr) {
		perror("malloc");
		return -1;
	}
	pmembench_set_priv(bench, ob);
	ob->pa = pa;
	/* initialize memset() value */
	ob->const_b = CONST_B;
	ob->nobjs = args->n_ops_per_thread * args->n_threads;
	/* each allocation is at least ALLOC_MIN_SIZE bytes */
	ob->obj_size = args->dsize;
	if (ob->obj_size < ALLOC_MIN_SIZE)
		ob->obj_size = ALLOC_MIN_SIZE;
	/* For data objects */
	poolsize = ob->nobjs * (ob->obj_size + OOB_HEADER_SIZE);
	/* multiply by FACTOR for metadata, fragmentation, etc. */
	poolsize = poolsize * FACTOR;
	if (args->is_poolset || type == TYPE_DEVDAX) {
		/* existing poolset/device must already be big enough */
		if (args->fsize < poolsize) {
			fprintf(stderr, "file size too small\n");
			goto free_ob;
		}
		poolsize = 0;
	} else if (poolsize < PMEMOBJ_MIN_POOL) {
		poolsize = PMEMOBJ_MIN_POOL;
	}
	poolsize = PAGE_ALIGNED_UP_SIZE(poolsize);
	ob->pop = pmemobj_create(args->fname, nullptr, poolsize, args->fmode);
	if (ob->pop == nullptr) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		goto free_ob;
	}
	if (init_objects(ob)) {
		goto free_pop;
	}
	if (!ob->pa->no_warmup) {
		do_warmup(ob);
	}
	return 0;
free_pop:
	pmemobj_close(ob->pop);
free_ob:
	free(ob);
	return -1;
}
/*
 * obj_persist_exit -- benchmark cleanup function: frees every object,
 * closes the pool and releases the context
 */
static int
obj_persist_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	uint64_t i = 0;
	while (i < ob->nobjs)
		pmemobj_free(&ob->oids[i++]);
	pmemobj_close(ob->pop);
	free(ob->ptrs);
	free(ob->oids);
	free(ob);
	return 0;
}
/* command line options specific to this benchmark (filled in constructor) */
static struct benchmark_clo obj_persist_clo[1];
/* Stores information about benchmark. */
static struct benchmark_info obj_persist_info;
CONSTRUCTOR(pmemobj_persist_constructor)
void
pmemobj_persist_constructor(void)
{
	/*
	 * Runs at load time: describes the command line options and the
	 * benchmark callbacks, then registers the benchmark.
	 */
	obj_persist_clo[0].opt_short = 'w';
	obj_persist_clo[0].opt_long = "no-warmup";
	obj_persist_clo[0].descr = "Don't do warmup";
	obj_persist_clo[0].def = "false";
	obj_persist_clo[0].type = CLO_TYPE_FLAG;
	obj_persist_clo[0].off = clo_field_offset(struct prog_args, no_warmup);
	obj_persist_info.name = "pmemobj_persist";
	obj_persist_info.brief = "Benchmark for pmemobj_persist() "
				 "operation";
	obj_persist_info.init = obj_persist_init;
	obj_persist_info.exit = obj_persist_exit;
	obj_persist_info.multithread = true;
	obj_persist_info.multiops = true;
	obj_persist_info.operation = obj_persist_op;
	obj_persist_info.measure_time = true;
	obj_persist_info.clos = obj_persist_clo;
	obj_persist_info.nclos = ARRAY_SIZE(obj_persist_clo);
	obj_persist_info.opts_size = sizeof(struct prog_args);
	obj_persist_info.rm_file = true;
	obj_persist_info.allow_poolset = true;
	REGISTER_BENCHMARK(obj_persist_info);
};
| 7,793 | 24.893688 | 75 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmemobj_tx.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobj_tx.cpp -- pmemobj_tx_alloc(), pmemobj_tx_free(),
* pmemobj_tx_realloc(), pmemobj_tx_add_range() benchmarks.
*/
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
#include "poolset_util.hpp"
#define LAYOUT_NAME "benchmark"
#define FACTOR 1.2f
#define ALLOC_OVERHEAD 64
/*
* operations number is limited to prevent stack overflow during
* performing recursive functions.
*/
#define MAX_OPS 10000
TOID_DECLARE(struct item, 0);
struct obj_tx_bench;
struct obj_tx_worker;
int obj_tx_init(struct benchmark *bench, struct benchmark_args *args);
int obj_tx_exit(struct benchmark *bench, struct benchmark_args *args);
/*
 * type_num_mode -- type number mode (how type numbers are assigned to
 * the allocated persistent objects)
 */
enum type_num_mode {
	NUM_MODE_ONE,
	NUM_MODE_PER_THREAD,
	NUM_MODE_RAND,
	NUM_MODE_UNKNOWN
};
/*
 * op_mode -- operation type; the first three are used by the alloc/free/
 * realloc benchmarks, the OP_MODE_*OBJ* values by obj_tx_add_range
 */
enum op_mode {
	OP_MODE_COMMIT,
	OP_MODE_ABORT,
	OP_MODE_ABORT_NESTED,
	OP_MODE_ONE_OBJ,
	OP_MODE_ONE_OBJ_NESTED,
	OP_MODE_ONE_OBJ_RANGE,
	OP_MODE_ONE_OBJ_NESTED_RANGE,
	OP_MODE_ALL_OBJ,
	OP_MODE_ALL_OBJ_NESTED,
	OP_MODE_UNKNOWN
};
/*
 * lib_mode -- which API performs the operation; the enum order matches the
 * alloc_op/free_op/realloc_op dispatch tables below
 */
enum lib_mode {
	LIB_MODE_DRAM,
	LIB_MODE_OBJ_TX,
	LIB_MODE_OBJ_ATOMIC,
	LIB_MODE_NONE,
};
/*
 * nesting_mode -- nesting type (simulated recursion vs. real nested
 * transactions); order matches the nestings[] dispatch table
 */
enum nesting_mode {
	NESTING_MODE_SIM,
	NESTING_MODE_TX,
	NESTING_MODE_UNKNOWN,
};
/*
 * add_range_mode -- operation type for obj_add_range benchmark; order
 * matches the add_range_op[] dispatch table
 */
enum add_range_mode { ADD_RANGE_MODE_ONE_TX, ADD_RANGE_MODE_NESTED_TX };
/*
 * parse_mode -- parsing function type; order matches the parse_op[] table
 */
enum parse_mode { PARSE_OP_MODE, PARSE_OP_MODE_ADD_RANGE };
/* returns the type number for a given (worker, operation) pair */
typedef size_t (*fn_type_num_t)(struct obj_tx_bench *obj_bench,
				size_t worker_idx, size_t op_idx);
/* maps an operation index to an object index */
typedef size_t (*fn_num_t)(size_t idx);
/* one benchmark operation (alloc/free/realloc/add_range variant) */
typedef int (*fn_op_t)(struct obj_tx_bench *obj_bench,
		       struct worker_info *worker, size_t idx);
/* computes the (offset, size) range passed to pmemobj_tx_add_range() */
typedef struct offset (*fn_os_off_t)(struct obj_tx_bench *obj_bench,
				     size_t idx);
typedef enum op_mode (*fn_parse_t)(const char *arg);
/*
 * obj_tx_args -- stores command line parsed arguments.
 */
struct obj_tx_args {
	/*
	 * operation which will be performed when flag io set to false.
	 * modes for obj_tx_alloc, obj_tx_free and obj_tx_realloc:
	 *	- basic - transaction will be committed
	 *	- abort - 'external' transaction will be aborted.
	 *	- abort-nested - all nested transactions will be
	 *	  aborted.
	 *
	 * modes for obj_tx_add_range benchmark:
	 *	- basic - one object is added to undo log many times in
	 *	  one transaction.
	 *	- range - fields of one object are added to undo
	 *	  log many times in one transaction.
	 *	- all-obj - all objects are added to undo log in
	 *	  one transaction.
	 *	- range-nested - fields of one object are added to undo
	 *	  log many times in many nested transactions.
	 *	- one-obj-nested - one object is added to undo log many
	 *	  times in many nested transactions.
	 *	- all-obj-nested - all objects are added to undo log in
	 *	  many separate, nested transactions.
	 */
	char *operation;
	/*
	 * type number for each persistent object. There are three modes:
	 *	- one - all of objects have the same type number
	 *	- per-thread - all of object allocated by the same
	 *	  thread have the same type number
	 *	- rand - type numbers are assigned randomly for
	 *	  each persistent object
	 */
	char *type_num;
	/*
	 * defines which library will be used in main operations. There are
	 * three modes in which benchmark can be run:
	 *	- tx - uses PMEM transactions
	 *	- pmem - uses PMEM without transactions
	 *	- dram - does not use PMEM
	 */
	char *lib;
	unsigned nested;   /* number of nested transactions */
	unsigned min_size; /* minimum allocation size */
	unsigned min_rsize; /* minimum reallocation size */
	unsigned rsize;	    /* reallocation size */
	bool change_type;   /* change type number in reallocation */
	size_t obj_size;    /* size of each allocated object */
	size_t n_ops;	    /* number of operations */
	int parse_mode;	    /* type of parsing function (enum parse_mode) */
};
/*
 * obj_tx_bench -- stores variables used in benchmark, passed within functions.
 */
static struct obj_tx_bench {
	PMEMobjpool *pop;	      /* handle to persistent pool */
	struct obj_tx_args *obj_args; /* pointer to benchmark arguments */
	size_t *random_types; /* array to store random type numbers */
	size_t *sizes;	 /* array to store size of each allocation */
	size_t *resizes; /* array to store size of each reallocation */
	size_t n_objs;	 /* number of objects to allocate */
	int type_mode;	 /* type number mode (enum type_num_mode) */
	int op_mode;	 /* type of operation (enum op_mode) */
	int lib_mode;	 /* type of operation used in initialization */
	int lib_op;	 /* type of main operation */
	int lib_op_free; /* operation used to free objects on worker exit */
	int nesting_mode; /* type of nesting in main operation */
	fn_num_t n_oid;	  /* returns object's number in array */
	fn_os_off_t fn_off; /* returns offset for proper operation */
	/*
	 * fn_type_num gets proper function assigned, depending on the
	 * value of the type_mode argument, which returns proper type number
	 * for each persistent object. Possible functions are:
	 *	- type_mode_one,
	 *	- type_mode_rand.
	 */
	fn_type_num_t fn_type_num;
	/*
	 * fn_op gets proper array with functions pointer assigned, depending
	 * on function which is tested by benchmark. Possible arrays are:
	 *	-alloc_op
	 *	-free_op
	 *	-realloc_op
	 */
	fn_op_t *fn_op;
} obj_bench;
/*
 * item -- TOID's structure (opaque; only its OID is used)
 */
struct item;
/*
 * obj_tx_worker - stores variables used by one thread.
 */
struct obj_tx_worker {
	TOID(struct item) * oids; /* object handles (pmem modes only) */
	char **items;		  /* DRAM buffers (dram mode only) */
	unsigned tx_level;	  /* current nesting depth */
	unsigned max_level;	  /* requested nesting depth */
};
/*
 * offset - stores offset data used in pmemobj_tx_add_range()
 */
struct offset {
	uint64_t off;
	size_t size;
};
/*
 * alloc_dram -- main operations for obj_tx_alloc benchmark in dram mode:
 * plain malloc() of the pre-computed size for this operation
 */
static int
alloc_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	   size_t idx)
{
	auto *state = (struct obj_tx_worker *)worker->priv;
	char *buf = (char *)malloc(obj_bench->sizes[idx]);
	if (buf == nullptr) {
		perror("malloc");
		return -1;
	}
	state->items[idx] = buf;
	return 0;
}
/*
 * alloc_pmem -- main operations for obj_tx_alloc benchmark in pmem mode
 *
 * Atomically (non-transactionally) allocates one persistent object of the
 * pre-computed size, with the type number chosen by the configured policy.
 */
static int
alloc_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	   size_t idx)
{
	size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx);
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	if (pmemobj_alloc(obj_bench->pop, &obj_worker->oids[idx].oid,
			  obj_bench->sizes[idx], type_num, nullptr,
			  nullptr) != 0) {
		perror("pmemobj_alloc");
		return -1;
	}
	return 0;
}
/*
 * alloc_tx -- main operations for obj_tx_alloc benchmark in tx mode
 *
 * Transactionally allocates one persistent object (runs inside the
 * transaction opened by obj_op_tx). POBJ_XALLOC_NO_FLUSH requests that the
 * newly allocated memory is not flushed by the allocator.
 */
static int
alloc_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx)
{
	size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx);
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	obj_worker->oids[idx].oid = pmemobj_tx_xalloc(
		obj_bench->sizes[idx], type_num, POBJ_XALLOC_NO_FLUSH);
	if (OID_IS_NULL(obj_worker->oids[idx].oid)) {
		perror("pmemobj_tx_alloc");
		return -1;
	}
	return 0;
}
/*
 * free_dram -- main operations for obj_tx_free benchmark in dram mode:
 * plain free() of the buffer allocated by alloc_dram
 */
static int
free_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	  size_t idx)
{
	auto *state = (struct obj_tx_worker *)worker->priv;
	char *item = state->items[idx];
	free(item);
	return 0;
}
/*
 * free_pmem -- main operations for obj_tx_free benchmark in pmem mode
 *
 * Releases one persistent object with the atomic (non-transactional) API.
 */
static int
free_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	  size_t idx)
{
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	POBJ_FREE(&obj_worker->oids[idx]);
	return 0;
}
/*
 * free_tx -- main operations for obj_tx_free benchmark in tx mode
 *
 * Releases one persistent object transactionally; runs inside the
 * transaction opened by the obj_op_tx wrapper.
 */
static int
free_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx)
{
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	TX_FREE(obj_worker->oids[idx]);
	return 0;
}
/*
 * no_free -- exit operation for the obj_tx_alloc and obj_tx_free benchmarks
 * when the main operation already released (or never created) the objects;
 * intentionally does nothing
 */
static int
no_free(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx)
{
	(void)obj_bench;
	(void)worker;
	(void)idx;
	return 0;
}
/*
 * realloc_dram -- main operations for obj_tx_realloc benchmark in dram mode:
 * grows/shrinks the buffer to the pre-computed new size; the stored pointer
 * is only replaced once realloc() succeeds
 */
static int
realloc_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	     size_t idx)
{
	auto *state = (struct obj_tx_worker *)worker->priv;
	char *resized =
		(char *)realloc(state->items[idx], obj_bench->resizes[idx]);
	if (resized == nullptr) {
		perror("realloc");
		return -1;
	}
	state->items[idx] = resized;
	return 0;
}
/*
 * realloc_pmem -- main operations for obj_tx_realloc benchmark in pmem mode
 *
 * Atomically reallocates one object to its pre-computed new size; when
 * change_type is requested the object also gets a different type number.
 */
static int
realloc_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	     size_t idx)
{
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx);
	if (obj_bench->obj_args->change_type)
		type_num++;
	if (pmemobj_realloc(obj_bench->pop, &obj_worker->oids[idx].oid,
			    obj_bench->resizes[idx], type_num) != 0) {
		perror("pmemobj_realloc");
		return -1;
	}
	return 0;
}
/*
 * realloc_tx -- main operations for obj_tx_realloc benchmark in tx mode
 *
 * Transactionally reallocates one object to its pre-computed new size;
 * when change_type is requested the object also gets a different type
 * number. Runs inside the transaction opened by obj_op_tx.
 *
 * Fix: use resizes[idx] like the dram and pmem variants do; previously
 * sizes[idx] (the original allocation size) was passed, so the tx variant
 * never actually changed the object's size.
 */
static int
realloc_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	   size_t idx)
{
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx);
	if (obj_bench->obj_args->change_type)
		type_num++;
	obj_worker->oids[idx].oid = pmemobj_tx_realloc(
		obj_worker->oids[idx].oid, obj_bench->resizes[idx], type_num);
	if (OID_IS_NULL(obj_worker->oids[idx].oid)) {
		perror("pmemobj_tx_realloc");
		return -1;
	}
	return 0;
}
/*
 * add_range_nested_tx -- main operations of the obj_tx_add_range with nesting.
 *
 * Recursively opens one nested transaction per operation, snapshotting one
 * (object, offset) range at each level, until n_ops levels are reached
 * (recursion depth is capped at MAX_OPS by obj_tx_add_range_init).
 * Returns 0 on success, -1 if any transaction aborts.
 */
static int
add_range_nested_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker,
		    size_t idx)
{
	int ret = 0;
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	TX_BEGIN(obj_bench->pop)
	{
		if (obj_bench->obj_args->n_ops != obj_worker->tx_level) {
			size_t n_oid = obj_bench->n_oid(obj_worker->tx_level);
			struct offset offset = obj_bench->fn_off(
				obj_bench, obj_worker->tx_level);
			/*
			 * NOTE(review): return value ignored — presumably a
			 * failure aborts the transaction and is reported via
			 * TX_ONABORT; confirm against the libpmemobj docs.
			 */
			pmemobj_tx_add_range(obj_worker->oids[n_oid].oid,
					     offset.off, offset.size);
			obj_worker->tx_level++;
			ret = add_range_nested_tx(obj_bench, worker, idx);
		}
	}
	TX_ONABORT
	{
		fprintf(stderr, "transaction failed\n");
		ret = -1;
	}
	TX_END
	return ret;
}
/*
 * add_range_tx -- main operations of the obj_tx_add_range without nesting.
 *
 * Opens a single transaction and snapshots n_ops (object, offset) ranges
 * inside it. Returns 0 on success, -1 if the transaction aborts.
 */
static int
add_range_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	     size_t idx)
{
	int ret = 0;
	size_t i = 0;
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	TX_BEGIN(obj_bench->pop)
	{
		for (i = 0; i < obj_bench->obj_args->n_ops; i++) {
			size_t n_oid = obj_bench->n_oid(i);
			struct offset offset = obj_bench->fn_off(obj_bench, i);
			/*
			 * NOTE(review): ret is overwritten every iteration,
			 * so only the last call's status is propagated
			 * directly; earlier failures surface via TX_ONABORT.
			 */
			ret = pmemobj_tx_add_range(obj_worker->oids[n_oid].oid,
						   offset.off, offset.size);
		}
	}
	TX_ONABORT
	{
		fprintf(stderr, "transaction failed\n");
		ret = -1;
	}
	TX_END
	return ret;
}
/*
 * obj_op_sim -- main function for benchmarks which simulate nested
 * transactions on dram or the pmemobj atomic API; descends to the requested
 * nesting depth (tx_level counts up to max_level, exactly as the original
 * recursive formulation did) and performs the library operation once at the
 * innermost level.
 */
static int
obj_op_sim(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	   size_t idx)
{
	auto *state = (struct obj_tx_worker *)worker->priv;
	while (state->tx_level != state->max_level)
		state->tx_level++;
	return obj_bench->fn_op[obj_bench->lib_op](obj_bench, worker, idx);
}
/*
 * obj_op_tx -- main recursive function for transactional benchmarks
 *
 * Opens one (possibly nested) transaction per nesting level; at the
 * innermost level it performs the actual library operation. Depending on
 * op_mode it then deliberately aborts either the innermost transaction
 * (OP_MODE_ABORT_NESTED) or the outermost one (OP_MODE_ABORT); those
 * intentional aborts are not reported as errors in TX_ONABORT.
 */
static int
obj_op_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker,
	  size_t idx)
{
	/*
	 * volatile: presumably because an aborting transaction transfers
	 * control non-locally back into TX_ONABORT — confirm against the
	 * libpmemobj tx macro documentation.
	 */
	volatile int ret = 0;
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	TX_BEGIN(obj_bench->pop)
	{
		if (obj_worker->max_level == obj_worker->tx_level) {
			ret = obj_bench->fn_op[obj_bench->lib_op](obj_bench,
								  worker, idx);
			if (obj_bench->op_mode == OP_MODE_ABORT_NESTED)
				pmemobj_tx_abort(-1);
		} else {
			obj_worker->tx_level++;
			ret = obj_op_tx(obj_bench, worker, idx);
			/* tx_level back at 0 == outermost transaction */
			if (--obj_worker->tx_level == 0 &&
			    obj_bench->op_mode == OP_MODE_ABORT)
				pmemobj_tx_abort(-1);
		}
	}
	TX_ONABORT
	{
		if (obj_bench->op_mode != OP_MODE_ABORT &&
		    obj_bench->op_mode != OP_MODE_ABORT_NESTED) {
			fprintf(stderr, "transaction failed\n");
			ret = -1;
		}
	}
	TX_END
	return ret;
}
/*
 * type_mode_one -- type number generator for NUM_MODE_ONE: every persistent
 * object shares type number 0
 */
static size_t
type_mode_one(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx)
{
	(void)obj_bench;
	(void)worker_idx;
	(void)op_idx;
	return 0;
}
/*
 * type_mode_per_thread -- type number generator for NUM_MODE_PER_THREAD:
 * all objects allocated by one thread share that thread's index as their
 * type number
 */
static size_t
type_mode_per_thread(struct obj_tx_bench *obj_bench, size_t worker_idx,
		     size_t op_idx)
{
	(void)obj_bench;
	(void)op_idx;
	return worker_idx;
}
/*
 * type_mode_rand -- returns the value from the random_types array assigned
 * for the specific operation in a specific thread.
 *
 * The array is indexed by op_idx only, so every thread uses the same random
 * type number for a given operation index.
 */
static size_t
type_mode_rand(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx)
{
	return obj_bench->random_types[op_idx];
}
/*
 * parse_op_mode_add_range -- parses command line "--operation" argument
 * and returns proper op_mode enum value for obj_tx_add_range.
 */
static enum op_mode
parse_op_mode_add_range(const char *arg)
{
	static const struct {
		const char *name;
		enum op_mode mode;
	} modes[] = {
		{"basic", OP_MODE_ONE_OBJ},
		{"one-obj-nested", OP_MODE_ONE_OBJ_NESTED},
		{"range", OP_MODE_ONE_OBJ_RANGE},
		{"range-nested", OP_MODE_ONE_OBJ_NESTED_RANGE},
		{"all-obj", OP_MODE_ALL_OBJ},
		{"all-obj-nested", OP_MODE_ALL_OBJ_NESTED},
	};
	for (auto &m : modes) {
		if (strcmp(arg, m.name) == 0)
			return m.mode;
	}
	return OP_MODE_UNKNOWN;
}
/*
 * parse_op_mode -- parses command line "--operation" argument
 * and returns proper op_mode enum value.
 */
static enum op_mode
parse_op_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum op_mode mode;
	} modes[] = {
		{"basic", OP_MODE_COMMIT},
		{"abort", OP_MODE_ABORT},
		{"abort-nested", OP_MODE_ABORT_NESTED},
	};
	for (auto &m : modes) {
		if (strcmp(arg, m.name) == 0)
			return m.mode;
	}
	return OP_MODE_UNKNOWN;
}
/*
 * Dispatch tables: alloc_op/free_op/realloc_op are indexed by enum lib_mode
 * (DRAM, OBJ_TX, OBJ_ATOMIC; free_op additionally has a LIB_MODE_NONE
 * no-op slot), add_range_op by enum add_range_mode, parse_op by enum
 * parse_mode and nestings by enum nesting_mode.
 */
static fn_op_t alloc_op[] = {alloc_dram, alloc_tx, alloc_pmem};
static fn_op_t free_op[] = {free_dram, free_tx, free_pmem, no_free};
static fn_op_t realloc_op[] = {realloc_dram, realloc_tx, realloc_pmem};
static fn_op_t add_range_op[] = {add_range_tx, add_range_nested_tx};
static fn_parse_t parse_op[] = {parse_op_mode, parse_op_mode_add_range};
static fn_op_t nestings[] = {obj_op_sim, obj_op_tx};
/*
 * parse_type_num_mode -- converts string to type_num_mode enum
 */
static enum type_num_mode
parse_type_num_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum type_num_mode mode;
	} modes[] = {
		{"one", NUM_MODE_ONE},
		{"per-thread", NUM_MODE_PER_THREAD},
		{"rand", NUM_MODE_RAND},
	};
	for (auto &m : modes) {
		if (strcmp(arg, m.name) == 0)
			return m.mode;
	}
	fprintf(stderr, "unknown type number\n");
	return NUM_MODE_UNKNOWN;
}
/*
 * parse_lib_mode -- converts string to lib_mode enum
 */
static enum lib_mode
parse_lib_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum lib_mode mode;
	} modes[] = {
		{"dram", LIB_MODE_DRAM},
		{"pmem", LIB_MODE_OBJ_ATOMIC},
		{"tx", LIB_MODE_OBJ_TX},
	};
	for (auto &m : modes) {
		if (strcmp(arg, m.name) == 0)
			return m.mode;
	}
	fprintf(stderr, "unknown lib mode\n");
	return LIB_MODE_NONE;
}
/* indexed by enum type_num_mode; NUM_MODE_UNKNOWN slot is a nullptr guard */
static fn_type_num_t type_num_fn[] = {type_mode_one, type_mode_per_thread,
				      type_mode_rand, nullptr};
/*
 * one_num -- object-index generator that maps every operation to object 0
 * (used by the single-object add_range modes)
 */
static size_t
one_num(size_t op_idx)
{
	(void)op_idx;
	return 0;
}
/*
 * diff_num -- identity object-index generator: operation i addresses
 * object i (used by the all-object add_range modes)
 */
static size_t
diff_num(size_t op_idx)
{
	return op_idx;
}
/*
* off_entire -- returns zero offset.
*/
static struct offset
off_entire(struct obj_tx_bench *obj_bench, size_t idx)
{
struct offset offset;
offset.off = 0;
offset.size = obj_bench->sizes[obj_bench->n_oid(idx)];
return offset;
}
/*
* off_range -- returns offset for range in object.
*/
static struct offset
off_range(struct obj_tx_bench *obj_bench, size_t idx)
{
struct offset offset;
offset.size = obj_bench->sizes[0] / obj_bench->obj_args->n_ops;
offset.off = offset.size * idx;
return offset;
}
/*
 * rand_values -- allocates an array of n_ops sizes. When min is non-zero
 * the entries are randomized in [min, max); otherwise — and when
 * min == max — every entry is set to max. Returns nullptr on allocation
 * failure or when min > max; the caller owns (and must free) the array.
 *
 * Fix: the original computed rand() % (max - min) whenever min was
 * non-zero, which is undefined behavior (modulo by zero) for min == max;
 * randomization is now skipped for an empty range.
 */
static size_t *
rand_values(size_t min, size_t max, size_t n_ops)
{
	auto *sizes = (size_t *)calloc(n_ops, sizeof(size_t));
	if (sizes == nullptr) {
		perror("calloc");
		return nullptr;
	}
	for (size_t i = 0; i < n_ops; i++)
		sizes[i] = max;
	if (min) {
		if (min > max) {
			fprintf(stderr, "Invalid size\n");
			free(sizes);
			return nullptr;
		}
		size_t range = max - min;
		/* min == max leaves every entry at max (empty range) */
		if (range != 0) {
			for (size_t i = 0; i < n_ops; i++)
				sizes[i] = (rand() % range) + min;
		}
	}
	return sizes;
}
/*
 * obj_tx_add_range_op -- main operations of the obj_tx_add_range benchmark.
 *
 * Dispatches to the flat or nested add_range variant (chosen at init time
 * via lib_op) and resets the per-worker nesting counter afterwards.
 */
static int
obj_tx_add_range_op(struct benchmark *bench, struct operation_info *info)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	auto *obj_worker = (struct obj_tx_worker *)info->worker->priv;
	if (add_range_op[obj_bench->lib_op](obj_bench, info->worker,
					    info->index) != 0)
		return -1;
	obj_worker->tx_level = 0;
	return 0;
}
/*
 * obj_tx_op -- main operation for obj_tx_alloc(), obj_tx_free() and
 * obj_tx_realloc() benchmarks.
 *
 * Runs the configured operation through either real nested transactions
 * (obj_op_tx) or the simulated-nesting path (obj_op_sim), then resets the
 * worker's nesting counter for the next operation.
 */
static int
obj_tx_op(struct benchmark *bench, struct operation_info *info)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	auto *obj_worker = (struct obj_tx_worker *)info->worker->priv;
	int ret = nestings[obj_bench->nesting_mode](obj_bench, info->worker,
						    info->index);
	obj_worker->tx_level = 0;
	return ret;
}
/*
 * obj_tx_init_worker -- common part for the worker initialization functions
 * for transactional benchmarks.
 *
 * Allocates the per-worker state plus either the OID array (pmem modes) or
 * the DRAM pointer array. calloc() of the state zeroes both array pointers,
 * so the combined nullptr check below is valid even though only one array
 * is ever allocated.
 */
static int
obj_tx_init_worker(struct benchmark *bench, struct benchmark_args *args,
		   struct worker_info *worker)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	auto *obj_worker =
		(struct obj_tx_worker *)calloc(1, sizeof(struct obj_tx_worker));
	if (obj_worker == nullptr) {
		perror("calloc");
		return -1;
	}
	worker->priv = obj_worker;
	obj_worker->tx_level = 0;
	obj_worker->max_level = obj_bench->obj_args->nested;
	if (obj_bench->lib_mode != LIB_MODE_DRAM)
		obj_worker->oids = (TOID(struct item) *)calloc(
			obj_bench->n_objs, sizeof(TOID(struct item)));
	else
		obj_worker->items =
			(char **)calloc(obj_bench->n_objs, sizeof(char *));
	/* whichever array was chosen above must have been allocated */
	if (obj_worker->oids == nullptr && obj_worker->items == nullptr) {
		free(obj_worker);
		perror("calloc");
		return -1;
	}
	return 0;
}
/*
 * obj_tx_init_worker_alloc_obj -- special part for the worker
 * initialization function for benchmarks which need objects allocated
 * before the measured operation (e.g. obj_tx_free, obj_tx_realloc).
 *
 * On failure the already-allocated objects are released in reverse order
 * and the worker state from obj_tx_init_worker is torn down.
 */
static int
obj_tx_init_worker_alloc_obj(struct benchmark *bench,
			     struct benchmark_args *args,
			     struct worker_info *worker)
{
	unsigned i;
	if (obj_tx_init_worker(bench, args, worker) != 0)
		return -1;
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	/* allocate with the same lib (dram/pmem) the benchmark runs in */
	for (i = 0; i < obj_bench->n_objs; i++) {
		if (alloc_op[obj_bench->lib_mode](obj_bench, worker, i) != 0)
			goto out;
	}
	return 0;
out:
	for (; i > 0; i--)
		free_op[obj_bench->lib_mode](obj_bench, worker, i - 1);
	if (obj_bench->lib_mode == LIB_MODE_DRAM)
		free(obj_worker->items);
	else
		free(obj_worker->oids);
	free(obj_worker);
	return -1;
}
/*
 * obj_tx_exit_worker -- common part for the worker de-initialization.
 *
 * Releases any objects still owned by the worker (lib_op_free is set to
 * LIB_MODE_NONE — a no-op — by benchmarks whose main operation already
 * freed them) and frees the per-worker arrays and state.
 */
static void
obj_tx_exit_worker(struct benchmark *bench, struct benchmark_args *args,
		   struct worker_info *worker)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	auto *obj_worker = (struct obj_tx_worker *)worker->priv;
	for (unsigned i = 0; i < obj_bench->n_objs; i++)
		free_op[obj_bench->lib_op_free](obj_bench, worker, i);
	if (obj_bench->lib_mode == LIB_MODE_DRAM)
		free(obj_worker->items);
	else
		free(obj_worker->oids);
	free(obj_worker);
}
/*
 * obj_tx_add_range_init -- specific part of the obj_tx_add_range
 * benchmark initialization.
 */
static int
obj_tx_add_range_init(struct benchmark *bench, struct benchmark_args *args)
{
	auto *obj_args = (struct obj_tx_args *)args->opts;
	obj_args->parse_mode = PARSE_OP_MODE_ADD_RANGE;
	/* nested variants recurse once per op -- cap to avoid stack overflow */
	if (args->n_ops_per_thread > MAX_OPS)
		args->n_ops_per_thread = MAX_OPS;
	if (obj_tx_init(bench, args) != 0)
		return -1;
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	obj_bench->n_oid = diff_num;
	/* single-object modes always address object 0 */
	if (obj_bench->op_mode < OP_MODE_ALL_OBJ) {
		obj_bench->n_oid = one_num;
		obj_bench->n_objs = 1;
	}
	obj_bench->fn_off = off_entire;
	/* range modes split object 0 into n_ops equal chunks */
	if (obj_bench->op_mode == OP_MODE_ONE_OBJ_RANGE ||
	    obj_bench->op_mode == OP_MODE_ONE_OBJ_NESTED_RANGE) {
		obj_bench->fn_off = off_range;
		/* the object must hold at least one byte per chunk */
		if (args->n_ops_per_thread > args->dsize)
			args->dsize = args->n_ops_per_thread;
		obj_bench->sizes[0] = args->dsize;
	}
	/* non-nested modes use the flat add_range variant */
	obj_bench->lib_op = (obj_bench->op_mode == OP_MODE_ONE_OBJ ||
			     obj_bench->op_mode == OP_MODE_ALL_OBJ)
		? ADD_RANGE_MODE_ONE_TX
		: ADD_RANGE_MODE_NESTED_TX;
	return 0;
}
/*
 * obj_tx_free_init -- specific part of the obj_tx_free initialization.
 */
static int
obj_tx_free_init(struct benchmark *bench, struct benchmark_args *args)
{
	if (obj_tx_init(bench, args) != 0)
		return -1;
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	obj_bench->fn_op = free_op;
	/*
	 * Generally all objects which were allocated during worker
	 * initialization are released in the main operation, so there is no
	 * need to free them in the exit operation. The only exception is
	 * when the transaction (inside which the object is being released)
	 * is aborted. Then the object is not released, so it is necessary
	 * to free it in the exit operation.
	 */
	if (!(obj_bench->lib_op == LIB_MODE_OBJ_TX &&
	      obj_bench->op_mode != OP_MODE_COMMIT))
		obj_bench->lib_op_free = LIB_MODE_NONE;
	return 0;
}
/*
 * obj_tx_alloc_init -- specific part of the obj_tx_alloc initialization.
 */
static int
obj_tx_alloc_init(struct benchmark *bench, struct benchmark_args *args)
{
	if (obj_tx_init(bench, args) != 0)
		return -1;
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	obj_bench->fn_op = alloc_op;
	/*
	 * Generally all objects which will be allocated during main operation
	 * need to be released. Only exception is situation where transaction
	 * (inside which object is allocating) is aborted. Then object is not
	 * allocated so there is no need to free it in exit operation.
	 */
	if (obj_bench->lib_op == LIB_MODE_OBJ_TX &&
	    obj_bench->op_mode != OP_MODE_COMMIT)
		obj_bench->lib_op_free = LIB_MODE_NONE;
	return 0;
}
/*
 * obj_tx_realloc_init -- specific part of the obj_tx_realloc initialization.
 *
 * Additionally draws the per-operation reallocation sizes from the
 * [min_rsize, rsize) range (all equal to rsize when min_rsize is 0).
 */
static int
obj_tx_realloc_init(struct benchmark *bench, struct benchmark_args *args)
{
	if (obj_tx_init(bench, args) != 0)
		return -1;
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	obj_bench->resizes =
		rand_values(obj_bench->obj_args->min_rsize,
			    obj_bench->obj_args->rsize, args->n_ops_per_thread);
	if (obj_bench->resizes == nullptr) {
		obj_tx_exit(bench, args);
		return -1;
	}
	obj_bench->fn_op = realloc_op;
	return 0;
}
/*
* obj_tx_init -- common part of the benchmark initialization for transactional
* benchmarks in their init functions. Parses command line arguments, set
* variables and creates persistent pool.
*/
int
obj_tx_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
assert(args->opts != nullptr);
char path[PATH_MAX];
if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
return -1;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
pmembench_set_priv(bench, &obj_bench);
obj_bench.obj_args = (struct obj_tx_args *)args->opts;
obj_bench.obj_args->obj_size = args->dsize;
obj_bench.obj_args->n_ops = args->n_ops_per_thread;
obj_bench.n_objs = args->n_ops_per_thread;
obj_bench.lib_op = obj_bench.obj_args->lib != nullptr
? parse_lib_mode(obj_bench.obj_args->lib)
: LIB_MODE_OBJ_ATOMIC;
if (obj_bench.lib_op == LIB_MODE_NONE)
return -1;
obj_bench.lib_mode = obj_bench.lib_op == LIB_MODE_DRAM
? LIB_MODE_DRAM
: LIB_MODE_OBJ_ATOMIC;
obj_bench.lib_op_free = obj_bench.lib_mode;
obj_bench.nesting_mode = obj_bench.lib_op == LIB_MODE_OBJ_TX
? NESTING_MODE_TX
: NESTING_MODE_SIM;
/*
* Multiplication by FACTOR prevents from out of memory error
* as the actual size of the allocated persistent objects
* is always larger than requested.
*/
size_t dsize = obj_bench.obj_args->rsize > args->dsize
? obj_bench.obj_args->rsize
: args->dsize;
size_t psize = args->n_ops_per_thread * (dsize + ALLOC_OVERHEAD) *
args->n_threads;
psize += PMEMOBJ_MIN_POOL;
psize = (size_t)(psize * FACTOR);
/*
* When adding all allocated objects to undo log there is necessary
* to prepare larger pool to prevent out of memory error.
*/
if (obj_bench.op_mode == OP_MODE_ALL_OBJ ||
obj_bench.op_mode == OP_MODE_ALL_OBJ_NESTED)
psize *= 2;
obj_bench.op_mode = parse_op[obj_bench.obj_args->parse_mode](
obj_bench.obj_args->operation);
if (obj_bench.op_mode == OP_MODE_UNKNOWN) {
fprintf(stderr, "operation mode unknown\n");
return -1;
}
obj_bench.type_mode = parse_type_num_mode(obj_bench.obj_args->type_num);
if (obj_bench.type_mode == NUM_MODE_UNKNOWN)
return -1;
obj_bench.fn_type_num = type_num_fn[obj_bench.type_mode];
if (obj_bench.type_mode == NUM_MODE_RAND) {
obj_bench.random_types =
rand_values(1, UINT32_MAX, args->n_ops_per_thread);
if (obj_bench.random_types == nullptr)
return -1;
}
obj_bench.sizes = rand_values(obj_bench.obj_args->min_size,
obj_bench.obj_args->obj_size,
args->n_ops_per_thread);
if (obj_bench.sizes == nullptr)
goto free_random_types;
if (obj_bench.lib_mode == LIB_MODE_DRAM)
return 0;
/* Create pmemobj pool. */
if (args->is_poolset || type == TYPE_DEVDAX) {
if (args->fsize < psize) {
fprintf(stderr, "file size too large\n");
goto free_all;
}
psize = 0;
} else if (args->is_dynamic_poolset) {
int ret = dynamic_poolset_create(args->fname, psize);
if (ret == -1)
goto free_all;
if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
goto free_all;
psize = 0;
}
obj_bench.pop = pmemobj_create(path, LAYOUT_NAME, psize, args->fmode);
if (obj_bench.pop == nullptr) {
perror("pmemobj_create");
goto free_all;
}
return 0;
free_all:
free(obj_bench.sizes);
free_random_types:
if (obj_bench.type_mode == NUM_MODE_RAND)
free(obj_bench.random_types);
return -1;
}
/*
 * obj_tx_exit -- common cleanup for the transactional benchmarks: closes
 * the pmemobj pool (unless the run used DRAM mode, where no pool was
 * created) and releases the per-run arrays allocated during init.
 */
int
obj_tx_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	/* no pool is open in DRAM mode */
	if (obj_bench->lib_mode != LIB_MODE_DRAM)
		pmemobj_close(obj_bench->pop);
	free(obj_bench->sizes);
	/* random_types is only allocated in NUM_MODE_RAND */
	if (obj_bench->type_mode == NUM_MODE_RAND)
		free(obj_bench->random_types);
	return 0;
}
/*
 * obj_tx_realloc_exit -- exit function of the obj_tx_realloc benchmark:
 * frees the array of reallocation sizes and then performs the common
 * transactional-benchmark cleanup (obj_tx_exit).
 */
static int
obj_tx_realloc_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench);
	free(obj_bench->resizes);
	return obj_tx_exit(bench, args);
}
/* Array defining common command line arguments. */
static struct benchmark_clo obj_tx_clo[8];
static struct benchmark_info obj_tx_alloc;
static struct benchmark_info obj_tx_free;
static struct benchmark_info obj_tx_realloc;
static struct benchmark_info obj_tx_add_range;
CONSTRUCTOR(pmemobj_tx_constructor)
void
pmemobj_tx_constructor(void)
{
	/*
	 * Command line options shared by the transactional benchmarks;
	 * each benchmark exposes a prefix of this array (see nclos below).
	 */
	/* -T: how type numbers are assigned to allocated objects */
	obj_tx_clo[0].opt_short = 'T';
	obj_tx_clo[0].opt_long = "type-number";
	obj_tx_clo[0].descr = "Type number - one, rand, per-thread";
	obj_tx_clo[0].def = "one";
	obj_tx_clo[0].type = CLO_TYPE_STR;
	obj_tx_clo[0].off = clo_field_offset(struct obj_tx_args, type_num);
	/* -O: operation variant executed by the benchmark */
	obj_tx_clo[1].opt_short = 'O';
	obj_tx_clo[1].opt_long = "operation";
	obj_tx_clo[1].descr = "Type of operation";
	obj_tx_clo[1].def = "basic";
	obj_tx_clo[1].off = clo_field_offset(struct obj_tx_args, operation);
	obj_tx_clo[1].type = CLO_TYPE_STR;
	/* -m: lower bound for randomized allocation sizes */
	obj_tx_clo[2].opt_short = 'm';
	obj_tx_clo[2].opt_long = "min-size";
	obj_tx_clo[2].type = CLO_TYPE_UINT;
	obj_tx_clo[2].descr = "Minimum allocation size";
	obj_tx_clo[2].off = clo_field_offset(struct obj_tx_args, min_size);
	obj_tx_clo[2].def = "0";
	obj_tx_clo[2].type_uint.size =
		clo_field_size(struct obj_tx_args, min_size);
	obj_tx_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	obj_tx_clo[2].type_uint.min = 0;
	obj_tx_clo[2].type_uint.max = UINT_MAX;
	/*
	 * nclos field in benchmark_info structures is decremented to make this
	 * options available only for obj_tx_alloc, obj_tx_free and
	 * obj_tx_realloc benchmarks.
	 */
	/* -L: backing library/mode (e.g. tx, dram) */
	obj_tx_clo[3].opt_short = 'L';
	obj_tx_clo[3].opt_long = "lib";
	obj_tx_clo[3].descr = "Type of library";
	obj_tx_clo[3].def = "tx";
	obj_tx_clo[3].off = clo_field_offset(struct obj_tx_args, lib);
	obj_tx_clo[3].type = CLO_TYPE_STR;
	/* -N: transaction nesting depth */
	obj_tx_clo[4].opt_short = 'N';
	obj_tx_clo[4].opt_long = "nestings";
	obj_tx_clo[4].type = CLO_TYPE_UINT;
	obj_tx_clo[4].descr = "Number of nested transactions";
	obj_tx_clo[4].off = clo_field_offset(struct obj_tx_args, nested);
	obj_tx_clo[4].def = "0";
	obj_tx_clo[4].type_uint.size =
		clo_field_size(struct obj_tx_args, nested);
	obj_tx_clo[4].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	obj_tx_clo[4].type_uint.min = 0;
	obj_tx_clo[4].type_uint.max = MAX_OPS;
	/* -r: lower bound for randomized reallocation sizes */
	obj_tx_clo[5].opt_short = 'r';
	obj_tx_clo[5].opt_long = "min-rsize";
	obj_tx_clo[5].type = CLO_TYPE_UINT;
	obj_tx_clo[5].descr = "Minimum reallocation size";
	obj_tx_clo[5].off = clo_field_offset(struct obj_tx_args, min_rsize);
	obj_tx_clo[5].def = "0";
	obj_tx_clo[5].type_uint.size =
		clo_field_size(struct obj_tx_args, min_rsize);
	obj_tx_clo[5].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	obj_tx_clo[5].type_uint.min = 0;
	obj_tx_clo[5].type_uint.max = UINT_MAX;
	/* -R: target reallocation size */
	obj_tx_clo[6].opt_short = 'R';
	obj_tx_clo[6].opt_long = "realloc-size";
	obj_tx_clo[6].type = CLO_TYPE_UINT;
	obj_tx_clo[6].descr = "Reallocation size";
	obj_tx_clo[6].off = clo_field_offset(struct obj_tx_args, rsize);
	obj_tx_clo[6].def = "1";
	obj_tx_clo[6].type_uint.size =
		clo_field_size(struct obj_tx_args, rsize);
	obj_tx_clo[6].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	obj_tx_clo[6].type_uint.min = 1;
	obj_tx_clo[6].type_uint.max = ULONG_MAX;
	/* -c: flag - reallocate under a different type number */
	obj_tx_clo[7].opt_short = 'c';
	obj_tx_clo[7].opt_long = "changed-type";
	obj_tx_clo[7].descr = "Use another type number in "
			      "reallocation than in allocation";
	obj_tx_clo[7].type = CLO_TYPE_FLAG;
	obj_tx_clo[7].off = clo_field_offset(struct obj_tx_args, change_type);
	/* register the pmemobj_tx_alloc() benchmark */
	obj_tx_alloc.name = "obj_tx_alloc";
	obj_tx_alloc.brief = "pmemobj_tx_alloc() benchmark";
	obj_tx_alloc.init = obj_tx_alloc_init;
	obj_tx_alloc.exit = obj_tx_exit;
	obj_tx_alloc.multithread = true;
	obj_tx_alloc.multiops = true;
	obj_tx_alloc.init_worker = obj_tx_init_worker;
	obj_tx_alloc.free_worker = obj_tx_exit_worker;
	obj_tx_alloc.operation = obj_tx_op;
	obj_tx_alloc.measure_time = true;
	obj_tx_alloc.clos = obj_tx_clo;
	/* hide the realloc-only options (-r, -R, -c) */
	obj_tx_alloc.nclos = ARRAY_SIZE(obj_tx_clo) - 3;
	obj_tx_alloc.opts_size = sizeof(struct obj_tx_args);
	obj_tx_alloc.rm_file = true;
	obj_tx_alloc.allow_poolset = true;
	REGISTER_BENCHMARK(obj_tx_alloc);
	/* register the pmemobj_tx_free() benchmark */
	obj_tx_free.name = "obj_tx_free";
	obj_tx_free.brief = "pmemobj_tx_free() benchmark";
	obj_tx_free.init = obj_tx_free_init;
	obj_tx_free.exit = obj_tx_exit;
	obj_tx_free.multithread = true;
	obj_tx_free.multiops = true;
	obj_tx_free.init_worker = obj_tx_init_worker_alloc_obj;
	obj_tx_free.free_worker = obj_tx_exit_worker;
	obj_tx_free.operation = obj_tx_op;
	obj_tx_free.measure_time = true;
	obj_tx_free.clos = obj_tx_clo;
	/* hide the realloc-only options (-r, -R, -c) */
	obj_tx_free.nclos = ARRAY_SIZE(obj_tx_clo) - 3;
	obj_tx_free.opts_size = sizeof(struct obj_tx_args);
	obj_tx_free.rm_file = true;
	obj_tx_free.allow_poolset = true;
	REGISTER_BENCHMARK(obj_tx_free);
	/* register the pmemobj_tx_realloc() benchmark (all options apply) */
	obj_tx_realloc.name = "obj_tx_realloc";
	obj_tx_realloc.brief = "pmemobj_tx_realloc() benchmark";
	obj_tx_realloc.init = obj_tx_realloc_init;
	obj_tx_realloc.exit = obj_tx_realloc_exit;
	obj_tx_realloc.multithread = true;
	obj_tx_realloc.multiops = true;
	obj_tx_realloc.init_worker = obj_tx_init_worker_alloc_obj;
	obj_tx_realloc.free_worker = obj_tx_exit_worker;
	obj_tx_realloc.operation = obj_tx_op;
	obj_tx_realloc.measure_time = true;
	obj_tx_realloc.clos = obj_tx_clo;
	obj_tx_realloc.nclos = ARRAY_SIZE(obj_tx_clo);
	obj_tx_realloc.opts_size = sizeof(struct obj_tx_args);
	obj_tx_realloc.rm_file = true;
	obj_tx_realloc.allow_poolset = true;
	REGISTER_BENCHMARK(obj_tx_realloc);
	/* register the pmemobj_tx_add_range() benchmark */
	obj_tx_add_range.name = "obj_tx_add_range";
	obj_tx_add_range.brief = "pmemobj_tx_add_range() benchmark";
	obj_tx_add_range.init = obj_tx_add_range_init;
	obj_tx_add_range.exit = obj_tx_exit;
	obj_tx_add_range.multithread = true;
	obj_tx_add_range.multiops = false;
	obj_tx_add_range.init_worker = obj_tx_init_worker_alloc_obj;
	obj_tx_add_range.free_worker = obj_tx_exit_worker;
	obj_tx_add_range.operation = obj_tx_add_range_op;
	obj_tx_add_range.measure_time = true;
	obj_tx_add_range.clos = obj_tx_clo;
	/* only the first three options (-T, -O, -m) apply */
	obj_tx_add_range.nclos = ARRAY_SIZE(obj_tx_clo) - 5;
	obj_tx_add_range.opts_size = sizeof(struct obj_tx_args);
	obj_tx_add_range.rm_file = true;
	obj_tx_add_range.allow_poolset = true;
	REGISTER_BENCHMARK(obj_tx_add_range);
}
| 35,938 | 27.797276 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmemobj_atomic_lists.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobj_atomic_lists.cpp -- benchmark for pmemobj atomic list API
*/
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
#include "queue.h"
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
#define FACTOR 8
#define LAYOUT_NAME "benchmark"
struct obj_bench;
struct obj_worker;
struct element;
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
typedef size_t (*fn_type_num_t)(size_t worker_idx, size_t op_idx);
typedef struct element (*fn_position_t)(struct obj_worker *obj_worker,
size_t op_idx);
typedef int (*fn_init_t)(struct worker_info *worker, size_t n_elm,
size_t list_len);
/*
 * obj_list_args -- stores command line parsed arguments.
 */
struct obj_list_args {
	char *type_num;    /* type_number mode - one, per-thread, rand */
	char *position;    /* position - head, tail, middle, rand */
	unsigned list_len; /* initial list length */
	bool queue;	/* use circle queue from <sys/queue.h> */
	bool range;	/* use random allocation size */
	unsigned min_size; /* minimum random allocation size */
	unsigned seed;     /* srand() seed; 0 means do not reseed */
};
/*
 * obj_bench -- stores variables used in benchmark, passed within functions.
 */
static struct obj_bench {
	/* handle to persistent pool */
	PMEMobjpool *pop;
	/* pointer to benchmark specific arguments */
	struct obj_list_args *args;
	/* array to store random type_number values */
	size_t *random_types;
	/*
	 * NOTE(review): a previous comment here described an "fn_rpositions"
	 * array that does not exist in this structure; the per-operation
	 * random position functions actually live in obj_worker::fn_positions
	 * (filled by random_positions()).
	 */
	size_t *alloc_sizes; /* array to store random sizes of each object */
	size_t max_len;      /* maximum list length */
	size_t min_len;      /* initial list length */
	int type_mode;       /* type_number mode */
	int position_mode;   /* list destination mode */
	/*
	 * fn_type_num gets proper function assigned, depending on the
	 * value of the type_mode argument, which returns proper type number for
	 * each persistent object. Possible functions are:
	 * - type_mode_one,
	 * - type_mode_per_thread,
	 * - type_mode_rand.
	 */
	fn_type_num_t fn_type_num;
	/*
	 * fn_position gets proper function assigned, depending on the value
	 * of the position argument, which returns handle to proper element on
	 * the list. Possible functions are:
	 * - position_head,
	 * - position_tail,
	 * - position_middle,
	 * - position_rand.
	 */
	fn_position_t fn_position;
	/*
	 * fn_init gets proper function assigned, depending on the file_io
	 * flag, which allocates objects and initializes proper list. Possible
	 * functions are:
	 * - obj_init_list,
	 * - queue_init_list.
	 */
	fn_init_t fn_init;
} obj_bench;
/*
 * item -- structure used to connect elements in lists; carries both link
 * types so the same element works on the pmemobj list and the DRAM queue.
 */
struct item {
	POBJ_LIST_ENTRY(struct item) field; /* linkage for the pmemobj list */
	CIRCLEQ_ENTRY(item) fieldq;	 /* linkage for the volatile queue */
};
/*
 * element -- struct contains one item from list with proper type.
 */
struct element {
	struct item *itemq; /* handle used in queue (DRAM) mode */
	TOID(struct item) itemp; /* handle used in pmemobj mode */
	/* insertion side flag, forwarded to pmemobj_list_insert_new() */
	bool before;
};
/*
 * obj_worker -- stores variables used by one thread, concerning one list.
 */
struct obj_worker {
	/* head of the pmemobj list */
	POBJ_LIST_HEAD(plist, struct item) head;
	/* head of the circular queue */
	CIRCLEQ_HEAD(qlist, item) headq;
	TOID(struct item) * oids; /* persistent pmemobj list elements */
	struct item **items; /* volatile elements */
	size_t n_elm; /* number of elements in array */
	fn_position_t *fn_positions; /* per-op position fns (rand mode only) */
	struct element elm; /* element selected for the current operation */
	/*
	 * list_move is a pointer to structure storing variables used by
	 * second list (used only for obj_move benchmark).
	 */
	struct obj_worker *list_move;
};
/*
* position_mode -- list destination type
*/
enum position_mode {
/* object inserted/removed/moved to/from head of list */
POSITION_MODE_HEAD,
/* object inserted/removed/moved to/from tail of list */
POSITION_MODE_TAIL,
/*
* object inserted/removed/moved to/from second element of the list
* or to/from head if list length equal to one
*/
POSITION_MODE_MIDDLE,
/* object inserted/removed/moved to/from head, tail or middle */
POSITION_MODE_RAND,
POSITION_MODE_UNKNOWN,
};
/*
* type_mode -- type number type
*/
enum type_mode {
TYPE_MODE_ONE, /* one type number for all of objects */
/* one type number for objects allocated by the same thread */
TYPE_MODE_PER_THREAD,
TYPE_MODE_RAND, /* random type number for each object */
TYPE_MODE_UNKNOWN,
};
/*
* position_head -- returns head of the persistent list or volatile queue.
*/
static struct element
position_head(struct obj_worker *obj_worker, size_t op_idx)
{
struct element head = {nullptr, OID_NULL, false};
head.before = true;
if (!obj_bench.args->queue)
head.itemp = POBJ_LIST_FIRST(&obj_worker->head);
else
head.itemq = CIRCLEQ_FIRST(&obj_worker->headq);
return head;
}
/*
* position_tail -- returns tail of the persistent list or volatile queue.
*/
static struct element
position_tail(struct obj_worker *obj_worker, size_t op_idx)
{
struct element tail = {nullptr, OID_NULL, false};
tail.before = false;
if (!obj_bench.args->queue)
tail.itemp = POBJ_LIST_LAST(&obj_worker->head, field);
else
tail.itemq = CIRCLEQ_LAST(&obj_worker->headq);
return tail;
}
/*
 * position_middle -- selects the successor of the head element of the
 * persistent list or volatile queue (the "middle" position).
 */
static struct element
position_middle(struct obj_worker *obj_worker, size_t op_idx)
{
	/* start at the head and advance by one element */
	struct element elm = position_head(obj_worker, op_idx);
	elm.before = true;
	if (obj_bench.args->queue)
		elm.itemq = CIRCLEQ_NEXT(elm.itemq, fieldq);
	else
		elm.itemp = POBJ_LIST_NEXT(elm.itemp, field);
	return elm;
}
/*
* position_rand -- returns first, second or last element from the persistent
* list or volatile queue based on r_positions array.
*/
static struct element
position_rand(struct obj_worker *obj_worker, size_t op_idx)
{
struct element elm;
elm = obj_worker->fn_positions[op_idx](obj_worker, op_idx);
elm.before = true;
return elm;
}
/*
 * type_mode_one -- TYPE_MODE_ONE policy: every persistent object shares
 * type number 0, regardless of worker or operation index.
 */
static size_t
type_mode_one(size_t worker_idx, size_t op_idx)
{
	(void)worker_idx;
	(void)op_idx;
	return 0;
}
/*
 * type_mode_per_thread -- TYPE_MODE_PER_THREAD policy: the type number is
 * the index of the worker, so each thread owns its own type number.
 */
static size_t
type_mode_per_thread(size_t worker_idx, size_t op_idx)
{
	(void)op_idx;
	return worker_idx;
}
/*
 * type_mode_rand -- TYPE_MODE_RAND policy: the type number comes from the
 * pre-generated random_types array, indexed by operation.
 */
static size_t
type_mode_rand(size_t worker_idx, size_t op_idx)
{
	(void)worker_idx;
	return obj_bench.random_types[op_idx];
}
const char *type_num_names[] = {"one", "per-thread", "rand"};
const char *position_names[] = {"head", "tail", "middle", "rand"};
static fn_type_num_t type_num_modes[] = {type_mode_one, type_mode_per_thread,
type_mode_rand};
static fn_position_t positions[] = {position_head, position_tail,
position_middle, position_rand};
/* function pointers randomly picked when using rand mode */
static fn_position_t rand_positions[] = {position_head, position_tail,
position_middle};
/*
 * get_item -- common prologue of all benchmark operations: resolves the
 * list element the current operation will insert/remove/move at and caches
 * it in the worker's elm field.
 */
static void
get_item(struct benchmark *bench, struct operation_info *info)
{
	auto *w = (struct obj_worker *)info->worker->priv;
	w->elm = obj_bench.fn_position(w, info->index);
}
/*
* get_move_item -- special part of initial operation of the obj_move
* benchmarks. It gets pointer to element on the list where object will be
* inserted/removed/moved to/from.
*/
static void
get_move_item(struct benchmark *bench, struct operation_info *info)
{
auto *obj_worker = (struct obj_worker *)info->worker->priv;
obj_worker->list_move->elm =
obj_bench.fn_position(obj_worker->list_move, info->index);
get_item(bench, info);
}
/*
 * parse_args -- maps a command line string onto its index in names[];
 * returns max (and prints an error) when the string matches no entry.
 */
static int
parse_args(char *arg, int max, const char **names)
{
	int idx = 0;
	while (idx < max && strcmp(names[idx], arg) != 0)
		idx++;
	if (idx == max)
		fprintf(stderr, "Invalid argument\n");
	return idx;
}
/*
 * obj_init_list -- special part of worker initialization, performed only if
 * queue flag set false. Allocates proper number of items, and inserts proper
 * part of them to the pmemobj list.
 */
static int
obj_init_list(struct worker_info *worker, size_t n_oids, size_t list_len)
{
	size_t i;
	auto *obj_worker = (struct obj_worker *)worker->priv;
	/* array of handles to every persistent object owned by this worker */
	obj_worker->oids =
		(TOID(struct item) *)calloc(n_oids, sizeof(TOID(struct item)));
	if (obj_worker->oids == nullptr) {
		perror("calloc");
		return -1;
	}
	/* allocate all objects up front with pre-computed size/type number */
	for (i = 0; i < n_oids; i++) {
		size_t type_num = obj_bench.fn_type_num(worker->index, i);
		size_t size = obj_bench.alloc_sizes[i];
		auto *tmp = (PMEMoid *)&obj_worker->oids[i];
		if (pmemobj_alloc(obj_bench.pop, tmp, size, type_num, nullptr,
				  nullptr) != 0)
			goto err_oids;
	}
	/* link only the first list_len objects into the list */
	for (i = 0; i < list_len; i++)
		POBJ_LIST_INSERT_TAIL(obj_bench.pop, &obj_worker->head,
				      obj_worker->oids[i], field);
	return 0;
err_oids:
	/* roll back: free the objects allocated so far (none are linked) */
	for (; i > 0; i--)
		POBJ_FREE(&obj_worker->oids[i - 1]);
	free(obj_worker->oids);
	return -1;
}
/*
 * queue_init_list -- special part of worker initialization, performed only if
 * queue flag set. Initiates circle queue, allocates proper number of items and
 * inserts proper part of them to the queue.
 */
static int
queue_init_list(struct worker_info *worker, size_t n_items, size_t list_len)
{
	size_t i;
	auto *obj_worker = (struct obj_worker *)worker->priv;
	CIRCLEQ_INIT(&obj_worker->headq);
	/* array of pointers to every DRAM item owned by this worker */
	obj_worker->items =
		(struct item **)malloc(n_items * sizeof(struct item *));
	if (obj_worker->items == nullptr) {
		perror("malloc");
		return -1;
	}
	/* allocate all items up front with their pre-computed sizes */
	for (i = 0; i < n_items; i++) {
		size_t size = obj_bench.alloc_sizes[i];
		obj_worker->items[i] = (struct item *)calloc(1, size);
		if (obj_worker->items[i] == nullptr) {
			perror("calloc");
			goto err;
		}
	}
	/* link only the first list_len items into the queue */
	for (i = 0; i < list_len; i++)
		CIRCLEQ_INSERT_TAIL(&obj_worker->headq, obj_worker->items[i],
				    fieldq);
	return 0;
err:
	/* roll back: free the items allocated so far */
	for (; i > 0; i--)
		free(obj_worker->items[i - 1]);
	free(obj_worker->items);
	return -1;
}
/*
 * queue_free_worker_list -- worker teardown for the queue (DRAM) variant:
 * unlinks and frees every item still present on the circular queue, then
 * releases the item-pointer array.
 */
static void
queue_free_worker_list(struct obj_worker *obj_worker)
{
	while (!CIRCLEQ_EMPTY(&obj_worker->headq)) {
		struct item *last = CIRCLEQ_LAST(&obj_worker->headq);
		CIRCLEQ_REMOVE(&obj_worker->headq, last, fieldq);
		free(last);
	}
	free(obj_worker->items);
}
/*
* obj_free_worker_list -- special part for the worker de-initialization when
* queue flag is false. Releases items directly from atomic list.
*/
static void
obj_free_worker_list(struct obj_worker *obj_worker)
{
while (!POBJ_LIST_EMPTY(&obj_worker->head)) {
TOID(struct item) tmp = POBJ_LIST_FIRST(&obj_worker->head);
POBJ_LIST_REMOVE_FREE(obj_bench.pop, &obj_worker->head, tmp,
field);
}
free(obj_worker->oids);
}
/*
 * obj_free_worker_items -- worker teardown that releases the persistent
 * objects directly through their handles (not via the list), plus the
 * handle array itself.
 */
static void
obj_free_worker_items(struct obj_worker *obj_worker)
{
	for (size_t idx = 0; idx < obj_worker->n_elm; ++idx)
		POBJ_FREE(&obj_worker->oids[idx]);
	free(obj_worker->oids);
}
/*
 * queue_free_worker_items -- worker teardown that releases the DRAM items
 * directly through the item-pointer array, plus the array itself.
 */
static void
queue_free_worker_items(struct obj_worker *obj_worker)
{
	for (size_t idx = 0; idx < obj_worker->n_elm; ++idx)
		free(obj_worker->items[idx]);
	free(obj_worker->items);
}
/*
 * random_positions -- builds an array of randomly chosen position
 * functions (head/tail/middle), one per possible operation; used only
 * in POSITION_MODE_RAND.
 */
static fn_position_t *
random_positions(void)
{
	auto *funcs = (fn_position_t *)calloc(obj_bench.max_len,
					      sizeof(fn_position_t));
	if (funcs == nullptr) {
		perror("calloc");
		return nullptr;
	}
	/* optional deterministic seeding */
	if (obj_bench.args->seed != 0)
		srand(obj_bench.args->seed);
	const size_t nfuncs = ARRAY_SIZE(rand_positions);
	for (size_t i = 0; i < obj_bench.max_len; i++)
		funcs[i] = rand_positions[RRAND(nfuncs, 0)];
	return funcs;
}
/*
 * random_values -- allocates an array of n_ops sizes; when min exceeds
 * min_range the entries are random values in [min, max], otherwise every
 * entry is max. Returns nullptr on allocation failure or when min > max.
 */
static size_t *
random_values(size_t min, size_t max, size_t n_ops, size_t min_range)
{
	auto *vals = (size_t *)calloc(n_ops, sizeof(size_t));
	if (vals == nullptr) {
		perror("calloc");
		return nullptr;
	}
	if (min <= min_range) {
		/* fixed-size mode: every entry is the maximum */
		for (size_t i = 0; i < n_ops; i++)
			vals[i] = max;
		return vals;
	}
	if (min > max) {
		fprintf(stderr, "Invalid size\n");
		free(vals);
		return nullptr;
	}
	for (size_t i = 0; i < n_ops; i++)
		vals[i] = RRAND(max, min);
	return vals;
}
/*
* queue_insert_op -- main operations of the obj_insert benchmark when queue
* flag set to true.
*/
static int
queue_insert_op(struct operation_info *info)
{
auto *obj_worker = (struct obj_worker *)info->worker->priv;
CIRCLEQ_INSERT_AFTER(&obj_worker->headq, obj_worker->elm.itemq,
obj_worker->items[info->index + obj_bench.min_len],
fieldq);
return 0;
}
/*
* obj_insert_op -- main operations of the obj_insert benchmark when queue flag
* set to false.
*/
static int
obj_insert_op(struct operation_info *info)
{
auto *obj_worker = (struct obj_worker *)info->worker->priv;
POBJ_LIST_INSERT_AFTER(
obj_bench.pop, &obj_worker->head, obj_worker->elm.itemp,
obj_worker->oids[info->index + obj_bench.min_len], field);
return 0;
}
/*
 * queue_remove_op -- remove step of the obj_remove benchmark in queue
 * (DRAM) mode: unlinks the selected item (memory is freed later by the
 * worker teardown).
 */
static int
queue_remove_op(struct operation_info *info)
{
	auto *w = (struct obj_worker *)info->worker->priv;
	CIRCLEQ_REMOVE(&w->headq, w->elm.itemq, fieldq);
	return 0;
}
/*
 * obj_remove_op -- remove step of the obj_remove benchmark in pmemobj
 * mode: unlinks the selected object without freeing it (freed later by
 * the worker teardown).
 */
static int
obj_remove_op(struct operation_info *info)
{
	auto *w = (struct obj_worker *)info->worker->priv;
	POBJ_LIST_REMOVE(obj_bench.pop, &w->head, w->elm.itemp, field);
	return 0;
}
/*
 * insert_op -- main operation of the obj_insert benchmark; dispatches to
 * the queue or pmemobj variant after resolving the target position.
 */
static int
insert_op(struct benchmark *bench, struct operation_info *info)
{
	get_item(bench, info);
	if (obj_bench.args->queue)
		return queue_insert_op(info);
	return obj_insert_op(info);
}
/*
 * obj_insert_new_op -- main operations of the obj_insert_new benchmark.
 */
static int
obj_insert_new_op(struct benchmark *bench, struct operation_info *info)
{
	/* pick the list position the new element will be inserted at */
	get_item(bench, info);
	auto *obj_worker = (struct obj_worker *)info->worker->priv;
	PMEMoid tmp;
	size_t size = obj_bench.alloc_sizes[info->index];
	size_t type_num =
		obj_bench.fn_type_num(info->worker->index, info->index);
	/* allocate a new object and link it in a single atomic call */
	tmp = pmemobj_list_insert_new(
		obj_bench.pop, offsetof(struct item, field), &obj_worker->head,
		obj_worker->elm.itemp.oid, obj_worker->elm.before, size,
		type_num, nullptr, nullptr);
	if (OID_IS_NULL(tmp)) {
		perror("pmemobj_list_insert_new");
		return -1;
	}
	return 0;
}
/*
 * remove_op -- main operation of the obj_remove benchmark; dispatches to
 * the queue or pmemobj variant after resolving the target position.
 */
static int
remove_op(struct benchmark *bench, struct operation_info *info)
{
	get_item(bench, info);
	if (obj_bench.args->queue)
		return queue_remove_op(info);
	return obj_remove_op(info);
}
/*
 * obj_remove_free_op -- main operation of the obj_remove_free benchmark:
 * unlinks the selected object and frees it in one call.
 */
static int
obj_remove_free_op(struct benchmark *bench, struct operation_info *info)
{
	get_item(bench, info);
	auto *w = (struct obj_worker *)info->worker->priv;
	POBJ_LIST_REMOVE_FREE(obj_bench.pop, &w->head, w->elm.itemp, field);
	return 0;
}
/*
 * obj_move_op -- main operation of the obj_move benchmark.
 */
static int
obj_move_op(struct benchmark *bench, struct operation_info *info)
{
	/* resolve positions on both the primary and the list_move list */
	get_move_item(bench, info);
	auto *obj_worker = (struct obj_worker *)info->worker->priv;
	/* atomically move the selected element between the two lists */
	POBJ_LIST_MOVE_ELEMENT_BEFORE(obj_bench.pop, &obj_worker->head,
				      &obj_worker->list_move->head,
				      obj_worker->list_move->elm.itemp,
				      obj_worker->elm.itemp, field, field);
	return 0;
}
/*
 * free_worker -- releases the per-worker state shared by every benchmark
 * variant (position-function array, when present, and the worker struct).
 */
static void
free_worker(struct obj_worker *w)
{
	/* fn_positions is only allocated in POSITION_MODE_RAND */
	if (obj_bench.position_mode == POSITION_MODE_RAND)
		free(w->fn_positions);
	free(w);
}
/*
 * free_worker_list -- worker de-initialization for obj_insert_new,
 * obj_remove_free and obj_move: releases objects directly from the list,
 * then the common worker state.
 */
static void
free_worker_list(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	auto *w = (struct obj_worker *)worker->priv;
	if (obj_bench.args->queue)
		queue_free_worker_list(w);
	else
		obj_free_worker_list(w);
	free_worker(w);
}
/*
 * free_worker_items -- worker de-initialization for obj_insert and
 * obj_remove: releases elements through the item arrays (not via the
 * list), then the common worker state.
 */
static void
free_worker_items(struct benchmark *bench, struct benchmark_args *args,
		  struct worker_info *worker)
{
	auto *w = (struct obj_worker *)worker->priv;
	auto *opts = (struct obj_list_args *)args->opts;
	if (opts->queue)
		queue_free_worker_items(w);
	else
		obj_free_worker_items(w);
	free_worker(w);
}
/*
 * obj_move_free_worker -- special part for the worker de-initialization
 * function of obj_move benchmarks.
 */
static void
obj_move_free_worker(struct benchmark *bench, struct benchmark_args *args,
		     struct worker_info *worker)
{
	auto *obj_worker = (struct obj_worker *)worker->priv;
	/* drain and free the auxiliary (list_move) list */
	while (!POBJ_LIST_EMPTY(&obj_worker->list_move->head))
		POBJ_LIST_REMOVE_FREE(
			obj_bench.pop, &obj_worker->list_move->head,
			POBJ_LIST_LAST(&obj_worker->list_move->head, field),
			field);
	/* fn_positions is only allocated in POSITION_MODE_RAND */
	if (obj_bench.position_mode == POSITION_MODE_RAND)
		free(obj_worker->list_move->fn_positions);
	free(obj_worker->list_move);
	/* release the primary list and the worker itself */
	free_worker_list(bench, args, worker);
}
/*
 * obj_init_worker -- common part for the worker initialization for:
 * obj_insert, obj_insert_new, obj_remove obj_remove_free and obj_move.
 */
static int
obj_init_worker(struct worker_info *worker, size_t n_elm, size_t list_len)
{
	auto *obj_worker =
		(struct obj_worker *)calloc(1, sizeof(struct obj_worker));
	if (obj_worker == nullptr) {
		perror("calloc");
		return -1;
	}
	worker->priv = obj_worker;
	/*
	 * NOTE(review): n_elm is forwarded to fn_init below, but the stored
	 * element count is always max_len — confirm this is intended for
	 * benchmarks passing n_elm == min_len (obj_insert_new).
	 */
	obj_worker->n_elm = obj_bench.max_len;
	obj_worker->list_move = nullptr;
	/* per-operation random position functions, rand mode only */
	if (obj_bench.position_mode == POSITION_MODE_RAND) {
		obj_worker->fn_positions = random_positions();
		if (obj_worker->fn_positions == nullptr)
			goto err;
	}
	/* allocate elements and build the initial list (pmemobj or queue) */
	if (obj_bench.fn_init(worker, n_elm, list_len) != 0)
		goto err_positions;
	return 0;
err_positions:
	free(obj_worker->fn_positions);
err:
	free(obj_worker);
	return -1;
}
/*
* obj_insert_init_worker -- worker initialization functions of the obj_insert
* benchmark.
*/
static int
obj_insert_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
return obj_init_worker(worker, obj_bench.max_len, obj_bench.min_len);
}
/*
* obj_insert_new_init_worker -- worker initialization functions of the
* obj_insert_new benchmark.
*/
static int
obj_insert_new_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
return obj_init_worker(worker, obj_bench.min_len, obj_bench.min_len);
}
/*
* obj_remove_init_worker -- worker initialization functions of the obj_remove
* and obj_remove_free benchmarks.
*/
static int
obj_remove_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
return obj_init_worker(worker, obj_bench.max_len, obj_bench.max_len);
}
/*
 * obj_move_init_worker -- worker initialization functions of the obj_move
 * benchmark.
 */
static int
obj_move_init_worker(struct benchmark *bench, struct benchmark_args *args,
		     struct worker_info *worker)
{
	/* initialize the primary list like the other benchmarks */
	if (obj_init_worker(worker, obj_bench.max_len, obj_bench.max_len) != 0)
		return -1;
	auto *obj_worker = (struct obj_worker *)worker->priv;
	/* second (destination) list, used only by obj_move */
	obj_worker->list_move =
		(struct obj_worker *)calloc(1, sizeof(struct obj_worker));
	if (obj_worker->list_move == nullptr) {
		perror("calloc");
		goto free;
	}
	size_t i;
	if (obj_bench.position_mode == POSITION_MODE_RAND) {
		obj_worker->list_move->fn_positions = random_positions();
		if (obj_worker->list_move->fn_positions == nullptr)
			goto free_list_move;
	}
	/* populate the destination list with min_len new objects */
	for (i = 0; i < obj_bench.min_len; i++) {
		size_t size = obj_bench.alloc_sizes[i];
		POBJ_LIST_INSERT_NEW_TAIL(obj_bench.pop,
					  &obj_worker->list_move->head, field,
					  size, nullptr, nullptr);
		if (TOID_IS_NULL(POBJ_LIST_LAST(&obj_worker->list_move->head,
						field))) {
			perror("pmemobj_list_insert_new");
			goto free_all;
		}
	}
	return 0;
free_all:
	/* unlink and free the objects inserted so far */
	for (; i > 0; i--) {
		POBJ_LIST_REMOVE_FREE(
			obj_bench.pop, &obj_worker->list_move->head,
			POBJ_LIST_LAST(&obj_worker->list_move->head, field),
			field);
	}
	/* fn_positions is nullptr (calloc'd) when not in rand mode */
	free(obj_worker->list_move->fn_positions);
free_list_move:
	free(obj_worker->list_move);
free:
	free_worker_list(bench, args, worker);
	return -1;
}
/*
 * obj_init - common part of the benchmark initialization for: obj_insert,
 * obj_insert_new, obj_remove, obj_remove_free and obj_move used in their init
 * functions. Parses command line arguments, sets variables and
 * creates persistent pool.
 *
 * Returns 0 on success; on failure returns -1 after releasing everything
 * allocated so far.
 */
static int
obj_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	obj_bench.args = (struct obj_list_args *)args->opts;
	/* one extra slot so the "middle" position always has a successor */
	obj_bench.min_len = obj_bench.args->list_len + 1;
	obj_bench.max_len = args->n_ops_per_thread + obj_bench.min_len;
	obj_bench.fn_init =
		obj_bench.args->queue ? queue_init_list : obj_init_list;
	/* Decide if use random or state allocation sizes */
	size_t obj_size = args->dsize < sizeof(struct item)
		? sizeof(struct item)
		: args->dsize;
	size_t min_size = obj_bench.args->min_size < sizeof(struct item)
		? sizeof(struct item)
		: obj_bench.args->min_size;
	obj_bench.alloc_sizes = random_values(
		min_size, obj_size, obj_bench.max_len, sizeof(struct item));
	if (obj_bench.alloc_sizes == nullptr)
		return -1; /* nothing else has been allocated yet */
	/* Decide where operations will be performed */
	obj_bench.position_mode =
		parse_args(obj_bench.args->position, POSITION_MODE_UNKNOWN,
			   position_names);
	if (obj_bench.position_mode == POSITION_MODE_UNKNOWN)
		goto free_all;
	obj_bench.fn_position = positions[obj_bench.position_mode];
	if (!obj_bench.args->queue) {
		/* Decide what type number will be used */
		obj_bench.type_mode =
			parse_args(obj_bench.args->type_num, TYPE_MODE_UNKNOWN,
				   type_num_names);
		/*
		 * BUGFIX: the two failure paths below used to "return -1"
		 * directly, leaking obj_bench.alloc_sizes.
		 */
		if (obj_bench.type_mode == TYPE_MODE_UNKNOWN)
			goto free_all;
		obj_bench.fn_type_num = type_num_modes[obj_bench.type_mode];
		if (obj_bench.type_mode == TYPE_MODE_RAND) {
			obj_bench.random_types = random_values(
				1, UINT32_MAX, obj_bench.max_len, 0);
			if (obj_bench.random_types == nullptr)
				goto free_all;
		}
		/*
		 * Multiplication by FACTOR prevents from out of memory error
		 * as the actual size of the allocated persistent objects
		 * is always larger than requested.
		 */
		size_t psize =
			(args->n_ops_per_thread + obj_bench.min_len + 1) *
			obj_size * args->n_threads * FACTOR;
		if (args->is_poolset || type == TYPE_DEVDAX) {
			if (args->fsize < psize) {
				fprintf(stderr, "file size too large\n");
				goto free_all;
			}
			psize = 0;
		} else if (psize < PMEMOBJ_MIN_POOL) {
			psize = PMEMOBJ_MIN_POOL;
		}
		/* Create pmemobj pool. */
		if ((obj_bench.pop = pmemobj_create(args->fname, LAYOUT_NAME,
						    psize, args->fmode)) ==
		    nullptr) {
			perror(pmemobj_errormsg());
			goto free_all;
		}
	}
	return 0;
free_all:
	free(obj_bench.alloc_sizes);
	/* random_types exists only when type_num was parsed as "rand" */
	if (obj_bench.type_mode == TYPE_MODE_RAND)
		free(obj_bench.random_types);
	return -1;
}
/*
 * obj_exit -- common exit function for obj_insert, obj_insert_new,
 * obj_remove, obj_remove_free and obj_move: releases the size array and,
 * in pmemobj mode, closes the pool and frees the type-number array.
 */
static int
obj_exit(struct benchmark *bench, struct benchmark_args *args)
{
	free(obj_bench.alloc_sizes);
	if (obj_bench.args->queue)
		return 0;
	pmemobj_close(obj_bench.pop);
	if (obj_bench.type_mode == TYPE_MODE_RAND)
		free(obj_bench.random_types);
	return 0;
}
/* obj_list_clo -- array defining common command line arguments. */
static struct benchmark_clo obj_list_clo[6];
/* benchmark descriptors filled in and registered by the constructor below */
static struct benchmark_info obj_insert;
static struct benchmark_info obj_remove;
static struct benchmark_info obj_insert_new;
static struct benchmark_info obj_remove_free;
static struct benchmark_info obj_move;
CONSTRUCTOR(pmem_atomic_list_constructor)
void
pmem_atomic_list_constructor(void)
{
	/* -T/--type-number: how type numbers are assigned to allocations */
	obj_list_clo[0].opt_short = 'T';
	obj_list_clo[0].opt_long = "type-number";
	obj_list_clo[0].descr = "Type number mode - one, per-thread, "
				"rand";
	obj_list_clo[0].def = "one";
	obj_list_clo[0].off = clo_field_offset(struct obj_list_args, type_num);
	obj_list_clo[0].type = CLO_TYPE_STR;
	/* -P/--position: where in the list operations are performed */
	obj_list_clo[1].opt_short = 'P';
	obj_list_clo[1].opt_long = "position";
	obj_list_clo[1].descr = "Place where operation will be "
				"performed - head, tail, rand, middle";
	obj_list_clo[1].def = "middle";
	obj_list_clo[1].off = clo_field_offset(struct obj_list_args, position);
	obj_list_clo[1].type = CLO_TYPE_STR;
	/* -l/--list-len: initial number of elements on the list */
	obj_list_clo[2].opt_short = 'l';
	obj_list_clo[2].opt_long = "list-len";
	obj_list_clo[2].type = CLO_TYPE_UINT;
	obj_list_clo[2].descr = "Initial list len";
	obj_list_clo[2].off = clo_field_offset(struct obj_list_args, list_len);
	obj_list_clo[2].def = "1";
	obj_list_clo[2].type_uint.size =
		clo_field_size(struct obj_list_args, list_len);
	obj_list_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	obj_list_clo[2].type_uint.min = 1;
	obj_list_clo[2].type_uint.max = ULONG_MAX;
	/* -m/--min-size: lower bound for randomized allocation sizes */
	obj_list_clo[3].opt_short = 'm';
	obj_list_clo[3].opt_long = "min-size";
	obj_list_clo[3].type = CLO_TYPE_UINT;
	obj_list_clo[3].descr = "Min allocation size";
	obj_list_clo[3].off = clo_field_offset(struct obj_list_args, min_size);
	obj_list_clo[3].def = "0";
	obj_list_clo[3].type_uint.size =
		clo_field_size(struct obj_list_args, min_size);
	obj_list_clo[3].type_uint.base = CLO_INT_BASE_DEC;
	obj_list_clo[3].type_uint.min = 0;
	obj_list_clo[3].type_uint.max = UINT_MAX;
	/* -s/--seed: seed for the random generators */
	obj_list_clo[4].opt_short = 's';
	obj_list_clo[4].type_uint.max = INT_MAX;
	obj_list_clo[4].opt_long = "seed";
	obj_list_clo[4].type = CLO_TYPE_UINT;
	obj_list_clo[4].descr = "Seed value";
	obj_list_clo[4].off = clo_field_offset(struct obj_list_args, seed);
	obj_list_clo[4].def = "0";
	obj_list_clo[4].type_uint.size =
		clo_field_size(struct obj_list_args, seed);
	obj_list_clo[4].type_uint.base = CLO_INT_BASE_DEC;
	obj_list_clo[4].type_uint.min = 0;
	/*
	 * nclos field in benchmark_info structures is decremented to make
	 * queue option available only for obj_isert, obj_remove
	 */
	obj_list_clo[5].opt_short = 'q';
	obj_list_clo[5].opt_long = "queue";
	obj_list_clo[5].descr = "Use circleq from queue.h instead "
				"pmemobj";
	obj_list_clo[5].type = CLO_TYPE_FLAG;
	obj_list_clo[5].off = clo_field_offset(struct obj_list_args, queue);
	/* obj_insert: pmemobj_list_insert(); full CLO set (includes -q) */
	obj_insert.name = "obj_insert";
	obj_insert.brief = "pmemobj_list_insert() benchmark";
	obj_insert.init = obj_init;
	obj_insert.exit = obj_exit;
	obj_insert.multithread = true;
	obj_insert.multiops = true;
	obj_insert.init_worker = obj_insert_init_worker;
	obj_insert.free_worker = free_worker_items;
	obj_insert.operation = insert_op;
	obj_insert.measure_time = true;
	obj_insert.clos = obj_list_clo;
	obj_insert.nclos = ARRAY_SIZE(obj_list_clo);
	obj_insert.opts_size = sizeof(struct obj_list_args);
	obj_insert.rm_file = true;
	obj_insert.allow_poolset = true;
	REGISTER_BENCHMARK(obj_insert);
	/* obj_remove: pmemobj_list_remove() without freeing; includes -q */
	obj_remove.name = "obj_remove";
	obj_remove.brief = "pmemobj_list_remove() benchmark "
			   "without freeing element";
	obj_remove.init = obj_init;
	obj_remove.exit = obj_exit;
	obj_remove.multithread = true;
	obj_remove.multiops = true;
	obj_remove.init_worker = obj_remove_init_worker;
	obj_remove.free_worker = free_worker_items;
	obj_remove.operation = remove_op;
	obj_remove.measure_time = true;
	obj_remove.clos = obj_list_clo;
	obj_remove.nclos = ARRAY_SIZE(obj_list_clo);
	obj_remove.opts_size = sizeof(struct obj_list_args);
	obj_remove.rm_file = true;
	obj_remove.allow_poolset = true;
	REGISTER_BENCHMARK(obj_remove);
	/* obj_insert_new: nclos - 1 hides the -q flag (pmemobj only) */
	obj_insert_new.name = "obj_insert_new";
	obj_insert_new.brief = "pmemobj_list_insert_new() benchmark";
	obj_insert_new.init = obj_init;
	obj_insert_new.exit = obj_exit;
	obj_insert_new.multithread = true;
	obj_insert_new.multiops = true;
	obj_insert_new.init_worker = obj_insert_new_init_worker;
	obj_insert_new.free_worker = free_worker_list;
	obj_insert_new.operation = obj_insert_new_op;
	obj_insert_new.measure_time = true;
	obj_insert_new.clos = obj_list_clo;
	obj_insert_new.nclos = ARRAY_SIZE(obj_list_clo) - 1;
	obj_insert_new.opts_size = sizeof(struct obj_list_args);
	obj_insert_new.rm_file = true;
	obj_insert_new.allow_poolset = true;
	REGISTER_BENCHMARK(obj_insert_new);
	/* obj_remove_free: remove and free the element; -q hidden */
	obj_remove_free.name = "obj_remove_free";
	obj_remove_free.brief = "pmemobj_list_remove() benchmark "
				"with freeing element";
	obj_remove_free.init = obj_init;
	obj_remove_free.exit = obj_exit;
	obj_remove_free.multithread = true;
	obj_remove_free.multiops = true;
	obj_remove_free.init_worker = obj_remove_init_worker;
	obj_remove_free.free_worker = free_worker_list;
	obj_remove_free.operation = obj_remove_free_op;
	obj_remove_free.measure_time = true;
	obj_remove_free.clos = obj_list_clo;
	obj_remove_free.nclos = ARRAY_SIZE(obj_list_clo) - 1;
	obj_remove_free.opts_size = sizeof(struct obj_list_args);
	obj_remove_free.rm_file = true;
	obj_remove_free.allow_poolset = true;
	REGISTER_BENCHMARK(obj_remove_free);
	/* obj_move: pmemobj_list_move(); -q hidden */
	obj_move.name = "obj_move";
	obj_move.brief = "pmemobj_list_move() benchmark";
	obj_move.init = obj_init;
	obj_move.exit = obj_exit;
	obj_move.multithread = true;
	obj_move.multiops = true;
	obj_move.init_worker = obj_move_init_worker;
	obj_move.free_worker = obj_move_free_worker;
	obj_move.operation = obj_move_op;
	obj_move.measure_time = true;
	obj_move.clos = obj_list_clo;
	obj_move.nclos = ARRAY_SIZE(obj_list_clo) - 1;
	obj_move.opts_size = sizeof(struct obj_list_args);
	obj_move.rm_file = true;
	obj_move.allow_poolset = true;
	REGISTER_BENCHMARK(obj_move);
}
| 32,957 | 28.322064 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/config_reader.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* config_reader.hpp -- config reader module declarations
*/
/* opaque handle; definition lives in config_reader.cpp */
struct config_reader;
/* allocate a new reader; returns NULL on failure */
struct config_reader *config_reader_alloc(void);
/* parse the config file at fname; 0 on success, -1 on failure */
int config_reader_read(struct config_reader *cr, const char *fname);
/* release the reader and everything it owns */
void config_reader_free(struct config_reader *cr);
/* build the scenario list described by the parsed file; 0/-1 */
int config_reader_get_scenarios(struct config_reader *cr,
				struct scenarios **scenarios);
| 1,951 | 45.47619 | 74 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/benchmark_worker.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* benchmark_worker.hpp -- benchmark_worker module declarations
*/
#include "benchmark.hpp"
#include "os_thread.h"
/*
*
* The following table shows valid state transitions upon specified
* API calls and operations performed by the worker thread:
*
* +========================+==========================+=============+
* | Application | State | Worker |
* +========================+==========================+=============+
* | benchmark_worker_alloc | WORKER_STATE_IDLE | wait |
* +------------------------+--------------------------+-------------+
* | benchmark_worker_init | WORKER_STATE_INIT | invoke init |
* +------------------------+--------------------------+-------------+
* | wait | WORKER_STATE_INITIALIZED | end of init |
* +------------------------+--------------------------+-------------+
* | benchmark_worker_run | WORKER_STATE_RUN | invoke func |
* +------------------------+--------------------------+-------------+
* | benchmark_worker_join | WORKER_STATE_END | end of func |
* +------------------------+--------------------------+-------------+
* | benchmark_worker_exit | WORKER_STATE_EXIT | invoke exit |
* +------------------------+--------------------------+-------------+
* | wait | WORKER_STATE_DONE | end of exit |
* +------------------------+--------------------------+-------------+
*/
/* worker life-cycle states; transitions are described in the table above */
enum benchmark_worker_state {
	WORKER_STATE_IDLE,	  /* allocated, waiting for init request */
	WORKER_STATE_INIT,	  /* application requested initialization */
	WORKER_STATE_INITIALIZED, /* worker finished the init callback */
	WORKER_STATE_RUN,	  /* application requested the benchmark func */
	WORKER_STATE_END,	  /* worker finished the benchmark func */
	WORKER_STATE_EXIT,	  /* application requested cleanup */
	WORKER_STATE_DONE,	  /* worker finished the exit callback */
	MAX_WORKER_STATE,	  /* number of states, not a real state */
};
struct benchmark_worker {
	os_thread_t thread;	     /* the worker's thread handle */
	struct benchmark *bench;     /* benchmark this worker belongs to */
	struct benchmark_args *args; /* benchmark arguments */
	struct worker_info info;     /* per-worker context for callbacks */
	int ret;	  /* result of func -- TODO confirm in .cpp */
	int ret_init;	  /* result of init -- TODO confirm in .cpp */
	/* benchmark operation executed in WORKER_STATE_RUN */
	int (*func)(struct benchmark *bench, struct worker_info *info);
	/* per-worker initialization, invoked in WORKER_STATE_INIT */
	int (*init)(struct benchmark *bench, struct benchmark_args *args,
		    struct worker_info *info);
	/* per-worker cleanup, invoked in WORKER_STATE_EXIT */
	void (*exit)(struct benchmark *bench, struct benchmark_args *args,
		     struct worker_info *info);
	os_cond_t cond;	 /* signals state changes between app and worker */
	os_mutex_t lock; /* protects 'state' and the condition variable */
	enum benchmark_worker_state state; /* current life-cycle state */
};
struct benchmark_worker *benchmark_worker_alloc(void);
void benchmark_worker_free(struct benchmark_worker *);
int benchmark_worker_init(struct benchmark_worker *);
void benchmark_worker_exit(struct benchmark_worker *);
int benchmark_worker_run(struct benchmark_worker *);
int benchmark_worker_join(struct benchmark_worker *);
| 4,091 | 41.185567 | 74 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/clo_vec.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* clo_vec.cpp -- command line options vector definitions
*/
#include <cassert>
#include <cstdlib>
#include <cstring>
#include "clo_vec.hpp"
/*
* clo_vec_alloc -- allocate new CLO vector
*/
struct clo_vec *
clo_vec_alloc(size_t size)
{
struct clo_vec *clovec = (struct clo_vec *)malloc(sizeof(*clovec));
assert(clovec != nullptr);
/* init list of arguments and allocations */
TAILQ_INIT(&clovec->allocs);
TAILQ_INIT(&clovec->args);
clovec->nallocs = 0;
/* size of each struct */
clovec->size = size;
/* add first struct to list */
struct clo_vec_args *args =
(struct clo_vec_args *)malloc(sizeof(*args));
assert(args != nullptr);
args->args = calloc(1, size);
assert(args->args != nullptr);
TAILQ_INSERT_TAIL(&clovec->args, args, next);
clovec->nargs = 1;
return clovec;
}
/*
 * clo_vec_free -- free the CLO vector, all tracked allocations and all
 * argument structs
 */
void
clo_vec_free(struct clo_vec *clovec)
{
	assert(clovec != nullptr);

	/* release every allocation tracked by the vector */
	struct clo_vec_alloc *alloc;
	while ((alloc = TAILQ_FIRST(&clovec->allocs)) != nullptr) {
		TAILQ_REMOVE(&clovec->allocs, alloc, next);
		free(alloc->ptr);
		free(alloc);
	}

	/* release every arguments struct */
	struct clo_vec_args *args;
	while ((args = TAILQ_FIRST(&clovec->args)) != nullptr) {
		TAILQ_REMOVE(&clovec->args, args, next);
		free(args->args);
		free(args);
	}

	free(clovec);
}
/*
* clo_vec_get_args -- return pointer to CLO arguments at specified index
*/
void *
clo_vec_get_args(struct clo_vec *clovec, size_t i)
{
if (i >= clovec->nargs)
return nullptr;
size_t c = 0;
struct clo_vec_args *args;
TAILQ_FOREACH(args, &clovec->args, next)
{
if (c == i)
return args->args;
c++;
}
return nullptr;
}
/*
* clo_vec_add_alloc -- add allocation to CLO vector
*/
int
clo_vec_add_alloc(struct clo_vec *clovec, void *ptr)
{
struct clo_vec_alloc *alloc =
(struct clo_vec_alloc *)malloc(sizeof(*alloc));
assert(alloc != nullptr);
alloc->ptr = ptr;
TAILQ_INSERT_TAIL(&clovec->allocs, alloc, next);
clovec->nallocs++;
return 0;
}
/*
 * clo_vec_grow -- (internal) grow in size the CLO vector
 */
static void
clo_vec_grow(struct clo_vec *clovec, size_t new_len)
{
	/* number of argument structs that must be appended */
	size_t nargs = new_len - clovec->nargs;
	size_t i;
	for (i = 0; i < nargs; i++) {
		struct clo_vec_args *args =
			(struct clo_vec_args *)calloc(1, sizeof(*args));
		assert(args != nullptr);
		TAILQ_INSERT_TAIL(&clovec->args, args, next);
		args->args = malloc(clovec->size);
		assert(args->args != nullptr);
		/*
		 * New entries replicate the pre-grow structs cyclically:
		 * entry i is a copy of entry (i mod old nargs). Note that
		 * clovec->nargs still holds the old count inside this loop.
		 */
		void *argscpy = clo_vec_get_args(clovec, i % clovec->nargs);
		assert(argscpy != nullptr);
		memcpy(args->args, argscpy, clovec->size);
	}
	/* only now publish the new length */
	clovec->nargs = new_len;
}
/*
* clo_vec_vlist_alloc -- allocate list of values
*/
struct clo_vec_vlist *
clo_vec_vlist_alloc(void)
{
struct clo_vec_vlist *list =
(struct clo_vec_vlist *)malloc(sizeof(*list));
assert(list != nullptr);
list->nvalues = 0;
TAILQ_INIT(&list->head);
return list;
}
/*
 * clo_vec_vlist_free -- release the list of values and every value copy
 */
void
clo_vec_vlist_free(struct clo_vec_vlist *list)
{
	assert(list != nullptr);

	struct clo_vec_value *val;
	while ((val = TAILQ_FIRST(&list->head)) != nullptr) {
		TAILQ_REMOVE(&list->head, val, next);
		free(val->ptr);
		free(val);
	}

	free(list);
}
/*
* clo_vec_vlist_add -- add value to list
*/
void
clo_vec_vlist_add(struct clo_vec_vlist *list, void *ptr, size_t size)
{
struct clo_vec_value *val =
(struct clo_vec_value *)malloc(sizeof(*val));
assert(val != nullptr);
val->ptr = malloc(size);
assert(val->ptr != nullptr);
memcpy(val->ptr, ptr, size);
TAILQ_INSERT_TAIL(&list->head, val, next);
list->nvalues++;
}
/*
 * clo_vec_memcpy -- copy one value into every arguments struct
 *
 * - clovec - CLO vector
 * - off - offset to value in structure
 * - size - size of value field
 * - ptr - pointer to value
 *
 * Returns -1 when the write would not fit inside a struct, 0 otherwise.
 */
int
clo_vec_memcpy(struct clo_vec *clovec, size_t off, size_t size, void *ptr)
{
	if (off + size > clovec->size)
		return -1;

	for (size_t i = 0; i < clovec->nargs; i++) {
		auto *base = (char *)clo_vec_get_args(clovec, i);
		memcpy(base + off, ptr, size);
	}
	return 0;
}
/*
 * clo_vec_memcpy_list -- copy values from list to CLO vector
 *
 * - clovec - CLO vector
 * - off - offset to value in structure
 * - size - size of value field
 * - list - list of values
 */
int
clo_vec_memcpy_list(struct clo_vec *clovec, size_t off, size_t size,
		    struct clo_vec_vlist *list)
{
	/* reject writes that would fall outside a single arguments struct */
	if (off + size > clovec->size)
		return -1;
	size_t len = clovec->nargs;
	/*
	 * With more than one value the vector is replicated so that every
	 * existing argument combination gets paired with every value.
	 */
	if (list->nvalues > 1)
		clo_vec_grow(clovec, clovec->nargs * list->nvalues);
	struct clo_vec_value *value;
	size_t value_i = 0;
	size_t i;
	TAILQ_FOREACH(value, &list->head, next)
	{
		/* each value fills one contiguous stripe of 'len' structs */
		for (i = value_i * len; i < (value_i + 1) * len; i++) {
			auto *args = (char *)clo_vec_get_args(clovec, i);
			char *dptr = args + off;
			memcpy(dptr, value->ptr, size);
		}
		value_i++;
	}
	return 0;
}
| 6,525 | 22.307143 | 74 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/poolset_util.hpp
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* poolset_util.hpp -- this file provides interface for creating
* poolsets of specified size
*/
#ifndef POOLSET_UTIL_HPP
#define POOLSET_UTIL_HPP
#include <stddef.h>
/* default name of the generated poolset file */
#define POOLSET_PATH "pool.set"
/* create a poolset at 'path' for 'size' bytes (see poolset_util.cpp) */
int dynamic_poolset_create(const char *path, size_t size);
#endif
| 1,871 | 38.829787 | 74 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/scenario.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* scenario.cpp -- scenario module definitions
*/
#include <cassert>
#include <cstdlib>
#include <cstring>
#include "queue.h"
#include "scenario.hpp"
/*
* kv_alloc -- allocate key/value structure
*/
struct kv *
kv_alloc(const char *key, const char *value)
{
struct kv *kv = (struct kv *)malloc(sizeof(*kv));
assert(kv != nullptr);
kv->key = strdup(key);
assert(kv->key != nullptr);
kv->value = strdup(value);
assert(kv->value != nullptr);
return kv;
}
/*
 * kv_free -- release a key/value pair and its duplicated strings
 */
void
kv_free(struct kv *kv)
{
	assert(kv != nullptr);
	free(kv->value);
	free(kv->key);
	free(kv);
}
/*
* scenario_alloc -- allocate scenario structure
*/
struct scenario *
scenario_alloc(const char *name, const char *bench)
{
struct scenario *s = (struct scenario *)malloc(sizeof(*s));
assert(s != nullptr);
TAILQ_INIT(&s->head);
s->name = strdup(name);
assert(s->name != nullptr);
s->benchmark = strdup(bench);
assert(s->benchmark != nullptr);
s->group = nullptr;
return s;
}
/*
 * scenario_free -- release the scenario, its kv list and owned strings
 */
void
scenario_free(struct scenario *s)
{
	assert(s != nullptr);

	struct kv *kv;
	while ((kv = TAILQ_FIRST(&s->head)) != nullptr) {
		TAILQ_REMOVE(&s->head, kv, next);
		kv_free(kv);
	}

	free(s->group);
	free(s->name);
	free(s->benchmark);
	free(s);
}
/*
 * scenario_set_group -- set group of scenario
 *
 * The group string is duplicated; the copy is released by scenario_free().
 */
void
scenario_set_group(struct scenario *s, const char *group)
{
	assert(s != nullptr);
	s->group = strdup(group);
	/* check the duplication, consistent with every other strdup here */
	assert(s->group != nullptr);
}
/*
* scenarios_alloc -- allocate scenarios structure
*/
struct scenarios *
scenarios_alloc(void)
{
struct scenarios *scenarios =
(struct scenarios *)malloc(sizeof(*scenarios));
assert(nullptr != scenarios);
TAILQ_INIT(&scenarios->head);
return scenarios;
}
/*
 * scenarios_free -- release the container and every scenario inside it
 */
void
scenarios_free(struct scenarios *scenarios)
{
	assert(scenarios != nullptr);

	struct scenario *sce;
	while ((sce = TAILQ_FIRST(&scenarios->head)) != nullptr) {
		TAILQ_REMOVE(&scenarios->head, sce, next);
		scenario_free(sce);
	}

	free(scenarios);
}
/*
 * scenarios_get_scenario -- find a scenario by name, NULL when absent
 */
struct scenario *
scenarios_get_scenario(struct scenarios *ss, const char *name)
{
	struct scenario *sc;
	FOREACH_SCENARIO(sc, ss)
	{
		if (!strcmp(sc->name, name))
			return sc;
	}
	return nullptr;
}
/*
 * contains_scenarios -- true if any command line argument names a
 * scenario present in ss
 */
bool
contains_scenarios(int argc, char **argv, struct scenarios *ss)
{
	assert(argv != nullptr);
	assert(argc > 0);
	assert(ss != nullptr);

	for (int i = 0; i < argc; i++) {
		if (scenarios_get_scenario(ss, argv[i]) != nullptr)
			return true;
	}
	return false;
}
/*
 * clone_scenario -- allocate a new scenario and copy name, benchmark and
 * the whole kv list from the source scenario
 *
 * NOTE(review): the 'group' field is not copied -- confirm this is intended.
 */
struct scenario *
clone_scenario(struct scenario *src_scenario)
{
	assert(src_scenario != nullptr);

	auto *copy =
		scenario_alloc(src_scenario->name, src_scenario->benchmark);
	assert(copy != nullptr);

	struct kv *src_kv;
	FOREACH_KV(src_kv, src_scenario)
	{
		struct kv *dup = kv_alloc(src_kv->key, src_kv->value);
		assert(dup != nullptr);
		TAILQ_INSERT_TAIL(&copy->head, dup, next);
	}

	return copy;
}
/*
 * find_kv_in_scenario -- return the kv entry whose key matches 'key',
 * or NULL when the scenario has no such entry
 */
struct kv *
find_kv_in_scenario(const char *key, const struct scenario *scenario)
{
	struct kv *entry;
	FOREACH_KV(entry, scenario)
	{
		if (!strcmp(entry->key, key))
			return entry;
	}
	return nullptr;
}
| 5,314 | 22.414097 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/config_reader.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* config_reader.cpp -- config reader module definitions
*/
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <glib.h>
#include <sys/queue.h>
#include "config_reader.hpp"
#include "scenario.hpp"
#define SECTION_GLOBAL "global"
#define KEY_BENCHMARK "bench"
#define KEY_GROUP "group"
/*
 * config_reader -- handle structure
 */
struct config_reader {
	GKeyFile *key_file; /* parsed INI-style config file (GLib) */
};
/*
* config_reader_alloc -- allocate config reader
*/
struct config_reader *
config_reader_alloc(void)
{
struct config_reader *cr = (struct config_reader *)malloc(sizeof(*cr));
assert(cr != nullptr);
cr->key_file = g_key_file_new();
if (!cr->key_file)
goto err;
return cr;
err:
free(cr);
return nullptr;
}
/*
* config_reader_read -- read config file
*/
int
config_reader_read(struct config_reader *cr, const char *fname)
{
if (g_key_file_load_from_file(cr->key_file, fname, G_KEY_FILE_NONE,
nullptr) != TRUE)
return -1;
return 0;
}
/*
 * config_reader_free -- free config reader
 */
void
config_reader_free(struct config_reader *cr)
{
	/* release the underlying GKeyFile before the handle itself */
	g_key_file_free(cr->key_file);
	free(cr);
}
/*
 * is_scenario -- (internal) return true if _name_ is scenario name
 *
 * This filters out the _global_ and _config_ sections.
 */
static int
is_scenario(const char *name)
{
	/* nonzero (true) for any section other than [global] */
	return strcmp(name, SECTION_GLOBAL);
}
/*
 * is_argument -- (internal) return true if _name_ is argument name
 *
 * This filters out the reserved _bench_ and _group_ keys.
 */
static int
is_argument(const char *name)
{
	if (strcmp(name, KEY_BENCHMARK) == 0)
		return 0;
	if (strcmp(name, KEY_GROUP) == 0)
		return 0;
	return 1;
}
/*
 * config_reader_get_scenarios -- return scenarios from config file
 *
 * This function reads the config file and returns a list of scenarios.
 * Each scenario contains a list of key/value arguments.
 * The scenario's arguments are merged with arguments from global section.
 *
 * Returns 0 on success and fills *scenarios with a list the caller must
 * release with scenarios_free(); returns -1 on failure.
 */
int
config_reader_get_scenarios(struct config_reader *cr,
			    struct scenarios **scenarios)
{
	/*
	 * Read all groups.
	 * The config file must have at least one group, otherwise
	 * it is considered as invalid.
	 */
	gsize ngroups;
	gsize g;
	gchar **groups = g_key_file_get_groups(cr->key_file, &ngroups);
	assert(nullptr != groups);
	if (!groups)
		return -1;
	/*
	 * Check if global section is present and read keys from it.
	 */
	int ret = 0;
	int has_global =
		g_key_file_has_group(cr->key_file, SECTION_GLOBAL) == TRUE;
	gsize ngkeys;
	gchar **gkeys = nullptr;
	struct scenarios *s;
	if (has_global) {
		gkeys = g_key_file_get_keys(cr->key_file, SECTION_GLOBAL,
					    &ngkeys, nullptr);
		assert(nullptr != gkeys);
		if (!gkeys) {
			ret = -1;
			goto err_groups;
		}
	}
	s = scenarios_alloc();
	assert(nullptr != s);
	if (!s) {
		ret = -1;
		goto err_gkeys;
	}
	for (g = 0; g < ngroups; g++) {
		/* skip the [global] section -- it is not a scenario */
		if (!is_scenario(groups[g]))
			continue;
		/*
		 * Check for KEY_BENCHMARK which contains benchmark name.
		 * If not present the benchmark name is the same as the
		 * name of the section.
		 */
		struct scenario *scenario = nullptr;
		if (g_key_file_has_key(cr->key_file, groups[g], KEY_BENCHMARK,
				       nullptr) == FALSE) {
			scenario = scenario_alloc(groups[g], groups[g]);
			assert(scenario != nullptr);
		} else {
			gchar *benchmark =
				g_key_file_get_value(cr->key_file, groups[g],
						     KEY_BENCHMARK, nullptr);
			assert(benchmark != nullptr);
			if (!benchmark) {
				ret = -1;
				goto err_scenarios;
			}
			scenario = scenario_alloc(groups[g], benchmark);
			assert(scenario != nullptr);
			free(benchmark);
		}
		gsize k;
		if (has_global) {
			/*
			 * Merge key/values from global section, skipping
			 * keys the scenario defines locally.
			 */
			for (k = 0; k < ngkeys; k++) {
				if (g_key_file_has_key(cr->key_file, groups[g],
						       gkeys[k],
						       nullptr) == TRUE)
					continue;
				if (!is_argument(gkeys[k]))
					continue;
				char *value = g_key_file_get_value(
					cr->key_file, SECTION_GLOBAL, gkeys[k],
					nullptr);
				assert(nullptr != value);
				if (!value) {
					ret = -1;
					/* not yet on the list -- free here */
					scenario_free(scenario);
					goto err_scenarios;
				}
				struct kv *kv = kv_alloc(gkeys[k], value);
				assert(nullptr != kv);
				free(value);
				if (!kv) {
					ret = -1;
					scenario_free(scenario);
					goto err_scenarios;
				}
				TAILQ_INSERT_TAIL(&scenario->head, kv, next);
			}
		}
		/* check for group name; the local key wins over global */
		if (g_key_file_has_key(cr->key_file, groups[g], KEY_GROUP,
				       nullptr) != FALSE) {
			gchar *group = g_key_file_get_value(
				cr->key_file, groups[g], KEY_GROUP, nullptr);
			assert(group != nullptr);
			scenario_set_group(scenario, group);
			/* scenario_set_group() duplicated the string */
			free(group);
		} else if (g_key_file_has_key(cr->key_file, SECTION_GLOBAL,
					      KEY_GROUP, nullptr) != FALSE) {
			gchar *group = g_key_file_get_value(cr->key_file,
							    SECTION_GLOBAL,
							    KEY_GROUP, nullptr);
			assert(group != nullptr);
			scenario_set_group(scenario, group);
			/* scenario_set_group() duplicated the string */
			free(group);
		}
		gsize nkeys;
		gchar **keys = g_key_file_get_keys(cr->key_file, groups[g],
						   &nkeys, nullptr);
		assert(nullptr != keys);
		if (!keys) {
			ret = -1;
			scenario_free(scenario);
			goto err_scenarios;
		}
		/*
		 * Read key/values from the scenario's section.
		 */
		for (k = 0; k < nkeys; k++) {
			if (!is_argument(keys[k]))
				continue;
			char *value = g_key_file_get_value(
				cr->key_file, groups[g], keys[k], nullptr);
			assert(nullptr != value);
			if (!value) {
				ret = -1;
				g_strfreev(keys);
				scenario_free(scenario);
				goto err_scenarios;
			}
			struct kv *kv = kv_alloc(keys[k], value);
			assert(nullptr != kv);
			free(value);
			if (!kv) {
				g_strfreev(keys);
				ret = -1;
				scenario_free(scenario);
				goto err_scenarios;
			}
			TAILQ_INSERT_TAIL(&scenario->head, kv, next);
		}
		g_strfreev(keys);
		/* ownership of 'scenario' moves to the result list */
		TAILQ_INSERT_TAIL(&s->head, scenario, next);
	}
	g_strfreev(gkeys);
	g_strfreev(groups);
	*scenarios = s;
	return 0;
err_scenarios:
	scenarios_free(s);
err_gkeys:
	g_strfreev(gkeys);
err_groups:
	g_strfreev(groups);
	return ret;
}
| 7,448 | 23.185065 | 74 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/rpmem_persist.cpp
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_persist.cpp -- rpmem persist benchmarks definition
*/
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "libpmem.h"
#include "librpmem.h"
#include "os.h"
#include "set.h"
#include "util.h"
#define CL_ALIGNMENT 64
#define MAX_OFFSET (CL_ALIGNMENT - 1)
#define ALIGN_CL(x) (((x) + CL_ALIGNMENT - 1) & ~(CL_ALIGNMENT - 1))
/*
 * rpmem_args -- benchmark specific command line options
 */
struct rpmem_args {
	char *mode;	   /* chunk selection mode: stat, seq[-wrap], rand[-wrap] */
	bool no_warmup;	   /* skip the initial whole-pool warmup pass */
	bool no_memset;	   /* do not call memset before each persist */
	size_t chunk_size; /* elementary chunk size (set from args->dsize) */
	size_t dest_off;   /* destination offset within a cache line */
	bool relaxed;	   /* use RPMEM_PERSIST_RELAXED flag */
};
/*
 * rpmem_bench -- benchmark context, shared by all workers
 */
struct rpmem_bench {
	struct rpmem_args *pargs; /* benchmark specific arguments */
	size_t *offsets;	  /* precomputed per-(thread,op) pool offsets */
	size_t n_offsets;	  /* number of entries in offsets[] */
	int const_b;		  /* memset() value; NOTE(review): unused here */
	size_t min_size;	  /* minimum required master replica size */
	void *addrp;		  /* mapped file address */
	void *pool;		  /* memory pool address (same mapping) */
	size_t pool_size;	  /* size of memory pool */
	size_t mapped_len;	  /* length actually mapped by pmem_map_file */
	RPMEMpool **rpp;	  /* one RPMEMpool handle per remote replica */
	unsigned *nlanes;	  /* number of lanes for each remote replica */
	unsigned nreplicas;	  /* number of remote replicas */
	size_t csize_align;	  /* chunk size rounded up to cache line */
	unsigned flags;		  /* flags passed to rpmem_persist */
};
/*
 * operation_mode -- how chunks are selected for each persist operation
 */
enum operation_mode {
	OP_MODE_UNKNOWN,
	OP_MODE_STAT,	   /* always use the same chunk */
	OP_MODE_SEQ,	   /* use consecutive chunks */
	OP_MODE_RAND,	   /* use random chunks */
	OP_MODE_SEQ_WRAP,  /* consecutive chunks, wrapping at pool size */
	OP_MODE_RAND_WRAP, /* random chunks, bounded by pool size */
};
/*
 * parse_op_mode -- map a mode-name string to its operation_mode value;
 * returns OP_MODE_UNKNOWN for any unrecognized name.
 */
static enum operation_mode
parse_op_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum operation_mode mode;
	} mode_table[] = {
		{"stat", OP_MODE_STAT},
		{"seq", OP_MODE_SEQ},
		{"rand", OP_MODE_RAND},
		{"seq-wrap", OP_MODE_SEQ_WRAP},
		{"rand-wrap", OP_MODE_RAND_WRAP},
	};

	for (size_t i = 0; i < sizeof(mode_table) / sizeof(mode_table[0]);
	     ++i) {
		if (strcmp(arg, mode_table[i].name) == 0)
			return mode_table[i].mode;
	}

	return OP_MODE_UNKNOWN;
}
/*
 * init_offsets -- initialize offsets[] array depending on the selected mode
 *
 * Precomputes one pool offset per (thread, operation) pair so the measured
 * loop does no RNG or index arithmetic. Returns 0 on success, -1 on
 * allocation failure or unknown mode.
 */
static int
init_offsets(struct benchmark_args *args, struct rpmem_bench *mb,
	     enum operation_mode op_mode)
{
	/* chunks available per thread when wrapping on the pool size */
	size_t n_ops_by_size = (mb->pool_size - POOL_HDR_SIZE) /
		(args->n_threads * mb->csize_align);
	mb->n_offsets = args->n_ops_per_thread * args->n_threads;
	mb->offsets = (size_t *)malloc(mb->n_offsets * sizeof(*mb->offsets));
	if (!mb->offsets) {
		perror("malloc");
		return -1;
	}
	unsigned seed = args->seed;
	for (size_t i = 0; i < args->n_threads; i++) {
		for (size_t j = 0; j < args->n_ops_per_thread; j++) {
			/* flat index of (thread i, op j) */
			size_t off_idx = i * args->n_ops_per_thread + j;
			size_t chunk_idx;
			switch (op_mode) {
				case OP_MODE_STAT:
					/* thread i always hits chunk i */
					chunk_idx = i;
					break;
				case OP_MODE_SEQ:
					/* a distinct chunk for every op */
					chunk_idx =
						i * args->n_ops_per_thread + j;
					break;
				case OP_MODE_RAND:
					/* random chunk within this thread's
					 * private range */
					chunk_idx = i * args->n_ops_per_thread +
						os_rand_r(&seed) %
							args->n_ops_per_thread;
					break;
				case OP_MODE_SEQ_WRAP:
					/* sequential, wrapping at the number
					 * of chunks that fit in the pool */
					chunk_idx = i * n_ops_by_size +
						j % n_ops_by_size;
					break;
				case OP_MODE_RAND_WRAP:
					/* random within the pool-bounded
					 * per-thread range */
					chunk_idx = i * n_ops_by_size +
						os_rand_r(&seed) %
							n_ops_by_size;
					break;
				default:
					assert(0);
					return -1;
			}
			/* skip pool header; apply cache-line misalignment */
			mb->offsets[off_idx] = POOL_HDR_SIZE +
				chunk_idx * mb->csize_align +
				mb->pargs->dest_off;
		}
	}
	return 0;
}
/*
 * do_warmup -- warm up by zeroing the whole usable pool area and pushing
 * it to every remote replica once.
 */
static int
do_warmup(struct rpmem_bench *mb)
{
	char *data = (char *)mb->pool + POOL_HDR_SIZE;
	size_t data_size = mb->pool_size - POOL_HDR_SIZE;

	/* clear the entire pool */
	memset(data, 0, data_size);

	unsigned r = 0;
	while (r < mb->nreplicas) {
		int ret = rpmem_persist(mb->rpp[r], POOL_HDR_SIZE, data_size,
					0, mb->flags);
		if (ret)
			return ret;
		++r;
	}

	/* if no memset for each operation, do one big memset */
	if (mb->pargs->no_memset)
		memset(data, 0xFF, data_size);

	return 0;
}
/*
 * rpmem_op -- actual benchmark operation
 *
 * Optionally fills the chunk with a per-(thread,op) byte pattern, then
 * issues rpmem_persist() of that chunk to every remote replica using the
 * worker's own lane. Returns 0 on success or the failing persist's code.
 */
static int
rpmem_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct rpmem_bench *)pmembench_get_priv(bench);
	assert(info->index < mb->n_offsets);
	/* look up the offset precomputed by init_offsets() */
	uint64_t idx = info->worker->index * info->args->n_ops_per_thread +
		info->index;
	size_t offset = mb->offsets[idx];
	size_t len = mb->pargs->chunk_size;
	if (!mb->pargs->no_memset) {
		void *dest = (char *)mb->pool + offset;
		/* thread id on MS 4 bits and operation id on LS 4 bits */
		int c = ((info->worker->index & 0xf) << 4) +
			((0xf & info->index));
		memset(dest, c, len);
	}
	int ret = 0;
	for (unsigned r = 0; r < mb->nreplicas; ++r) {
		assert(info->worker->index < mb->nlanes[r]);
		/* one lane per worker avoids cross-thread serialization */
		ret = rpmem_persist(mb->rpp[r], offset, len,
				    info->worker->index, mb->flags);
		if (ret) {
			fprintf(stderr, "rpmem_persist replica #%u: %s\n", r,
				rpmem_errormsg());
			return ret;
		}
	}
	return 0;
}
/*
 * rpmem_map_file -- create and map the local (master) replica file;
 * the mapping address and length are stored in mb. Returns 0 on success.
 */
static int
rpmem_map_file(const char *path, struct rpmem_bench *mb, size_t size)
{
#ifndef _WIN32
	int mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
#else
	int mode = S_IWRITE | S_IREAD;
#endif

	mb->addrp = pmem_map_file(path, size, PMEM_FILE_CREATE, mode,
				  &mb->mapped_len, nullptr);

	return mb->addrp != nullptr ? 0 : -1;
}
/*
 * rpmem_unmap_file -- release the mapping created by rpmem_map_file()
 */
static int
rpmem_unmap_file(struct rpmem_bench *mb)
{
	int ret = pmem_unmap(mb->addrp, mb->mapped_len);
	return ret;
}
/*
 * rpmem_poolset_init -- read poolset file and initialize benchmark accordingly
 *
 * Validates that the poolset has a single-part local master replica and
 * only remote additional replicas, maps the master, then opens one
 * RPMEMpool per remote replica with at least n_threads lanes each.
 * On failure every resource acquired so far is released (goto chain).
 * Returns 0 on success, -1 on any error.
 */
static int
rpmem_poolset_init(const char *path, struct rpmem_bench *mb,
		   struct benchmark_args *args)
{
	struct pool_set *set;
	struct pool_replica *rep;
	struct remote_replica *remote;
	struct pool_set_part *part;
	struct rpmem_pool_attr attr;
	memset(&attr, 0, sizeof(attr));
	/* benchmark-specific pool signature */
	memcpy(attr.signature, "PMEMBNCH", sizeof(attr.signature));
	/* read and validate poolset */
	if (util_poolset_read(&set, path)) {
		fprintf(stderr, "Invalid poolset file '%s'\n", path);
		return -1;
	}
	assert(set);
	if (set->nreplicas < 2) {
		fprintf(stderr, "No replicas defined\n");
		goto err_poolset_free;
	}
	if (set->remote == 0) {
		fprintf(stderr, "No remote replicas defined\n");
		goto err_poolset_free;
	}
	/* every replica other than the master must be remote */
	for (unsigned i = 1; i < set->nreplicas; ++i) {
		if (!set->replica[i]->remote) {
			fprintf(stderr, "Local replicas are not supported\n");
			goto err_poolset_free;
		}
	}
	/* read and validate master replica */
	rep = set->replica[0];
	assert(rep);
	assert(rep->remote == nullptr);
	if (rep->nparts != 1) {
		fprintf(stderr, "Multipart master replicas "
				"are not supported\n");
		goto err_poolset_free;
	}
	if (rep->repsize < mb->min_size) {
		fprintf(stderr, "A master replica is too small (%zu < %zu)\n",
			rep->repsize, mb->min_size);
		goto err_poolset_free;
	}
	part = (struct pool_set_part *)&rep->part[0];
	if (rpmem_map_file(part->path, mb, rep->repsize)) {
		perror(part->path);
		goto err_poolset_free;
	}
	mb->pool_size = mb->mapped_len;
	mb->pool = (void *)((uintptr_t)mb->addrp);
	/* prepare remote replicas */
	mb->nreplicas = set->nreplicas - 1;
	mb->nlanes = (unsigned *)malloc(mb->nreplicas * sizeof(unsigned));
	if (mb->nlanes == nullptr) {
		perror("malloc");
		goto err_unmap_file;
	}
	mb->rpp = (RPMEMpool **)malloc(mb->nreplicas * sizeof(RPMEMpool *));
	if (mb->rpp == nullptr) {
		perror("malloc");
		goto err_free_lanes;
	}
	unsigned r;
	for (r = 0; r < mb->nreplicas; ++r) {
		remote = set->replica[r + 1]->remote;
		assert(remote);
		/* request one lane per worker thread */
		mb->nlanes[r] = args->n_threads;
		/* Temporary WA for librpmem issue */
		++mb->nlanes[r];
		/* rpmem_create may reduce nlanes[r] to what's available */
		mb->rpp[r] = rpmem_create(remote->node_addr, remote->pool_desc,
					  mb->addrp, mb->pool_size,
					  &mb->nlanes[r], &attr);
		if (!mb->rpp[r]) {
			perror("rpmem_create");
			goto err_rpmem_close;
		}
		if (mb->nlanes[r] < args->n_threads) {
			fprintf(stderr, "Number of threads too large for "
					"replica #%u (max: %u)\n",
				r, mb->nlanes[r]);
			r++; /* close current replica */
			goto err_rpmem_close;
		}
	}
	util_poolset_free(set);
	return 0;
err_rpmem_close:
	/* close only replicas opened so far (indices < r) */
	for (unsigned i = 0; i < r; i++)
		rpmem_close(mb->rpp[i]);
	free(mb->rpp);
err_free_lanes:
	free(mb->nlanes);
err_unmap_file:
	rpmem_unmap_file(mb);
err_poolset_free:
	util_poolset_free(set);
	return -1;
}
/*
 * rpmem_poolset_fini -- close opened local and remote replicas
 */
static void
rpmem_poolset_fini(struct rpmem_bench *mb)
{
	/* close each remote replica, then unmap the local master */
	for (unsigned i = 0; i < mb->nreplicas; ++i)
		rpmem_close(mb->rpp[i]);

	rpmem_unmap_file(mb);
}
/*
* rpmem_set_min_size -- compute minimal file size based on benchmark arguments
*/
static void
rpmem_set_min_size(struct rpmem_bench *mb, enum operation_mode op_mode,
struct benchmark_args *args)
{
mb->csize_align = ALIGN_CL(mb->pargs->chunk_size);
switch (op_mode) {
case OP_MODE_STAT:
mb->min_size = mb->csize_align * args->n_threads;
break;
case OP_MODE_SEQ:
case OP_MODE_RAND:
mb->min_size = mb->csize_align *
args->n_ops_per_thread * args->n_threads;
break;
case OP_MODE_SEQ_WRAP:
case OP_MODE_RAND_WRAP:
/*
* at least one chunk per thread to avoid false sharing
*/
mb->min_size = mb->csize_align * args->n_threads;
break;
default:
assert(0);
}
mb->min_size += POOL_HDR_SIZE;
}
/*
 * rpmem_init -- initialization function
 *
 * Parses benchmark options, sizes and opens the poolset (local master +
 * remote replicas), precomputes the offsets table and optionally runs the
 * warmup pass. On failure, unwinds whatever was set up (goto chain).
 * Returns 0 on success, -1 on any error.
 */
static int
rpmem_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	auto *mb = (struct rpmem_bench *)malloc(sizeof(struct rpmem_bench));
	if (!mb) {
		perror("malloc");
		return -1;
	}
	mb->flags = 0;
	mb->pargs = (struct rpmem_args *)args->opts;
	/* chunk size comes from the framework's data-size argument */
	mb->pargs->chunk_size = args->dsize;
	if (mb->pargs->relaxed)
		mb->flags |= RPMEM_PERSIST_RELAXED;
	enum operation_mode op_mode = parse_op_mode(mb->pargs->mode);
	if (op_mode == OP_MODE_UNKNOWN) {
		fprintf(stderr, "Invalid operation mode argument '%s'\n",
			mb->pargs->mode);
		goto err_parse_mode;
	}
	/* must run before poolset init: min_size gates the master replica */
	rpmem_set_min_size(mb, op_mode, args);
	if (rpmem_poolset_init(args->fname, mb, args)) {
		goto err_poolset_init;
	}
	/* initialize offsets[] array depending on benchmark args */
	if (init_offsets(args, mb, op_mode) < 0) {
		goto err_init_offsets;
	}
	if (!mb->pargs->no_warmup) {
		if (do_warmup(mb) != 0) {
			fprintf(stderr, "do_warmup() function failed.\n");
			goto err_warmup;
		}
	}
	pmembench_set_priv(bench, mb);
	return 0;
err_warmup:
	free(mb->offsets);
err_init_offsets:
	rpmem_poolset_fini(mb);
err_poolset_init:
err_parse_mode:
	free(mb);
	return -1;
}
/*
 * rpmem_exit -- benchmark cleanup function; tears down everything
 * created in rpmem_init().
 */
static int
rpmem_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *priv = (struct rpmem_bench *)pmembench_get_priv(bench);

	rpmem_poolset_fini(priv);
	free(priv->offsets);
	free(priv);

	return 0;
}
static struct benchmark_clo rpmem_clo[5];
/* Stores information about benchmark. */
static struct benchmark_info rpmem_info;
/*
 * pmem_rpmem_persist -- registers the rpmem_persist benchmark with the
 * framework: fills in the command line option descriptors and the
 * benchmark_info entry points. Runs at load time via CONSTRUCTOR.
 *
 * Fix: removed the stray ';' after the closing brace — an empty
 * declaration at file scope, which is rejected/warned under -pedantic.
 */
CONSTRUCTOR(rpmem_persist_constructor)
void
pmem_rpmem_persist(void)
{
	/* -M/--mem-mode: chunk selection strategy */
	rpmem_clo[0].opt_short = 'M';
	rpmem_clo[0].opt_long = "mem-mode";
	rpmem_clo[0].descr = "Memory writing mode :"
			     " stat, seq[-wrap], rand[-wrap]";
	rpmem_clo[0].def = "seq";
	rpmem_clo[0].off = clo_field_offset(struct rpmem_args, mode);
	rpmem_clo[0].type = CLO_TYPE_STR;

	/* -D/--dest-offset: misalignment within a cache line */
	rpmem_clo[1].opt_short = 'D';
	rpmem_clo[1].opt_long = "dest-offset";
	rpmem_clo[1].descr = "Destination cache line "
			     "alignment offset";
	rpmem_clo[1].def = "0";
	rpmem_clo[1].off = clo_field_offset(struct rpmem_args, dest_off);
	rpmem_clo[1].type = CLO_TYPE_UINT;
	rpmem_clo[1].type_uint.size =
		clo_field_size(struct rpmem_args, dest_off);
	rpmem_clo[1].type_uint.base = CLO_INT_BASE_DEC;
	rpmem_clo[1].type_uint.min = 0;
	rpmem_clo[1].type_uint.max = MAX_OFFSET;

	/* -w/--no-warmup: skip the initial whole-pool pass */
	rpmem_clo[2].opt_short = 'w';
	rpmem_clo[2].opt_long = "no-warmup";
	rpmem_clo[2].descr = "Don't do warmup";
	rpmem_clo[2].def = "false";
	rpmem_clo[2].type = CLO_TYPE_FLAG;
	rpmem_clo[2].off = clo_field_offset(struct rpmem_args, no_warmup);

	/* -T/--no-memset: measure persist alone, without the local write */
	rpmem_clo[3].opt_short = 'T';
	rpmem_clo[3].opt_long = "no-memset";
	rpmem_clo[3].descr = "Don't call memset for all rpmem_persist";
	rpmem_clo[3].def = "false";
	rpmem_clo[3].off = clo_field_offset(struct rpmem_args, no_memset);
	rpmem_clo[3].type = CLO_TYPE_FLAG;

	/* --persist-relaxed: relaxed persist ordering (long option only) */
	rpmem_clo[4].opt_short = 0;
	rpmem_clo[4].opt_long = "persist-relaxed";
	rpmem_clo[4].descr = "Use RPMEM_PERSIST_RELAXED flag";
	rpmem_clo[4].def = "false";
	rpmem_clo[4].off = clo_field_offset(struct rpmem_args, relaxed);
	rpmem_clo[4].type = CLO_TYPE_FLAG;

	/* benchmark descriptor */
	rpmem_info.name = "rpmem_persist";
	rpmem_info.brief = "Benchmark for rpmem_persist() "
			   "operation";
	rpmem_info.init = rpmem_init;
	rpmem_info.exit = rpmem_exit;
	rpmem_info.multithread = true;
	rpmem_info.multiops = true;
	rpmem_info.operation = rpmem_op;
	rpmem_info.measure_time = true;
	rpmem_info.clos = rpmem_clo;
	rpmem_info.nclos = ARRAY_SIZE(rpmem_clo);
	rpmem_info.opts_size = sizeof(struct rpmem_args);
	rpmem_info.rm_file = true;
	rpmem_info.allow_poolset = true;
	rpmem_info.print_bandwidth = true;
	REGISTER_BENCHMARK(rpmem_info);
}
| 15,282 | 24.815878 | 79 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmemobj_gen.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobj_gen.cpp -- benchmark for pmemobj_direct()
* and pmemobj_open() functions.
*/
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <file.h>
#include <sys/stat.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "libpmemobj.h"
#define LAYOUT_NAME "benchmark"
#define FACTOR 4
#define DIR_MODE 0700
#define FILE_MODE 0666
#define PART_NAME "/part"
#define MAX_DIGITS 2
struct pobj_bench;
struct pobj_worker;
typedef size_t (*fn_type_num_t)(struct pobj_bench *ob, size_t worker_idx,
size_t op_idx);
typedef size_t (*fn_size_t)(struct pobj_bench *ob, size_t idx);
typedef size_t (*fn_num_t)(size_t idx);
/*
* Enumeration used to determine the mode of the assigning type_number
* value to the persistent objects.
*/
enum type_mode {
TYPE_MODE_ONE,
TYPE_MODE_PER_THREAD,
TYPE_MODE_RAND,
MAX_TYPE_MODE,
};
/*
 * pobj_args - Stores command line parsed arguments.
 *
 * type_num : Type number assignment mode: "one", "per-thread" or "rand".
 *
 * range : Use random allocation sizes (derived: set when min_size > 0).
 *
 * min_size : Minimum allocation size.
 *
 * n_objs : Number of objects allocated per thread.
 *
 * one_pool : Use one common pool for all threads.
 *
 * one_obj : Create and use one object per thread.
 *
 * obj_size : Size of each allocated object.
 *
 * n_ops : Number of operations.
 */
struct pobj_args {
	char *type_num;	   /* type number mode name */
	bool range;	   /* use random allocation sizes */
	unsigned min_size; /* minimum allocation size */
	size_t n_objs;	   /* objects allocated per thread */
	bool one_pool;	   /* one common pool for all threads */
	bool one_obj;	   /* one object per thread */
	size_t obj_size;   /* size of each allocated object */
	size_t n_ops;	   /* number of operations */
};
/*
 * pobj_bench - Stores variables used in benchmark, passed within functions.
 *
 * pop : Pointer(s) to the persistent pool(s).
 *
 * args_priv : Stores the pobj_args structure parsed from the command line.
 *
 * sets : Stores file names used to create one pool per thread.
 *
 * random_types : Random type numbers for persistent objects.
 *
 * rand_sizes : Random allocation sizes (used when range is set).
 *
 * n_pools : Number of created pools.
 *
 * type_mode : type_mode enum value parsed from args.
 *
 * fn_type_num : Function returning proper type number for each object.
 *
 * fn_size : Function returning proper size of allocation.
 *
 * pool : Function returning the thread's pool index when one pool
 * per thread is created, or index 0 when pools are shared.
 *
 * obj : Function returning the operation's object index, or
 * index 0 when a single object per thread is used.
 */
struct pobj_bench {
	PMEMobjpool **pop;	  /* one pool handle per n_pools */
	struct pobj_args *args_priv; /* parsed command line options */
	const char **sets;	  /* pool file names, one per pool */
	size_t *random_types;	  /* per-op random type numbers, or null */
	size_t *rand_sizes;	  /* per-op random sizes, or null */
	size_t n_pools;		  /* number of pools created */
	int type_mode;		  /* enum type_mode value */
	fn_type_num_t fn_type_num; /* type-number selector */
	fn_size_t fn_size;	  /* allocation-size selector */
	fn_num_t pool;		  /* pool-index selector */
	fn_num_t obj;		  /* object-index selector */
};
/*
 * pobj_worker - Stores variables used by one thread.
 */
struct pobj_worker {
	PMEMoid *oids; /* array of n_objs preallocated objects */
};
/*
 * type_mode_one -- in TYPE_MODE_ONE every persistent object shares the
 * same type_number, so the selector always yields 0.
 */
static size_t
type_mode_one(struct pobj_bench *bench_priv, size_t worker_idx, size_t op_idx)
{
	(void)bench_priv;
	(void)worker_idx;
	(void)op_idx;
	return 0;
}
/*
 * type_mode_per_thread -- in TYPE_MODE_PER_THREAD all objects allocated
 * by one thread share that thread's index as their type_number.
 */
static size_t
type_mode_per_thread(struct pobj_bench *bench_priv, size_t worker_idx,
		     size_t op_idx)
{
	(void)bench_priv;
	(void)op_idx;
	return worker_idx;
}
/*
 * type_mode_rand -- look up the pre-drawn random type number for the
 * given operation index.
 */
static size_t
type_mode_rand(struct pobj_bench *bench_priv, size_t worker_idx, size_t op_idx)
{
	(void)worker_idx;
	return bench_priv->random_types[op_idx];
}
/*
 * range_size -- allocation size selector for range mode: looks up the
 * pre-drawn random size for the given index.
 */
static size_t
range_size(struct pobj_bench *bench_priv, size_t idx)
{
	return bench_priv->rand_sizes[idx];
}
/*
 * static_size -- allocation size selector for fixed-size mode: ignores
 * the index and returns the configured object size.
 */
static size_t
static_size(struct pobj_bench *bench_priv, size_t idx)
{
	(void)idx;
	return bench_priv->args_priv->obj_size;
}
/*
 * diff_num -- identity index selector: each call site gets its own slot.
 */
static size_t
diff_num(size_t idx)
{
	size_t slot = idx;
	return slot;
}
/*
 * one_num -- constant index selector: every call site shares slot 0.
 */
static size_t
one_num(size_t idx)
{
	(void)idx;
	return 0;
}
/* dispatch table and matching CLI names, both indexed by enum type_mode */
static fn_type_num_t type_mode_func[MAX_TYPE_MODE] = {
	type_mode_one, type_mode_per_thread, type_mode_rand};
const char *type_mode_names[MAX_TYPE_MODE] = {"one", "per-thread", "rand"};
/*
 * parse_type_mode -- parses command line "--type-number" argument;
 * returns the matching type_mode value, or MAX_TYPE_MODE if the name
 * is unknown.
 */
static enum type_mode
parse_type_mode(const char *arg)
{
	enum type_mode mode = TYPE_MODE_ONE;
	while (mode < MAX_TYPE_MODE) {
		if (strcmp(arg, type_mode_names[mode]) == 0)
			break;
		mode = (enum type_mode)(mode + 1);
	}
	return mode;
}
/*
 * rand_sizes -- allocate an array of n_ops random allocation sizes in
 * [min, max]. Used only when the range flag is set. Returns nullptr on
 * allocation failure.
 */
static size_t *
rand_sizes(size_t min, size_t max, size_t n_ops)
{
	assert(n_ops != 0);

	auto *sizes = (size_t *)malloc(n_ops * sizeof(size_t));
	if (sizes == nullptr) {
		perror("malloc");
		return nullptr;
	}

	size_t i = 0;
	while (i < n_ops) {
		sizes[i] = RRAND(max, min);
		++i;
	}

	return sizes;
}
/*
 * random_types -- allocate and fill the per-object random type_number
 * array in bench_priv. Returns 0 on success, -1 on allocation failure.
 */
static int
random_types(struct pobj_bench *bench_priv, struct benchmark_args *args)
{
	(void)args;

	size_t n = bench_priv->args_priv->n_objs;
	assert(n != 0);

	bench_priv->random_types = (size_t *)malloc(n * sizeof(size_t));
	if (bench_priv->random_types == nullptr) {
		perror("malloc");
		return -1;
	}

	for (size_t i = 0; i < n; ++i)
		bench_priv->random_types[i] = rand() % UINT32_MAX;

	return 0;
}
/*
 * pobj_init - common part of the benchmark initialization functions.
 * Parses command line arguments, sets selector functions, and creates
 * the persistent pool(s) — either one shared pool or one per thread.
 * Returns 0 on success; on any failure unwinds via the goto chain and
 * returns -1.
 */
static int
pobj_init(struct benchmark *bench, struct benchmark_args *args)
{
	unsigned i = 0;
	size_t psize;
	size_t n_objs;
	assert(bench != nullptr);
	assert(args != nullptr);
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	auto *bench_priv =
		(struct pobj_bench *)malloc(sizeof(struct pobj_bench));
	if (bench_priv == nullptr) {
		perror("malloc");
		return -1;
	}
	assert(args->opts != nullptr);
	bench_priv->args_priv = (struct pobj_args *)args->opts;
	bench_priv->args_priv->obj_size = args->dsize;
	/* range mode is implied by a non-zero minimum size */
	bench_priv->args_priv->range =
		bench_priv->args_priv->min_size > 0 ? true : false;
	bench_priv->n_pools =
		!bench_priv->args_priv->one_pool ? args->n_threads : 1;
	/* selector functions: per-thread pool/object vs one shared slot */
	bench_priv->pool = bench_priv->n_pools > 1 ? diff_num : one_num;
	bench_priv->obj = !bench_priv->args_priv->one_obj ? diff_num : one_num;
	if ((args->is_poolset || type == TYPE_DEVDAX) &&
	    bench_priv->n_pools > 1) {
		fprintf(stderr,
			"cannot use poolset nor device dax for multiple pools,"
			" please use -P|--one-pool option instead");
		goto free_bench_priv;
	}
	/*
	 * Multiplication by FACTOR prevents from out of memory error
	 * as the actual size of the allocated persistent objects
	 * is always larger than requested.
	 */
	n_objs = bench_priv->args_priv->n_objs;
	if (bench_priv->n_pools == 1)
		n_objs *= args->n_threads;
	psize = n_objs * args->dsize * args->n_threads * FACTOR;
	if (psize < PMEMOBJ_MIN_POOL)
		psize = PMEMOBJ_MIN_POOL;
	/* assign type_number determining function */
	bench_priv->type_mode =
		parse_type_mode(bench_priv->args_priv->type_num);
	switch (bench_priv->type_mode) {
		case MAX_TYPE_MODE:
			/* MAX_TYPE_MODE doubles as the "unknown" result */
			fprintf(stderr, "unknown type mode");
			goto free_bench_priv;
		case TYPE_MODE_RAND:
			if (random_types(bench_priv, args))
				goto free_bench_priv;
			break;
		default:
			bench_priv->random_types = nullptr;
	}
	bench_priv->fn_type_num = type_mode_func[bench_priv->type_mode];
	/* assign size determining function */
	bench_priv->fn_size =
		bench_priv->args_priv->range ? range_size : static_size;
	bench_priv->rand_sizes = nullptr;
	if (bench_priv->args_priv->range) {
		if (bench_priv->args_priv->min_size > args->dsize) {
			fprintf(stderr, "Invalid allocation size");
			goto free_random_types;
		}
		bench_priv->rand_sizes =
			rand_sizes(bench_priv->args_priv->min_size,
				   bench_priv->args_priv->obj_size,
				   bench_priv->args_priv->n_objs);
		if (bench_priv->rand_sizes == nullptr)
			goto free_random_types;
	}
	assert(bench_priv->n_pools > 0);
	bench_priv->pop = (PMEMobjpool **)calloc(bench_priv->n_pools,
						 sizeof(PMEMobjpool *));
	if (bench_priv->pop == nullptr) {
		perror("calloc");
		goto free_random_sizes;
	}
	bench_priv->sets = (const char **)calloc(bench_priv->n_pools,
						 sizeof(const char *));
	if (bench_priv->sets == nullptr) {
		perror("calloc");
		goto free_pop;
	}
	if (bench_priv->n_pools > 1) {
		/* one pool per thread: create part files in a directory */
		assert(!args->is_poolset);
		if (util_file_mkdir(args->fname, DIR_MODE) != 0) {
			fprintf(stderr, "cannot create directory\n");
			goto free_sets;
		}
		size_t path_len = (strlen(PART_NAME) + strlen(args->fname)) +
			MAX_DIGITS + 1;
		for (i = 0; i < bench_priv->n_pools; i++) {
			bench_priv->sets[i] =
				(char *)malloc(path_len * sizeof(char));
			if (bench_priv->sets[i] == nullptr) {
				perror("malloc");
				goto free_sets;
			}
			int ret =
				snprintf((char *)bench_priv->sets[i], path_len,
					 "%s%s%02x", args->fname, PART_NAME, i);
			if (ret < 0 || ret >= (int)path_len) {
				perror("snprintf");
				goto free_sets;
			}
			bench_priv->pop[i] =
				pmemobj_create(bench_priv->sets[i], LAYOUT_NAME,
					       psize, FILE_MODE);
			if (bench_priv->pop[i] == nullptr) {
				perror(pmemobj_errormsg());
				goto free_sets;
			}
		}
	} else {
		/* single shared pool, possibly on a poolset or device dax */
		if (args->is_poolset || type == TYPE_DEVDAX) {
			/* NOTE(review): message reads backwards — the check
			 * actually fails when the file is too small */
			if (args->fsize < psize) {
				fprintf(stderr, "file size too large\n");
				goto free_pools;
			}
			/* psize == 0 means "use the existing size" */
			psize = 0;
		}
		/* sets[0] aliases args->fname; not freed in pobj_exit */
		bench_priv->sets[0] = args->fname;
		bench_priv->pop[0] = pmemobj_create(
			bench_priv->sets[0], LAYOUT_NAME, psize, FILE_MODE);
		if (bench_priv->pop[0] == nullptr) {
			perror(pmemobj_errormsg());
			goto free_pools;
		}
	}
	pmembench_set_priv(bench, bench_priv);
	return 0;
free_sets:
	/* close/free only the pools and names created so far */
	for (; i > 0; i--) {
		pmemobj_close(bench_priv->pop[i - 1]);
		free((char *)bench_priv->sets[i - 1]);
	}
free_pools:
	free(bench_priv->sets);
free_pop:
	free(bench_priv->pop);
free_random_sizes:
	free(bench_priv->rand_sizes);
free_random_types:
	free(bench_priv->random_types);
free_bench_priv:
	free(bench_priv);
	return -1;
}
/*
 * pobj_direct_init -- special part of pobj_direct benchmark
 * initialization: derives the per-thread object count, then delegates
 * to the common pobj_init().
 */
static int
pobj_direct_init(struct benchmark *bench, struct benchmark_args *args)
{
	auto *pa = (struct pobj_args *)args->opts;

	/* either one shared object, or one object per operation */
	pa->n_objs = pa->one_obj ? 1 : args->n_ops_per_thread;

	return pobj_init(bench, args);
}
/*
 * pobj_exit -- common part for the benchmarks exit functions; closes
 * all pools and releases everything allocated in pobj_init().
 */
static int
pobj_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *bp = (struct pobj_bench *)pmembench_get_priv(bench);

	if (bp->n_pools == 1) {
		/* sets[0] aliases the framework's file name; not freed */
		pmemobj_close(bp->pop[0]);
	} else {
		for (size_t i = 0; i < bp->n_pools; i++) {
			pmemobj_close(bp->pop[i]);
			free((char *)bp->sets[i]);
		}
	}

	free(bp->sets);
	free(bp->pop);
	free(bp->rand_sizes);
	free(bp->random_types);
	free(bp);
	return 0;
}
/*
 * pobj_init_worker -- worker initialization: preallocates this thread's
 * array of persistent objects so the measured operations only access them.
 * Returns 0 on success, -1 on any allocation failure.
 *
 * Fix: worker->priv is now assigned only after all allocations succeed.
 * Previously it was set before the object allocations, so every failure
 * path freed pw while worker->priv still pointed at it, leaving a
 * dangling pointer in the worker.
 */
static int
pobj_init_worker(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	size_t i, idx = worker->index;
	auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);
	auto *pw = (struct pobj_worker *)calloc(1, sizeof(struct pobj_worker));
	if (pw == nullptr) {
		perror("calloc");
		return -1;
	}
	pw->oids = (PMEMoid *)calloc(bench_priv->args_priv->n_objs,
				     sizeof(PMEMoid));
	if (pw->oids == nullptr) {
		free(pw);
		perror("calloc");
		return -1;
	}
	/* allocate every object up front, in this worker's pool */
	PMEMobjpool *pop = bench_priv->pop[bench_priv->pool(idx)];
	for (i = 0; i < bench_priv->args_priv->n_objs; i++) {
		size_t size = bench_priv->fn_size(bench_priv, i);
		size_t type = bench_priv->fn_type_num(bench_priv, idx, i);
		if (pmemobj_alloc(pop, &pw->oids[i], size, type, nullptr,
				  nullptr) != 0) {
			perror("pmemobj_alloc");
			goto out;
		}
	}
	/* publish only fully-initialized worker state */
	worker->priv = pw;
	return 0;
out:
	/* roll back the objects allocated so far */
	for (; i > 0; i--)
		pmemobj_free(&pw->oids[i - 1]);
	free(pw->oids);
	free(pw);
	return -1;
}
/*
* pobj_direct_op -- main operations of the obj_direct benchmark.
*/
static int
pobj_direct_op(struct benchmark *bench, struct operation_info *info)
{
auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench);
auto *pw = (struct pobj_worker *)info->worker->priv;
size_t idx = bench_priv->obj(info->index);
if (pmemobj_direct(pw->oids[idx]) == nullptr)
return -1;
return 0;
}
/*
 * pobj_open_op -- main operation of the obj_open benchmark: close the
 * worker's pool and immediately reopen it, so the reopen is what gets
 * measured.
 */
static int
pobj_open_op(struct benchmark *bench, struct operation_info *info)
{
	auto *bp = (struct pobj_bench *)pmembench_get_priv(bench);
	size_t idx = bp->pool(info->worker->index);

	pmemobj_close(bp->pop[idx]);
	bp->pop[idx] = pmemobj_open(bp->sets[idx], LAYOUT_NAME);

	return bp->pop[idx] == nullptr ? -1 : 0;
}
/*
 * pobj_free_worker -- worker exit function: frees every persistent
 * object allocated in pobj_init_worker() along with the worker state.
 */
static void
pobj_free_worker(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	auto *pw = (struct pobj_worker *)worker->priv;
	auto *bp = (struct pobj_bench *)pmembench_get_priv(bench);

	size_t n = bp->args_priv->n_objs;
	for (size_t i = 0; i < n; i++)
		pmemobj_free(&pw->oids[i]);

	free(pw->oids);
	free(pw);
}
/* benchmark descriptors registered by the constructor below */
static struct benchmark_info obj_open;
static struct benchmark_info obj_direct;

/* Array defining common command line arguments. */
static struct benchmark_clo pobj_direct_clo[4];
static struct benchmark_clo pobj_open_clo[3];

/*
 * pmemobj_gen_constructor -- fills in the CLO tables and benchmark
 * descriptors and registers the obj_open and obj_direct benchmarks.
 *
 * NOTE: the original pobj_open_clo initializers chained most assignments
 * with the comma operator (one giant expression statement); normalized to
 * one statement per assignment for consistency with pobj_direct_clo.
 */
CONSTRUCTOR(pmemobj_gen_constructor)
void
pmemobj_gen_constructor(void)
{
	/* obj_direct command line options */
	pobj_direct_clo[0].opt_short = 'T';
	pobj_direct_clo[0].opt_long = "type-number";
	pobj_direct_clo[0].descr = "Type number mode - one, per-thread, "
				   "rand";
	pobj_direct_clo[0].def = "one";
	pobj_direct_clo[0].off = clo_field_offset(struct pobj_args, type_num);
	pobj_direct_clo[0].type = CLO_TYPE_STR;

	pobj_direct_clo[1].opt_short = 'm';
	pobj_direct_clo[1].opt_long = "min-size";
	pobj_direct_clo[1].type = CLO_TYPE_UINT;
	pobj_direct_clo[1].descr = "Minimum allocation size";
	pobj_direct_clo[1].off = clo_field_offset(struct pobj_args, min_size);
	pobj_direct_clo[1].def = "0";
	pobj_direct_clo[1].type_uint.size =
		clo_field_size(struct pobj_args, min_size);
	pobj_direct_clo[1].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	pobj_direct_clo[1].type_uint.min = 0;
	pobj_direct_clo[1].type_uint.max = UINT_MAX;

	pobj_direct_clo[2].opt_short = 'P';
	pobj_direct_clo[2].opt_long = "one-pool";
	pobj_direct_clo[2].descr = "Create one pool for all threads";
	pobj_direct_clo[2].type = CLO_TYPE_FLAG;
	pobj_direct_clo[2].off = clo_field_offset(struct pobj_args, one_pool);

	pobj_direct_clo[3].opt_short = 'O';
	pobj_direct_clo[3].opt_long = "one-object";
	pobj_direct_clo[3].descr = "Use only one object per thread";
	pobj_direct_clo[3].type = CLO_TYPE_FLAG;
	pobj_direct_clo[3].off = clo_field_offset(struct pobj_args, one_obj);

	/* obj_open command line options */
	pobj_open_clo[0].opt_short = 'T';
	pobj_open_clo[0].opt_long = "type-number";
	pobj_open_clo[0].descr = "Type number mode - one, "
				 "per-thread, rand";
	pobj_open_clo[0].def = "one";
	pobj_open_clo[0].off = clo_field_offset(struct pobj_args, type_num);
	pobj_open_clo[0].type = CLO_TYPE_STR;

	pobj_open_clo[1].opt_short = 'm';
	pobj_open_clo[1].opt_long = "min-size";
	pobj_open_clo[1].type = CLO_TYPE_UINT;
	pobj_open_clo[1].descr = "Minimum allocation size";
	pobj_open_clo[1].off = clo_field_offset(struct pobj_args, min_size);
	pobj_open_clo[1].def = "0";
	pobj_open_clo[1].type_uint.size =
		clo_field_size(struct pobj_args, min_size);
	pobj_open_clo[1].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	pobj_open_clo[1].type_uint.min = 0;
	pobj_open_clo[1].type_uint.max = UINT_MAX;

	pobj_open_clo[2].opt_short = 'o';
	pobj_open_clo[2].opt_long = "objects";
	pobj_open_clo[2].type = CLO_TYPE_UINT;
	pobj_open_clo[2].descr = "Number of objects in each pool";
	pobj_open_clo[2].off = clo_field_offset(struct pobj_args, n_objs);
	pobj_open_clo[2].def = "1";
	pobj_open_clo[2].type_uint.size =
		clo_field_size(struct pobj_args, n_objs);
	pobj_open_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX;
	pobj_open_clo[2].type_uint.min = 1;
	pobj_open_clo[2].type_uint.max = UINT_MAX;

	/* obj_open benchmark descriptor */
	obj_open.name = "obj_open";
	obj_open.brief = "pmemobj_open() benchmark";
	obj_open.init = pobj_init;
	obj_open.exit = pobj_exit;
	obj_open.multithread = true;
	obj_open.multiops = true;
	obj_open.init_worker = pobj_init_worker;
	obj_open.free_worker = pobj_free_worker;
	obj_open.operation = pobj_open_op;
	obj_open.measure_time = true;
	obj_open.clos = pobj_open_clo;
	obj_open.nclos = ARRAY_SIZE(pobj_open_clo);
	obj_open.opts_size = sizeof(struct pobj_args);
	obj_open.rm_file = true;
	obj_open.allow_poolset = true;
	REGISTER_BENCHMARK(obj_open);

	/* obj_direct benchmark descriptor */
	obj_direct.name = "obj_direct";
	obj_direct.brief = "pmemobj_direct() benchmark";
	obj_direct.init = pobj_direct_init;
	obj_direct.exit = pobj_exit;
	obj_direct.multithread = true;
	obj_direct.multiops = true;
	obj_direct.init_worker = pobj_init_worker;
	obj_direct.free_worker = pobj_free_worker;
	obj_direct.operation = pobj_direct_op;
	obj_direct.measure_time = true;
	obj_direct.clos = pobj_direct_clo;
	obj_direct.nclos = ARRAY_SIZE(pobj_direct_clo);
	obj_direct.opts_size = sizeof(struct pobj_args);
	obj_direct.rm_file = true;
	obj_direct.allow_poolset = true;
	REGISTER_BENCHMARK(obj_direct);
}
| 19,467 | 27.214493 | 79 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/config_reader_win.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* config_reader_win.cpp -- config reader module definitions
*/
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <tchar.h>
#include "config_reader.hpp"
#include "queue.h"
#include "scenario.hpp"
#define SECTION_GLOBAL TEXT("global")
#define KEY_BENCHMARK TEXT("bench")
#define KEY_GROUP TEXT("group")
/*
* Maximum section size according to MSDN documentation
*/
#define SIZEOF_SECTION 32767
#define NULL_LIST_EMPTY(x) (_tcslen(x) == 0)
#define NULL_LIST_NEXT(x) ((x) += (_tcslen(x) + 1))
#define KV_LIST_EMPTY(x) (_tcslen(x) == 0)
#define KV_FIRST(x)
#define KV_LIST_NEXT(x) \
((x) += (_tcslen(x) + 1), (x) += (_tcslen(x) + 1), \
(x) = kv_list_skip_comment(x))
#define KV_LIST_KEY(x) (x)
#define KV_LIST_VALUE(x) ((x) + _tcslen(x) + 1)
#define KV_LIST_INIT(x) kv_list_init(x)
#define LIST LPTSTR
#define KV_LIST LPTSTR
/*
 * kv_list_skip_comment -- skip comment lines in ini file
 *
 * Entries beginning with '#' are comments; advance past each one to the
 * next NUL-separated entry.
 */
static KV_LIST
kv_list_skip_comment(KV_LIST list)
{
	while (*list == TEXT('#'))
		list += _tcslen(list) + 1;
	return list;
}
/*
 * kv_list_init -- init KV list
 *
 * Splits every "key=value" entry in place by overwriting '=' with NUL,
 * so key and value become adjacent C strings. Returns NULL if an entry
 * lacks the '=' separator.
 */
static KV_LIST
kv_list_init(LPTSTR list)
{
	list = kv_list_skip_comment(list);
	for (KV_LIST entry = list; !KV_LIST_EMPTY(entry);
	     KV_LIST_NEXT(entry)) {
		LPTSTR sep = _tcsstr(entry, TEXT("="));
		if (sep == NULL)
			return NULL;
		*sep = TEXT('\0');
	}
	return list;
}
/*
* config_reader -- handle structure
*/
struct config_reader {
LPTSTR lpFileName;
};
/*
 * config_reader_alloc -- allocate config reader
 *
 * Uses calloc so that lpFileName starts out as NULL; the original malloc
 * left it indeterminate until config_reader_read() was called.
 */
struct config_reader *
config_reader_alloc(void)
{
	struct config_reader *cr =
		(struct config_reader *)calloc(1, sizeof(*cr));
	return cr;
}
/*
* config_reader_read -- read config file
*/
int
config_reader_read(struct config_reader *cr, const char *fname)
{
DWORD len = 0;
LPTSTR buf = TEXT(" ");
/* get the length of the full pathname incl. terminating null char */
len = GetFullPathName((LPTSTR)fname, 0, buf, NULL);
if (len == 0) {
/* the function failed */
return -1;
} else {
/* allocate a buffer large enough to store the pathname */
LPTSTR buffer = (LPTSTR)malloc(len * sizeof(TCHAR));
DWORD ret = GetFullPathName((LPTSTR)fname, len, buffer, NULL);
if (_taccess(buffer, 0) != 0) {
printf("%s", strerror(errno));
return -1;
}
cr->lpFileName = (LPTSTR)buffer;
}
return 0;
}
/*
 * config_reader_free -- free config reader
 *
 * NOTE(review): cr->lpFileName (allocated in config_reader_read) is not
 * released here, which leaks the path buffer. It cannot be freed safely
 * while config_reader_alloc() leaves the field uninitialized -- confirm
 * and fix both sides together.
 */
void
config_reader_free(struct config_reader *cr)
{
	free(cr);
}
/*
 * is_scenario -- (internal) return true if _name_ is scenario name
 *
 * This filters out the _global_ and _config_ sections. Returns the raw
 * (non-zero) _tcscmp result when the name differs from "global"; callers
 * treat the result as a boolean.
 */
static int
is_scenario(LPTSTR name)
{
	return _tcscmp(name, SECTION_GLOBAL);
}
/*
 * is_argument -- (internal) return true if _name_ is argument name
 *
 * This filters out the _benchmark_ and _group_ keys, which are metadata
 * rather than benchmark arguments.
 */
static int
is_argument(LPTSTR name)
{
	if (_tcscmp(name, KEY_BENCHMARK) == 0)
		return 0;
	return _tcscmp(name, KEY_GROUP) != 0;
}
/*
 * config_reader_get_scenarios -- return scenarios from config file
 *
 * This function reads the config file and returns a list of scenarios.
 * Each scenario contains a list of key/value arguments.
 * The scenario's arguments are merged with arguments from global section.
 *
 * Returns 0 and stores the allocated list in *scenarios on success;
 * returns -1 on failure (all intermediate allocations are released via
 * the goto-cleanup chain at the bottom).
 */
int
config_reader_get_scenarios(struct config_reader *cr,
			    struct scenarios **scenarios)
{
	/*
	 * Read all groups.
	 * The config file must have at least one group, otherwise
	 * it is considered as invalid.
	 */
	int ret = 0;
	TCHAR *sections = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION);
	if (!sections)
		return -1;
	GetPrivateProfileSectionNames(sections, SIZEOF_SECTION, cr->lpFileName);
	if (NULL_LIST_EMPTY(sections)) {
		ret = -1;
		goto err_sections;
	}
	/*
	 * Check if global section is present and read it.
	 */
	/*
	 * NOTE(review): if this malloc fails, `sections` is leaked by the
	 * bare `return -1` below -- should arguably `goto err_sections`.
	 */
	TCHAR *global = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION);
	if (!global)
		return -1;
	GetPrivateProfileSection(SECTION_GLOBAL, global, SIZEOF_SECTION,
				 cr->lpFileName);
	/* split global "key=value" entries in place */
	KV_LIST global_kv = KV_LIST_INIT(global);
	int has_global = !KV_LIST_EMPTY(global_kv);
	struct scenarios *s = scenarios_alloc();
	assert(NULL != s);
	if (!s) {
		ret = -1;
		goto err_gkeys;
	}
	/* remember a "group" value from the global section, if any */
	LPTSTR global_group = NULL;
	for (KV_LIST it = global_kv; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) {
		if (_tcscmp(KV_LIST_KEY(it), KEY_GROUP) == 0) {
			global_group = KV_LIST_VALUE(it);
			break;
		}
	}
	TCHAR *section;
	for (LPTSTR group_name = sections; !NULL_LIST_EMPTY(group_name);
	     group_name = NULL_LIST_NEXT(group_name)) {
		/*
		 * Check whether a group is a scenario
		 * or global section.
		 */
		if (!is_scenario(group_name))
			continue;
		/*
		 * Check for KEY_BENCHMARK which contains benchmark name.
		 * If not present the benchmark name is the same as the
		 * name of the section.
		 */
		/*
		 * NOTE(review): on malloc failure only ret is set; the code
		 * falls through and passes a NULL buffer to
		 * GetPrivateProfileSection -- confirm and add an error jump.
		 */
		section = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION);
		if (!section)
			ret = -1;
		GetPrivateProfileSection(group_name, section, SIZEOF_SECTION,
					 cr->lpFileName);
		KV_LIST section_kv = KV_LIST_INIT(section);
		struct scenario *scenario = NULL;
		LPTSTR name = NULL;
		LPTSTR group = NULL;
		/* pick out the optional "bench" and "group" keys */
		for (KV_LIST it = section_kv; !KV_LIST_EMPTY(it);
		     KV_LIST_NEXT(it)) {
			if (_tcscmp(KV_LIST_KEY(it), KEY_BENCHMARK) == 0) {
				name = KV_LIST_VALUE(it);
			}
			if (_tcscmp(KV_LIST_KEY(it), KEY_GROUP) == 0) {
				group = KV_LIST_VALUE(it);
			}
		}
		if (name == NULL) {
			scenario = scenario_alloc((const char *)group_name,
						  (const char *)group_name);
		} else {
			scenario = scenario_alloc((const char *)group_name,
						  (const char *)name);
		}
		assert(scenario != NULL);
		if (has_global) {
			/*
			 * Merge key/values from global section.
			 */
			for (KV_LIST it = global_kv; !KV_LIST_EMPTY(it);
			     KV_LIST_NEXT(it)) {
				LPTSTR key = KV_LIST_KEY(it);
				if (!is_argument(key))
					continue;
				LPTSTR value = KV_LIST_VALUE(it);
				assert(NULL != value);
				if (!value) {
					ret = -1;
					goto err_scenarios;
				}
				struct kv *kv = kv_alloc((const char *)key,
							 (const char *)value);
				assert(NULL != kv);
				if (!kv) {
					ret = -1;
					goto err_scenarios;
				}
				TAILQ_INSERT_TAIL(&scenario->head, kv, next);
			}
		}
		/* check for group name */
		if (group) {
			scenario_set_group(scenario, (const char *)group);
		} else if (global_group) {
			scenario_set_group(scenario,
					   (const char *)global_group);
		}
		/* section-local keys override/extend the merged globals */
		for (KV_LIST it = section_kv; !KV_LIST_EMPTY(it);
		     KV_LIST_NEXT(it)) {
			LPTSTR key = KV_LIST_KEY(it);
			if (!is_argument(key))
				continue;
			LPTSTR value = KV_LIST_VALUE(it);
			assert(NULL != value);
			if (!value) {
				ret = -1;
				goto err_scenarios;
			}
			struct kv *kv = kv_alloc((const char *)key,
						 (const char *)value);
			assert(NULL != kv);
			if (!kv) {
				ret = -1;
				goto err_scenarios;
			}
			TAILQ_INSERT_TAIL(&scenario->head, kv, next);
		}
		TAILQ_INSERT_TAIL(&s->head, scenario, next);
		free(section);
	}
	*scenarios = s;
	free(global);
	free(sections);
	return 0;
err_scenarios:
	free(section);
	scenarios_free(s);
err_gkeys:
	free(global);
err_sections:
	free(sections);
	return ret;
}
| 8,745 | 23.430168 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/obj_pmalloc.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pmalloc.cpp -- pmalloc benchmarks definition
*/
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
#include "memops.h"
#include "os.h"
#include "pmalloc.h"
#include "poolset_util.hpp"
#include "valgrind_internal.h"
/*
* The factor used for PMEM pool size calculation, accounts for metadata,
* fragmentation and etc.
*/
#define FACTOR 1.2f
/* The minimum allocation size that pmalloc can perform */
#define ALLOC_MIN_SIZE 64
/* OOB and allocation header size */
#define OOB_HEADER_SIZE 64
/*
* prog_args - command line parsed arguments
*/
struct prog_args {
size_t minsize; /* minimum size for random allocation size */
bool use_random_size; /* if set, use random size allocations */
unsigned seed; /* PRNG seed */
};
POBJ_LAYOUT_BEGIN(pmalloc_layout);
POBJ_LAYOUT_ROOT(pmalloc_layout, struct my_root);
POBJ_LAYOUT_TOID(pmalloc_layout, uint64_t);
POBJ_LAYOUT_END(pmalloc_layout);
/*
* my_root - root object
*/
struct my_root {
TOID(uint64_t) offs; /* vector of the allocated object offsets */
};
/*
* obj_bench - variables used in benchmark, passed within functions
*/
struct obj_bench {
PMEMobjpool *pop; /* persistent pool handle */
struct prog_args *pa; /* prog_args structure */
size_t *sizes; /* sizes for allocations */
TOID(struct my_root) root; /* root object's OID */
uint64_t *offs; /* pointer to the vector of offsets */
};
/*
 * obj_init -- common part of the benchmark initialization for pmalloc and
 * pfree. It allocates the PMEM memory pool and the necessary offset vector.
 *
 * Returns 0 on success, -1 on failure; on failure everything allocated so
 * far is released via the goto-cleanup labels at the bottom.
 */
static int
obj_init(struct benchmark *bench, struct benchmark_args *args)
{
	struct my_root *root = nullptr;
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	char path[PATH_MAX];
	if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
		return -1;
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	/* random sizes are drawn from [minsize, dsize), so minsize < dsize */
	if (((struct prog_args *)(args->opts))->minsize >= args->dsize) {
		fprintf(stderr, "Wrong params - allocation size\n");
		return -1;
	}
	auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench));
	if (ob == nullptr) {
		perror("malloc");
		return -1;
	}
	pmembench_set_priv(bench, ob);
	ob->pa = (struct prog_args *)args->opts;
	size_t n_ops_total = args->n_ops_per_thread * args->n_threads;
	assert(n_ops_total != 0);
	/* Create pmemobj pool. */
	size_t alloc_size = args->dsize;
	if (alloc_size < ALLOC_MIN_SIZE)
		alloc_size = ALLOC_MIN_SIZE;
	/* For data objects */
	size_t poolsize = PMEMOBJ_MIN_POOL +
		(n_ops_total * (alloc_size + OOB_HEADER_SIZE))
		/* for offsets */
		+ n_ops_total * sizeof(uint64_t);
	/* multiply by FACTOR for metadata, fragmentation, etc. */
	poolsize = (size_t)(poolsize * FACTOR);
	if (args->is_poolset || type == TYPE_DEVDAX) {
		/* existing poolset/device: size is fixed, just validate it */
		if (args->fsize < poolsize) {
			fprintf(stderr, "file size too large\n");
			goto free_ob;
		}
		poolsize = 0;
	} else if (poolsize < PMEMOBJ_MIN_POOL) {
		poolsize = PMEMOBJ_MIN_POOL;
	}
	if (args->is_dynamic_poolset) {
		/* build a poolset file on the fly and create through it */
		int ret = dynamic_poolset_create(args->fname, poolsize);
		if (ret == -1)
			goto free_ob;
		if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
			goto free_ob;
		poolsize = 0;
	}
	ob->pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pmalloc_layout),
				 poolsize, args->fmode);
	if (ob->pop == nullptr) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		goto free_ob;
	}
	ob->root = POBJ_ROOT(ob->pop, struct my_root);
	if (TOID_IS_NULL(ob->root)) {
		fprintf(stderr, "POBJ_ROOT: %s\n", pmemobj_errormsg());
		goto free_pop;
	}
	root = D_RW(ob->root);
	assert(root != nullptr);
	/* persistent, zeroed vector holding one offset per operation */
	POBJ_ZALLOC(ob->pop, &root->offs, uint64_t,
		    n_ops_total * sizeof(PMEMoid));
	if (TOID_IS_NULL(root->offs)) {
		fprintf(stderr, "POBJ_ZALLOC off_vect: %s\n",
			pmemobj_errormsg());
		goto free_pop;
	}
	ob->offs = D_RW(root->offs);
	ob->sizes = (size_t *)malloc(n_ops_total * sizeof(size_t));
	if (ob->sizes == nullptr) {
		fprintf(stderr, "malloc rand size vect err\n");
		goto free_pop;
	}
	if (ob->pa->use_random_size) {
		/* precompute per-operation sizes in [minsize, dsize) */
		size_t width = args->dsize - ob->pa->minsize;
		for (size_t i = 0; i < n_ops_total; i++) {
			auto hr = (uint32_t)os_rand_r(&ob->pa->seed);
			auto lr = (uint32_t)os_rand_r(&ob->pa->seed);
			uint64_t r64 = (uint64_t)hr << 32 | lr;
			ob->sizes[i] = r64 % width + ob->pa->minsize;
		}
	} else {
		for (size_t i = 0; i < n_ops_total; i++)
			ob->sizes[i] = args->dsize;
	}
	return 0;
free_pop:
	pmemobj_close(ob->pop);
free_ob:
	free(ob);
	return -1;
}
/*
* obj_exit -- common part for the exit function for pmalloc and pfree
* benchmarks. It frees the allocated offset vector and the memory pool.
*/
static int
obj_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
free(ob->sizes);
POBJ_FREE(&D_RW(ob->root)->offs);
pmemobj_close(ob->pop);
return 0;
}
/*
 * pmalloc_init -- initialization for the pmalloc benchmark. Performs only the
 * common initialization.
 *
 * Thin wrapper kept so benchmark_info can reference a distinct init symbol.
 */
static int
pmalloc_init(struct benchmark *bench, struct benchmark_args *args)
{
	return obj_init(bench, args);
}
/*
 * pmalloc_op -- actual benchmark operation. Performs the pmalloc allocations.
 *
 * Each (worker, operation) pair owns one slot in the shared offset vector.
 */
static int
pmalloc_op(struct benchmark *bench, struct operation_info *info)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	uint64_t slot = info->worker->index * info->args->n_ops_per_thread +
		info->index;
	int err = pmalloc(ob->pop, &ob->offs[slot], ob->sizes[slot], 0, 0);
	if (err != 0)
		fprintf(stderr, "pmalloc ret: %d\n", err);
	return err;
}
/* per-worker state for the mixed alloc/free (pmix) benchmark */
struct pmix_worker {
	size_t nobjects;      /* count of currently live allocations */
	size_t shuffle_start; /* first index shuffle_objects() may touch */
	unsigned seed;        /* per-worker PRNG state */
};
/*
 * pmix_worker_init -- initialization of the worker structure
 *
 * Allocates zeroed per-worker state and seeds its PRNG from the
 * benchmark-wide seed option.
 */
static int
pmix_worker_init(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	auto *w = (struct pmix_worker *)calloc(1, sizeof(struct pmix_worker));
	if (w == nullptr)
		return -1;
	w->seed = ob->pa->seed;
	worker->priv = w;
	return 0;
}
/*
 * pmix_worker_fini -- destruction of the worker structure
 */
static void
pmix_worker_fini(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	free(worker->priv);
}
/*
* shuffle_objects -- randomly shuffle elements on a list
*
* Ideally, we wouldn't count the time this function takes, but for all
* practical purposes this is fast enough and isn't visible on the results.
* Just make sure the amount of objects to shuffle is not large.
*/
static void
shuffle_objects(uint64_t *objects, size_t start, size_t nobjects,
unsigned *seed)
{
uint64_t tmp;
size_t dest;
for (size_t n = start; n < nobjects; ++n) {
dest = RRAND_R(seed, nobjects - 1, 0);
tmp = objects[n];
objects[n] = objects[dest];
objects[dest] = tmp;
}
}
#define FREE_PCT 10
#define FREE_OPS 10
/*
 * pmix_op -- mixed workload benchmark
 *
 * With probability FREE_PCT% (once more than FREE_OPS objects are live),
 * shuffles the not-yet-shuffled tail of this worker's object array and
 * frees FREE_OPS of them; otherwise allocates one new object.
 */
static int
pmix_op(struct benchmark *bench, struct operation_info *info)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	auto *w = (struct pmix_worker *)info->worker->priv;
	/* this worker's slice of the shared offset vector */
	uint64_t idx = info->worker->index * info->args->n_ops_per_thread;
	uint64_t *objects = &ob->offs[idx];
	if (w->nobjects > FREE_OPS && FREE_PCT > RRAND_R(&w->seed, 100, 0)) {
		shuffle_objects(objects, w->shuffle_start, w->nobjects,
				&w->seed);
		for (int i = 0; i < FREE_OPS; ++i) {
			/*
			 * NOTE(review): pfree gets the address of a stack
			 * copy, so the array slot itself keeps its stale
			 * offset; the slot is retired by decrementing
			 * nobjects -- presumably intentional, confirm.
			 */
			uint64_t off = objects[--w->nobjects];
			pfree(ob->pop, &off);
		}
		w->shuffle_start = w->nobjects;
	} else {
		int ret = pmalloc(ob->pop, &objects[w->nobjects++],
				  ob->sizes[idx + info->index], 0, 0);
		if (ret) {
			fprintf(stderr, "pmalloc ret: %d\n", ret);
			return ret;
		}
	}
	return 0;
}
/*
* pmalloc_exit -- the end of the pmalloc benchmark. Frees the memory allocated
* during pmalloc_op and performs the common exit operations.
*/
static int
pmalloc_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
for (size_t i = 0; i < args->n_ops_per_thread * args->n_threads; i++) {
if (ob->offs[i])
pfree(ob->pop, &ob->offs[i]);
}
return obj_exit(bench, args);
}
/*
 * pfree_init -- initialization for the pfree benchmark. Performs the common
 * initialization and allocates the memory to be freed during pfree_op.
 *
 * On a failed allocation, everything allocated so far is rolled back and
 * the common exit routine is invoked before returning the error.
 */
static int
pfree_init(struct benchmark *bench, struct benchmark_args *args)
{
	int ret = obj_init(bench, args);
	if (ret != 0)
		return ret;
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	size_t total = args->n_ops_per_thread * args->n_threads;
	for (size_t i = 0; i < total; i++) {
		ret = pmalloc(ob->pop, &ob->offs[i], ob->sizes[i], 0, 0);
		if (ret == 0)
			continue;
		fprintf(stderr, "pmalloc at idx %" PRIu64 " ret: %s\n", i,
			pmemobj_errormsg());
		/* free the allocated memory */
		while (i > 0)
			pfree(ob->pop, &ob->offs[--i]);
		obj_exit(bench, args);
		return ret;
	}
	return 0;
}
/*
 * pfree_op -- actual benchmark operation. Performs the pfree operation.
 */
static int
pfree_op(struct benchmark *bench, struct operation_info *info)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	uint64_t slot = info->worker->index * info->args->n_ops_per_thread +
		info->index;
	pfree(ob->pop, &ob->offs[slot]);
	return 0;
}
/* command line options definition */
static struct benchmark_clo pmalloc_clo[3];
/*
 * Stores information about pmalloc benchmark.
 */
static struct benchmark_info pmalloc_info;
/*
 * Stores information about pfree benchmark.
 */
static struct benchmark_info pfree_info;
/*
 * Stores information about pmix benchmark.
 */
static struct benchmark_info pmix_info;

/*
 * obj_pmalloc_constructor -- fills in the shared CLO table and the three
 * benchmark descriptors, then registers pmalloc, pfree and pmix.
 *
 * NOTE: the original chained two pmalloc_info assignments with a stray
 * comma operator; normalized to one statement per assignment.
 */
CONSTRUCTOR(obj_pmalloc_constructor)
void
obj_pmalloc_constructor(void)
{
	pmalloc_clo[0].opt_short = 'r';
	pmalloc_clo[0].opt_long = "random";
	pmalloc_clo[0].descr = "Use random size allocations - "
			       "from min-size to data-size";
	pmalloc_clo[0].off =
		clo_field_offset(struct prog_args, use_random_size);
	pmalloc_clo[0].type = CLO_TYPE_FLAG;

	pmalloc_clo[1].opt_short = 'm';
	pmalloc_clo[1].opt_long = "min-size";
	pmalloc_clo[1].descr = "Minimum size of allocation for "
			       "random mode";
	pmalloc_clo[1].type = CLO_TYPE_UINT;
	pmalloc_clo[1].off = clo_field_offset(struct prog_args, minsize);
	pmalloc_clo[1].def = "1";
	pmalloc_clo[1].type_uint.size =
		clo_field_size(struct prog_args, minsize);
	pmalloc_clo[1].type_uint.base = CLO_INT_BASE_DEC;
	pmalloc_clo[1].type_uint.min = 1;
	pmalloc_clo[1].type_uint.max = UINT64_MAX;

	pmalloc_clo[2].opt_short = 'S';
	pmalloc_clo[2].opt_long = "seed";
	pmalloc_clo[2].descr = "Random mode seed value";
	pmalloc_clo[2].off = clo_field_offset(struct prog_args, seed);
	pmalloc_clo[2].def = "1";
	pmalloc_clo[2].type = CLO_TYPE_UINT;
	pmalloc_clo[2].type_uint.size = clo_field_size(struct prog_args, seed);
	pmalloc_clo[2].type_uint.base = CLO_INT_BASE_DEC;
	pmalloc_clo[2].type_uint.min = 1;
	pmalloc_clo[2].type_uint.max = UINT_MAX;

	/* pmalloc benchmark descriptor */
	pmalloc_info.name = "pmalloc";
	pmalloc_info.brief = "Benchmark for internal pmalloc() "
			     "operation";
	pmalloc_info.init = pmalloc_init;
	pmalloc_info.exit = pmalloc_exit;
	pmalloc_info.multithread = true;
	pmalloc_info.multiops = true;
	pmalloc_info.operation = pmalloc_op;
	pmalloc_info.measure_time = true;
	pmalloc_info.clos = pmalloc_clo;
	pmalloc_info.nclos = ARRAY_SIZE(pmalloc_clo);
	pmalloc_info.opts_size = sizeof(struct prog_args);
	pmalloc_info.rm_file = true;
	pmalloc_info.allow_poolset = true;
	REGISTER_BENCHMARK(pmalloc_info);

	/* pfree benchmark descriptor */
	pfree_info.name = "pfree";
	pfree_info.brief = "Benchmark for internal pfree() "
			   "operation";
	pfree_info.init = pfree_init;
	pfree_info.exit = pmalloc_exit; /* same as for pmalloc */
	pfree_info.multithread = true;
	pfree_info.multiops = true;
	pfree_info.operation = pfree_op;
	pfree_info.measure_time = true;
	pfree_info.clos = pmalloc_clo;
	pfree_info.nclos = ARRAY_SIZE(pmalloc_clo);
	pfree_info.opts_size = sizeof(struct prog_args);
	pfree_info.rm_file = true;
	pfree_info.allow_poolset = true;
	REGISTER_BENCHMARK(pfree_info);

	/* pmix benchmark descriptor */
	pmix_info.name = "pmix";
	pmix_info.brief = "Benchmark for mixed alloc/free workload";
	pmix_info.init = pmalloc_init;
	pmix_info.exit = pmalloc_exit; /* same as for pmalloc */
	pmix_info.multithread = true;
	pmix_info.multiops = true;
	pmix_info.operation = pmix_op;
	pmix_info.init_worker = pmix_worker_init;
	pmix_info.free_worker = pmix_worker_fini;
	pmix_info.measure_time = true;
	pmix_info.clos = pmalloc_clo;
	pmix_info.nclos = ARRAY_SIZE(pmalloc_clo);
	pmix_info.opts_size = sizeof(struct prog_args);
	pmix_info.rm_file = true;
	pmix_info.allow_poolset = true;
	REGISTER_BENCHMARK(pmix_info);
}
| 14,547 | 26.192523 | 79 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/map_bench.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_bench.cpp -- benchmarks for: ctree, btree, rtree, rbtree, hashmap_atomic
* and hashmap_tx from examples.
*/
#include <cassert>
#include "benchmark.hpp"
#include "file.h"
#include "os.h"
#include "os_thread.h"
#include "poolset_util.hpp"
#include "map.h"
#include "map_btree.h"
#include "map_ctree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_rp.h"
#include "map_hashmap_tx.h"
#include "map_rbtree.h"
#include "map_rtree.h"
/* Values less than 3 is not suitable for current rtree implementation */
#define FACTOR 3
#define ALLOC_OVERHEAD 64
TOID_DECLARE_ROOT(struct root);
struct root {
TOID(struct map) map;
};
#define OBJ_TYPE_NUM 1
#define swap(a, b) \
do { \
__typeof__(a) _tmp = (a); \
(a) = (b); \
(b) = _tmp; \
} while (0)
/* Values less than 2048 is not suitable for current rtree implementation */
#define SIZE_PER_KEY 2048
static const struct {
const char *str;
const struct map_ops *ops;
} map_types[] = {
{"ctree", MAP_CTREE}, {"btree", MAP_BTREE},
{"rtree", MAP_RTREE}, {"rbtree", MAP_RBTREE},
{"hashmap_tx", MAP_HASHMAP_TX}, {"hashmap_atomic", MAP_HASHMAP_ATOMIC},
{"hashmap_rp", MAP_HASHMAP_RP}};
#define MAP_TYPES_NUM (sizeof(map_types) / sizeof(map_types[0]))
struct map_bench_args {
unsigned seed;
uint64_t max_key;
char *type;
bool ext_tx;
bool alloc;
};
struct map_bench_worker {
uint64_t *keys;
size_t nkeys;
};
/* shared state for the map_* benchmarks */
struct map_bench {
	struct map_ctx *mapc; /* map implementation context */
	os_mutex_t lock;      /* serializes all map operations */
	PMEMobjpool *pop;     /* pool backing the map and its objects */
	size_t pool_size;
	size_t nkeys;
	size_t init_nkeys;
	uint64_t *keys;
	struct benchmark_args *args;
	struct map_bench_args *margs;
	TOID(struct root) root;
	PMEMoid root_oid; /* inserted/looked-up by the *_root_op variants */
	TOID(struct map) map;
	/* op dispatch; selected variants share the map_*_op entry points */
	int (*insert)(struct map_bench *, uint64_t);
	int (*remove)(struct map_bench *, uint64_t);
	int (*get)(struct map_bench *, uint64_t);
};
/*
 * mutex_lock_nofail -- locks mutex and aborts if locking failed
 */
static void
mutex_lock_nofail(os_mutex_t *lock)
{
	errno = os_mutex_lock(lock);
	if (errno != 0) {
		perror("os_mutex_lock");
		abort();
	}
}
/*
 * mutex_unlock_nofail -- unlocks mutex and aborts if unlocking failed
 */
static void
mutex_unlock_nofail(os_mutex_t *lock)
{
	errno = os_mutex_unlock(lock);
	if (errno != 0) {
		perror("os_mutex_unlock");
		abort();
	}
}
/*
 * get_key -- return 64-bit random key
 *
 * Builds the key from two 32-bit PRNG draws; when max_key is non-zero the
 * result is reduced modulo max_key.
 */
static uint64_t
get_key(unsigned *seed, uint64_t max_key)
{
	uint64_t lo = (uint64_t)os_rand_r(seed);
	uint64_t hi = (uint64_t)os_rand_r(seed);
	uint64_t key = (hi << 32) | lo;
	return max_key ? key % max_key : key;
}
/*
 * parse_map_type -- parse type of map
 *
 * Returns the map_ops matching the given name, or nullptr if unknown.
 */
static const struct map_ops *
parse_map_type(const char *str)
{
	for (size_t i = 0; i < MAP_TYPES_NUM; i++)
		if (strcmp(map_types[i].str, str) == 0)
			return map_types[i].ops;
	return nullptr;
}
/*
 * map_remove_free_op -- remove and free object from map
 *
 * Removes the value stored under _key_ and frees it inside the same
 * transaction. Returns 0 on success, -1 when the key is absent or the
 * transaction aborts.
 */
static int
map_remove_free_op(struct map_bench *map_bench, uint64_t key)
{
	/*
	 * volatile: TX_BEGIN is setjmp-based, so a local modified inside
	 * the transaction must be volatile to survive TX_ONABORT's longjmp.
	 */
	volatile int ret = 0;
	TX_BEGIN(map_bench->pop)
	{
		PMEMoid val = map_remove(map_bench->mapc, map_bench->map, key);
		if (OID_IS_NULL(val))
			ret = -1;
		else
			pmemobj_tx_free(val);
	}
	TX_ONABORT
	{
		ret = -1;
	}
	TX_END
	return ret;
}
/*
* map_remove_root_op -- remove root object from map
*/
static int
map_remove_root_op(struct map_bench *map_bench, uint64_t key)
{
PMEMoid val = map_remove(map_bench->mapc, map_bench->map, key);
return !OID_EQUALS(val, map_bench->root_oid);
}
/*
 * map_remove_op -- main operation for map_remove benchmark
 *
 * Dispatches to the configured remove variant under the global lock.
 */
static int
map_remove_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct map_bench *)pmembench_get_priv(bench);
	auto *tw = (struct map_bench_worker *)info->worker->priv;
	uint64_t key = tw->keys[info->index];
	mutex_lock_nofail(&mb->lock);
	int result = mb->remove(mb, key);
	mutex_unlock_nofail(&mb->lock);
	return result;
}
/*
 * map_insert_alloc_op -- allocate an object and insert to map
 *
 * Returns 0 on success, non-zero on insert failure or transaction abort.
 */
static int
map_insert_alloc_op(struct map_bench *map_bench, uint64_t key)
{
	/*
	 * volatile added: TX_BEGIN is setjmp-based, so a non-volatile local
	 * written inside the transaction and read after TX_ONABORT's longjmp
	 * has indeterminate value (map_remove_free_op already does this).
	 */
	volatile int ret = 0;
	TX_BEGIN(map_bench->pop)
	{
		PMEMoid oid =
			pmemobj_tx_alloc(map_bench->args->dsize, OBJ_TYPE_NUM);
		ret = map_insert(map_bench->mapc, map_bench->map, key, oid);
	}
	TX_ONABORT
	{
		ret = -1;
	}
	TX_END
	return ret;
}
/*
 * map_insert_root_op -- insert root object to map
 *
 * Variant used when -A/--alloc is NOT set: stores the pool root OID
 * under 'key' instead of allocating a new object.
 */
static int
map_insert_root_op(struct map_bench *map_bench, uint64_t key)
{
	return map_insert(map_bench->mapc, map_bench->map, key,
			  map_bench->root_oid);
}
/*
 * map_insert_op -- main operation for map_insert benchmark
 *
 * Inserts this operation's pre-drawn key under the benchmark-wide lock
 * (the map implementations are not thread-safe).
 */
static int
map_insert_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct map_bench *)pmembench_get_priv(bench);
	auto *wpriv = (struct map_bench_worker *)info->worker->priv;
	uint64_t key = wpriv->keys[info->index];
	mutex_lock_nofail(&mb->lock);
	int result = mb->insert(mb, key);
	mutex_unlock_nofail(&mb->lock);
	return result;
}
/*
 * map_get_obj_op -- get object from map at specified key
 *
 * Used with -A/--alloc: success means a non-NULL object was found.
 * Returns 0 on success, non-zero when the key is absent.
 */
static int
map_get_obj_op(struct map_bench *map_bench, uint64_t key)
{
	PMEMoid val = map_get(map_bench->mapc, map_bench->map, key);
	return OID_IS_NULL(val);
}
/*
 * map_get_root_op -- get root object from map at specified key
 *
 * Used without -A/--alloc: success means the stored OID is the pool
 * root.  Returns 0 on success, non-zero otherwise.
 */
static int
map_get_root_op(struct map_bench *map_bench, uint64_t key)
{
	PMEMoid val = map_get(map_bench->mapc, map_bench->map, key);
	return !OID_EQUALS(val, map_bench->root_oid);
}
/*
 * map_get_op -- main operation for map_get benchmark
 *
 * Looks up this operation's pre-drawn key under the benchmark-wide
 * lock (the map implementations are not thread-safe).
 */
static int
map_get_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct map_bench *)pmembench_get_priv(bench);
	auto *wpriv = (struct map_bench_worker *)info->worker->priv;
	uint64_t key = wpriv->keys[info->index];
	mutex_lock_nofail(&mb->lock);
	int result = mb->get(mb, key);
	mutex_unlock_nofail(&mb->lock);
	return result;
}
/*
 * map_common_init_worker -- common init worker function for map_* benchmarks
 *
 * Allocates per-worker state (the array holding one key per operation)
 * and, when -x/--external-tx is set, opens a transaction that stays
 * open for the worker's whole lifetime (closed in
 * map_common_free_worker).  Returns 0 on success, -1 on failure.
 */
static int
map_common_init_worker(struct benchmark *bench, struct benchmark_args *args,
		       struct worker_info *worker)
{
	struct map_bench_worker *tworker =
		(struct map_bench_worker *)calloc(1, sizeof(*tworker));
	struct map_bench *tree;
	struct map_bench_args *targs;
	if (!tworker) {
		perror("calloc");
		return -1;
	}
	/* one key per operation of this worker */
	tworker->nkeys = args->n_ops_per_thread;
	tworker->keys =
		(uint64_t *)malloc(tworker->nkeys * sizeof(*tworker->keys));
	if (!tworker->keys) {
		perror("malloc");
		goto err_free_worker;
	}
	tree = (struct map_bench *)pmembench_get_priv(bench);
	targs = (struct map_bench_args *)args->opts;
	if (targs->ext_tx) {
		/* one explicit transaction wrapping all operations */
		int ret = pmemobj_tx_begin(tree->pop, nullptr);
		if (ret) {
			/* tx_begin failed -- still must close the stage */
			(void)pmemobj_tx_end();
			goto err_free_keys;
		}
	}
	worker->priv = tworker;
	return 0;
err_free_keys:
	free(tworker->keys);
err_free_worker:
	free(tworker);
	return -1;
}
/*
 * map_common_free_worker -- common cleanup worker function for map_*
 * benchmarks
 *
 * Commits and closes the worker-lifetime transaction opened by
 * map_common_init_worker (when -x/--external-tx is set) and releases
 * the per-worker key array.
 */
static void
map_common_free_worker(struct benchmark *bench, struct benchmark_args *args,
		       struct worker_info *worker)
{
	auto *tworker = (struct map_bench_worker *)worker->priv;
	auto *targs = (struct map_bench_args *)args->opts;
	if (targs->ext_tx) {
		pmemobj_tx_commit();
		(void)pmemobj_tx_end();
	}
	free(tworker->keys);
	free(tworker);
}
/*
 * map_insert_init_worker -- init worker function for map_insert benchmark
 *
 * After the common setup, pre-draws one random key per operation so
 * the measured loop does no PRNG work.
 */
static int
map_insert_init_worker(struct benchmark *bench, struct benchmark_args *args,
		       struct worker_info *worker)
{
	int ret = map_common_init_worker(bench, args, worker);
	if (ret != 0)
		return ret;
	auto *bench_args = (struct map_bench_args *)args->opts;
	assert(bench_args);
	auto *wpriv = (struct map_bench_worker *)worker->priv;
	assert(wpriv);
	size_t i = 0;
	while (i < wpriv->nkeys) {
		wpriv->keys[i] = get_key(&bench_args->seed,
					 bench_args->max_key);
		++i;
	}
	return 0;
}
/*
 * map_global_rand_keys_init -- assign random keys from global keys array
 *
 * Hands this worker a random subset of the keys inserted by
 * map_keys_init.  A key is picked at a random index, the last unused
 * key is swapped into that slot and the pool is shrunk by one -- a
 * partial Fisher-Yates shuffle, so no key is handed out twice across
 * all workers.
 */
static int
map_global_rand_keys_init(struct benchmark *bench, struct benchmark_args *args,
			  struct worker_info *worker)
{
	auto *tree = (struct map_bench *)pmembench_get_priv(bench);
	assert(tree);
	auto *targs = (struct map_bench_args *)args->opts;
	assert(targs);
	auto *tworker = (struct map_bench_worker *)worker->priv;
	assert(tworker);
	/* must not run out of pre-inserted keys */
	assert(tree->init_nkeys);
	/*
	 * Assign random keys from global tree->keys array without repetitions.
	 */
	for (size_t i = 0; i < tworker->nkeys; i++) {
		uint64_t index = get_key(&targs->seed, tree->init_nkeys);
		tworker->keys[i] = tree->keys[index];
		/* move the last unused key into the consumed slot */
		swap(tree->keys[index], tree->keys[tree->init_nkeys - 1]);
		tree->init_nkeys--;
	}
	return 0;
}
/*
 * map_remove_init_worker -- init worker function for map_remove benchmark
 *
 * Performs the common setup, then claims a random slice of the
 * pre-inserted keys for this worker.
 */
static int
map_remove_init_worker(struct benchmark *bench, struct benchmark_args *args,
		       struct worker_info *worker)
{
	int ret = map_common_init_worker(bench, args, worker);
	if (ret)
		return ret;
	if (map_global_rand_keys_init(bench, args, worker) == 0)
		return 0;
	/* roll back the common setup on failure */
	map_common_free_worker(bench, args, worker);
	return -1;
}
/*
 * map_bench_get_init_worker -- init worker function for map_get benchmark
 *
 * Performs the common setup, then claims a random slice of the
 * pre-inserted keys for this worker.
 */
static int
map_bench_get_init_worker(struct benchmark *bench, struct benchmark_args *args,
			  struct worker_info *worker)
{
	int ret = map_common_init_worker(bench, args, worker);
	if (ret)
		return ret;
	if (map_global_rand_keys_init(bench, args, worker) == 0)
		return 0;
	/* roll back the common setup on failure */
	map_common_free_worker(bench, args, worker);
	return -1;
}
/*
* map_common_init -- common init function for map_* benchmarks
*/
static int
map_common_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench);
assert(args);
assert(args->opts);
char path[PATH_MAX];
if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
return -1;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
size_t size_per_key;
struct map_bench *map_bench =
(struct map_bench *)calloc(1, sizeof(*map_bench));
if (!map_bench) {
perror("calloc");
return -1;
}
map_bench->args = args;
map_bench->margs = (struct map_bench_args *)args->opts;
const struct map_ops *ops = parse_map_type(map_bench->margs->type);
if (!ops) {
fprintf(stderr, "invalid map type value specified -- '%s'\n",
map_bench->margs->type);
goto err_free_bench;
}
if (map_bench->margs->ext_tx && args->n_threads > 1) {
fprintf(stderr, "external transaction "
"requires single thread\n");
goto err_free_bench;
}
if (map_bench->margs->alloc) {
map_bench->insert = map_insert_alloc_op;
map_bench->remove = map_remove_free_op;
map_bench->get = map_get_obj_op;
} else {
map_bench->insert = map_insert_root_op;
map_bench->remove = map_remove_root_op;
map_bench->get = map_get_root_op;
}
map_bench->nkeys = args->n_threads * args->n_ops_per_thread;
map_bench->init_nkeys = map_bench->nkeys;
size_per_key = map_bench->margs->alloc
? SIZE_PER_KEY + map_bench->args->dsize + ALLOC_OVERHEAD
: SIZE_PER_KEY;
map_bench->pool_size = map_bench->nkeys * size_per_key * FACTOR;
if (args->is_poolset || type == TYPE_DEVDAX) {
if (args->fsize < map_bench->pool_size) {
fprintf(stderr, "file size too large\n");
goto err_free_bench;
}
map_bench->pool_size = 0;
} else if (map_bench->pool_size < 2 * PMEMOBJ_MIN_POOL) {
map_bench->pool_size = 2 * PMEMOBJ_MIN_POOL;
}
if (args->is_dynamic_poolset) {
int ret = dynamic_poolset_create(args->fname,
map_bench->pool_size);
if (ret == -1)
goto err_free_bench;
if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
goto err_free_bench;
map_bench->pool_size = 0;
}
map_bench->pop = pmemobj_create(path, "map_bench", map_bench->pool_size,
args->fmode);
if (!map_bench->pop) {
fprintf(stderr, "pmemobj_create: %s\n", pmemobj_errormsg());
goto err_free_bench;
}
errno = os_mutex_init(&map_bench->lock);
if (errno) {
perror("os_mutex_init");
goto err_close;
}
map_bench->mapc = map_ctx_init(ops, map_bench->pop);
if (!map_bench->mapc) {
perror("map_ctx_init");
goto err_destroy_lock;
}
map_bench->root = POBJ_ROOT(map_bench->pop, struct root);
if (TOID_IS_NULL(map_bench->root)) {
fprintf(stderr, "pmemobj_root: %s\n", pmemobj_errormsg());
goto err_free_map;
}
map_bench->root_oid = map_bench->root.oid;
if (map_create(map_bench->mapc, &D_RW(map_bench->root)->map, nullptr)) {
perror("map_new");
goto err_free_map;
}
map_bench->map = D_RO(map_bench->root)->map;
pmembench_set_priv(bench, map_bench);
return 0;
err_free_map:
map_ctx_free(map_bench->mapc);
err_destroy_lock:
os_mutex_destroy(&map_bench->lock);
err_close:
pmemobj_close(map_bench->pop);
err_free_bench:
free(map_bench);
return -1;
}
/*
 * map_common_exit -- common cleanup function for map_* benchmarks
 *
 * Tears down the state created by map_common_init in reverse order.
 */
static int
map_common_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *mb = (struct map_bench *)pmembench_get_priv(bench);
	os_mutex_destroy(&mb->lock);
	map_ctx_free(mb->mapc);
	pmemobj_close(mb->pop);
	free(mb);
	return 0;
}
/*
 * map_keys_init -- initialize array with keys
 *
 * Pre-populates the map with 'nkeys' unique random keys (consumed by
 * the get/remove benchmarks) inside a single transaction, recording
 * the keys in map_bench->keys.  Returns 0 on success; on failure the
 * keys array is freed and non-zero is returned.
 */
static int
map_keys_init(struct benchmark *bench, struct benchmark_args *args)
{
	auto *map_bench = (struct map_bench *)pmembench_get_priv(bench);
	assert(map_bench);
	auto *targs = (struct map_bench_args *)args->opts;
	assert(targs);
	assert(map_bench->nkeys != 0);
	map_bench->keys =
		(uint64_t *)malloc(map_bench->nkeys * sizeof(*map_bench->keys));
	if (!map_bench->keys) {
		perror("malloc");
		return -1;
	}
	int ret = 0;
	mutex_lock_nofail(&map_bench->lock);
	TX_BEGIN(map_bench->pop)
	{
		for (size_t i = 0; i < map_bench->nkeys; i++) {
			uint64_t key;
			PMEMoid oid;
			/* redraw until the key is not yet in the map */
			do {
				key = get_key(&targs->seed, targs->max_key);
				oid = map_get(map_bench->mapc, map_bench->map,
					      key);
			} while (!OID_IS_NULL(oid));
			/* store a fresh allocation or the pool root */
			if (targs->alloc)
				oid = pmemobj_tx_alloc(args->dsize,
						       OBJ_TYPE_NUM);
			else
				oid = map_bench->root_oid;
			ret = map_insert(map_bench->mapc, map_bench->map, key,
					 oid);
			if (ret)
				break;
			map_bench->keys[i] = key;
		}
	}
	TX_ONABORT
	{
		ret = -1;
	}
	TX_END
	mutex_unlock_nofail(&map_bench->lock);
	if (!ret)
		return 0;
	/* failed -- the keys array would be incomplete, drop it */
	free(map_bench->keys);
	return ret;
}
/*
 * map_keys_exit -- cleanup of keys array
 *
 * Releases the global keys array allocated by map_keys_init.
 */
static int
map_keys_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *mb = (struct map_bench *)pmembench_get_priv(bench);
	free(mb->keys);
	return 0;
}
/*
 * map_remove_init -- init function for map_remove benchmark
 *
 * Common setup plus pre-insertion of the keys the workers will remove.
 */
static int
map_remove_init(struct benchmark *bench, struct benchmark_args *args)
{
	int ret = map_common_init(bench, args);
	if (ret)
		return ret;
	if (map_keys_init(bench, args) == 0)
		return 0;
	/* roll back the common setup on failure */
	map_common_exit(bench, args);
	return -1;
}
/*
 * map_remove_exit -- cleanup function for map_remove benchmark
 *
 * Drops the keys array first, then the common benchmark state.
 */
static int
map_remove_exit(struct benchmark *bench, struct benchmark_args *args)
{
	(void)map_keys_exit(bench, args);
	return map_common_exit(bench, args);
}
/*
 * map_bench_get_init -- init function for map_get benchmark
 *
 * Common setup plus pre-insertion of the keys the workers will look up.
 */
static int
map_bench_get_init(struct benchmark *bench, struct benchmark_args *args)
{
	int ret = map_common_init(bench, args);
	if (ret)
		return ret;
	if (map_keys_init(bench, args) == 0)
		return 0;
	/* roll back the common setup on failure */
	map_common_exit(bench, args);
	return -1;
}
/*
 * map_get_exit -- exit function for map_get benchmark
 *
 * Drops the keys array first, then the common benchmark state.
 */
static int
map_get_exit(struct benchmark *bench, struct benchmark_args *args)
{
	(void)map_keys_exit(bench, args);
	return map_common_exit(bench, args);
}
static struct benchmark_clo map_bench_clos[5];
static struct benchmark_info map_insert_info;
static struct benchmark_info map_remove_info;
static struct benchmark_info map_get_info;
CONSTRUCTOR(map_bench_constructor)
void
map_bench_constructor(void)
{
	/* command line options shared by all three map_* benchmarks */
	map_bench_clos[0].opt_short = 'T';
	map_bench_clos[0].opt_long = "type";
	map_bench_clos[0].descr =
		"Type of container "
		"[ctree|btree|rtree|rbtree|hashmap_tx|hashmap_atomic]";
	map_bench_clos[0].off = clo_field_offset(struct map_bench_args, type);
	map_bench_clos[0].type = CLO_TYPE_STR;
	map_bench_clos[0].def = "ctree";
	map_bench_clos[1].opt_short = 's';
	map_bench_clos[1].opt_long = "seed";
	map_bench_clos[1].descr = "PRNG seed";
	map_bench_clos[1].off = clo_field_offset(struct map_bench_args, seed);
	map_bench_clos[1].type = CLO_TYPE_UINT;
	map_bench_clos[1].def = "1";
	map_bench_clos[1].type_uint.size =
		clo_field_size(struct map_bench_args, seed);
	map_bench_clos[1].type_uint.base = CLO_INT_BASE_DEC;
	map_bench_clos[1].type_uint.min = 1;
	map_bench_clos[1].type_uint.max = UINT_MAX;
	map_bench_clos[2].opt_short = 'M';
	map_bench_clos[2].opt_long = "max-key";
	map_bench_clos[2].descr = "maximum key (0 means no limit)";
	map_bench_clos[2].off =
		clo_field_offset(struct map_bench_args, max_key);
	map_bench_clos[2].type = CLO_TYPE_UINT;
	map_bench_clos[2].def = "0";
	/*
	 * Fixed copy-paste bug: the size must describe the 'max_key'
	 * field (uint64_t), not 'seed' (unsigned) -- measuring 'seed'
	 * truncated parsed values to 4 bytes and capped the accepted
	 * range below the declared UINT64_MAX maximum.
	 */
	map_bench_clos[2].type_uint.size =
		clo_field_size(struct map_bench_args, max_key);
	map_bench_clos[2].type_uint.base = CLO_INT_BASE_DEC;
	map_bench_clos[2].type_uint.min = 0;
	map_bench_clos[2].type_uint.max = UINT64_MAX;
	map_bench_clos[3].opt_short = 'x';
	map_bench_clos[3].opt_long = "external-tx";
	map_bench_clos[3].descr = "Use external transaction for all "
				  "operations (works with single "
				  "thread only)";
	map_bench_clos[3].off = clo_field_offset(struct map_bench_args, ext_tx);
	map_bench_clos[3].type = CLO_TYPE_FLAG;
	map_bench_clos[4].opt_short = 'A';
	map_bench_clos[4].opt_long = "alloc";
	map_bench_clos[4].descr = "Allocate object of specified size "
				  "when inserting";
	map_bench_clos[4].off = clo_field_offset(struct map_bench_args, alloc);
	map_bench_clos[4].type = CLO_TYPE_FLAG;
	/* map_insert benchmark registration */
	map_insert_info.name = "map_insert";
	map_insert_info.brief = "Inserting to tree map";
	map_insert_info.init = map_common_init;
	map_insert_info.exit = map_common_exit;
	map_insert_info.multithread = true;
	map_insert_info.multiops = true;
	map_insert_info.init_worker = map_insert_init_worker;
	map_insert_info.free_worker = map_common_free_worker;
	map_insert_info.operation = map_insert_op;
	map_insert_info.measure_time = true;
	map_insert_info.clos = map_bench_clos;
	map_insert_info.nclos = ARRAY_SIZE(map_bench_clos);
	map_insert_info.opts_size = sizeof(struct map_bench_args);
	map_insert_info.rm_file = true;
	map_insert_info.allow_poolset = true;
	REGISTER_BENCHMARK(map_insert_info);
	/* map_remove benchmark registration */
	map_remove_info.name = "map_remove";
	map_remove_info.brief = "Removing from tree map";
	map_remove_info.init = map_remove_init;
	map_remove_info.exit = map_remove_exit;
	map_remove_info.multithread = true;
	map_remove_info.multiops = true;
	map_remove_info.init_worker = map_remove_init_worker;
	map_remove_info.free_worker = map_common_free_worker;
	map_remove_info.operation = map_remove_op;
	map_remove_info.measure_time = true;
	map_remove_info.clos = map_bench_clos;
	map_remove_info.nclos = ARRAY_SIZE(map_bench_clos);
	map_remove_info.opts_size = sizeof(struct map_bench_args);
	map_remove_info.rm_file = true;
	map_remove_info.allow_poolset = true;
	REGISTER_BENCHMARK(map_remove_info);
	/* map_get benchmark registration */
	map_get_info.name = "map_get";
	map_get_info.brief = "Tree lookup";
	map_get_info.init = map_bench_get_init;
	map_get_info.exit = map_get_exit;
	map_get_info.multithread = true;
	map_get_info.multiops = true;
	map_get_info.init_worker = map_bench_get_init_worker;
	map_get_info.free_worker = map_common_free_worker;
	map_get_info.operation = map_get_op;
	map_get_info.measure_time = true;
	map_get_info.clos = map_bench_clos;
	map_get_info.nclos = ARRAY_SIZE(map_bench_clos);
	map_get_info.opts_size = sizeof(struct map_bench_args);
	map_get_info.rm_file = true;
	map_get_info.allow_poolset = true;
	REGISTER_BENCHMARK(map_get_info);
}
| 21,937 | 24.043379 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/clo.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* clo.cpp -- command line options module definitions
*/
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstring>
#include <err.h>
#include <getopt.h>
#include "benchmark.hpp"
#include "clo.hpp"
#include "clo_vec.hpp"
#include "queue.h"
#include "scenario.hpp"
/* portable min/max helpers -- NOTE: arguments are evaluated twice */
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
/* parses one CLO (possibly a list/range of values) into 'clovec' */
typedef int (*clo_parse_fn)(struct benchmark_clo *clo, const char *arg,
			    struct clo_vec *clovec);
/* parses a single scalar value into the buffer at 'ptr' */
typedef int (*clo_parse_single_fn)(struct benchmark_clo *clo, const char *arg,
				   void *ptr);
/* expands a first:step:last range description into 'vlist' */
typedef int (*clo_eval_range_fn)(struct benchmark_clo *clo, void *first,
				 void *step, void *last, char type,
				 struct clo_vec_vlist *vlist);
/* renders the value stored at 'addr' + clo->off as a string */
typedef const char *(*clo_str_fn)(struct benchmark_clo *clo, void *addr,
				  size_t size);
#define STR_BUFF_SIZE 1024
/* shared scratch buffer for clo_str_* -- returned pointers stay valid
 * only until the next clo_str_* call; not thread-safe */
static char str_buff[STR_BUFF_SIZE];
/*
 * clo_parse_flag -- (internal) parse flag
 *
 * A missing argument means "true"; otherwise only the literal strings
 * "true" and "false" are accepted.
 */
static int
clo_parse_flag(struct benchmark_clo *clo, const char *arg,
	       struct clo_vec *clovec)
{
	bool value;
	if (arg == nullptr)
		value = true;
	else if (strcmp(arg, "true") == 0)
		value = true;
	else if (strcmp(arg, "false") == 0)
		value = false;
	else
		return -1;
	return clo_vec_memcpy(clovec, clo->off, sizeof(value), &value);
}
/*
 * clo_parse_str -- (internal) parse string value
 *
 * Splits a comma-separated argument into individual strings and stores
 * the resulting pointers in the CLO vector.  The duplicated buffer is
 * handed to 'clovec' (clo_vec_add_alloc), which owns and later frees
 * it; the stored pointers point into that buffer.
 * NOTE(review): strtok() keeps static state and is not reentrant.
 */
static int
clo_parse_str(struct benchmark_clo *clo, const char *arg,
	      struct clo_vec *clovec)
{
	struct clo_vec_vlist *vlist = clo_vec_vlist_alloc();
	assert(vlist != nullptr);
	char *str = strdup(arg);
	assert(str != nullptr);
	/* transfer ownership of 'str' to the CLO vector */
	clo_vec_add_alloc(clovec, str);
	char *next = strtok(str, ",");
	while (next) {
		clo_vec_vlist_add(vlist, &next, sizeof(next));
		next = strtok(nullptr, ",");
	}
	int ret = clo_vec_memcpy_list(clovec, clo->off, sizeof(str), vlist);
	clo_vec_vlist_free(vlist);
	return ret;
}
/*
 * is_oct -- check if string may be octal number
 *
 * An octal literal starts with '0', optionally preceded by a minus sign.
 */
static int
is_oct(const char *arg, size_t len)
{
	if (arg[0] == '0')
		return 1;
	return len > 1 && arg[0] == '-' && arg[1] == '0';
}
/*
 * is_hex -- check if string may be hexadecimal number
 *
 * A hex literal starts with "0x"/"0X" followed by at least one more
 * character, optionally preceded by a minus sign.
 */
static int
is_hex(const char *arg, size_t len)
{
	/* skip an optional leading minus sign */
	if (arg[0] == '-') {
		++arg;
		--len;
	}
	if (len <= 2)
		return 0;
	return arg[0] == '0' && (arg[1] == 'x' || arg[1] == 'X');
}
}
/*
* parse_number_base -- parse string as integer of given sign and base
*/
static int
parse_number_base(const char *arg, void *value, int s, int base)
{
char *end;
errno = 0;
if (s) {
auto *v = (int64_t *)value;
*v = strtoll(arg, &end, base);
} else {
auto *v = (uint64_t *)value;
*v = strtoull(arg, &end, base);
}
if (errno || *end != '\0')
return -1;
return 0;
}
/*
 * parse_number -- parse string as integer of given sign and allowed bases
 *
 * Tries the bases permitted by the 'base' bitmask in order: hex, then
 * octal, then decimal.  Returns 0 on the first successful conversion.
 */
static int
parse_number(const char *arg, size_t len, void *value, int s, int base)
{
	if ((base & CLO_INT_BASE_HEX) && is_hex(arg, len) &&
	    parse_number_base(arg, value, s, 16) == 0)
		return 0;
	if ((base & CLO_INT_BASE_OCT) && is_oct(arg, len) &&
	    parse_number_base(arg, value, s, 8) == 0)
		return 0;
	if ((base & CLO_INT_BASE_DEC) &&
	    parse_number_base(arg, value, s, 10) == 0)
		return 0;
	return -1;
}
/*
 * clo_parse_single_int -- (internal) parse single int value
 *
 * Parses 'arg' as a signed integer and stores clo->type_int.size bytes
 * of the result at 'ptr'.  Fails with EINVAL on malformed input, and
 * with ERANGE when the value does not fit the destination width or the
 * [clo->type_int.min, clo->type_int.max] interval.
 */
static int
clo_parse_single_int(struct benchmark_clo *clo, const char *arg, void *ptr)
{
	int64_t value = 0;
	size_t len = strlen(arg);
	if (parse_number(arg, len, &value, 1, clo->type_int.base)) {
		errno = EINVAL;
		return -1;
	}
	/*
	 * Derive the destination-width limits from unsigned arithmetic:
	 * the previous '(int64_t)1 << (8 * size - 1)' left-shifted into
	 * the sign bit for size == 8, which is undefined behavior.
	 */
	int64_t tmax =
		(int64_t)(((uint64_t)1 << (8 * clo->type_int.size - 1)) - 1);
	int64_t tmin = -tmax - 1;
	tmax = min(tmax, clo->type_int.max);
	tmin = max(tmin, clo->type_int.min);
	if (value > tmax || value < tmin) {
		errno = ERANGE;
		return -1;
	}
	memcpy(ptr, &value, clo->type_int.size);
	return 0;
}
/*
 * clo_parse_single_uint -- (internal) parse single uint value
 *
 * Parses 'arg' as an unsigned integer and stores clo->type_uint.size
 * bytes of the result at 'ptr'.  A leading '-' or malformed input
 * fails with EINVAL; values outside the destination width or the
 * [clo->type_uint.min, clo->type_uint.max] interval fail with ERANGE.
 */
static int
clo_parse_single_uint(struct benchmark_clo *clo, const char *arg, void *ptr)
{
	if (arg[0] == '-') {
		errno = EINVAL;
		return -1;
	}
	uint64_t value = 0;
	size_t len = strlen(arg);
	if (parse_number(arg, len, &value, 0, clo->type_uint.base)) {
		errno = EINVAL;
		return -1;
	}
	/*
	 * Use a 64-bit unsigned constant: the previous '~0' is a 32-bit
	 * signed int, so '~0 >> (64 - 8 * size)' shifted by an amount
	 * >= the width of int -- undefined behavior for size <= 4.
	 */
	uint64_t tmax = UINT64_MAX >> (64 - 8 * clo->type_uint.size);
	uint64_t tmin = 0;
	tmax = min(tmax, clo->type_uint.max);
	tmin = max(tmin, clo->type_uint.min);
	if (value > tmax || value < tmin) {
		errno = ERANGE;
		return -1;
	}
	memcpy(ptr, &value, clo->type_uint.size);
	return 0;
}
/*
 * clo_eval_range_uint -- (internal) evaluate range for uint values
 *
 * Expands a <first>:<type><step>:<last> description into individual
 * values appended to 'vlist'.  Each value is emitted before the next
 * one is computed; the loop stops once the next value passes 'last'
 * (or would underflow for '-').
 * NOTE(review): '+' and '*' rely on the post-step comparison after
 * unsigned wrap-around for operands near UINT64_MAX -- confirm such
 * ranges are not needed.
 */
static int
clo_eval_range_uint(struct benchmark_clo *clo, void *first, void *step,
		    void *last, char type, struct clo_vec_vlist *vlist)
{
	uint64_t curr = *(uint64_t *)first;
	uint64_t l = *(uint64_t *)last;
	/* the step is raw 64 bits from the caller, reinterpreted here */
	int64_t s = *(int64_t *)step;
	while (1) {
		/* emit the current value, then advance */
		clo_vec_vlist_add(vlist, &curr, clo->type_uint.size);
		switch (type) {
			case '+':
				curr += s;
				if (curr > l)
					return 0;
				break;
			case '-':
				/* guard against unsigned underflow */
				if (curr < (uint64_t)s)
					return 0;
				curr -= s;
				if (curr < l)
					return 0;
				break;
			case '*':
				curr *= s;
				if (curr > l)
					return 0;
				break;
			case '/':
				curr /= s;
				if (curr < l)
					return 0;
				break;
			default:
				return -1;
		}
	}
	return -1;
}
/*
 * clo_eval_range_int -- (internal) evaluate range for int values
 *
 * Signed counterpart of clo_eval_range_uint: expands the range into
 * 'vlist', emitting each value before computing the next.
 * NOTE(review): '+'/'*' can overflow int64_t (undefined behavior) for
 * operands near the type limits -- confirm such ranges are not needed.
 */
static int
clo_eval_range_int(struct benchmark_clo *clo, void *first, void *step,
		   void *last, char type, struct clo_vec_vlist *vlist)
{
	int64_t curr = *(int64_t *)first;
	int64_t l = *(int64_t *)last;
	/* the step is raw 64 bits from the caller, reinterpreted here */
	uint64_t s = *(uint64_t *)step;
	while (1) {
		/* emit the current value, then advance */
		clo_vec_vlist_add(vlist, &curr, clo->type_int.size);
		switch (type) {
			case '+':
				curr += s;
				if (curr > l)
					return 0;
				break;
			case '-':
				curr -= s;
				if (curr < l)
					return 0;
				break;
			case '*':
				curr *= s;
				if (curr > l)
					return 0;
				break;
			case '/':
				curr /= s;
				if (curr < l)
					return 0;
				break;
			default:
				return -1;
		}
	}
	return -1;
}
/*
 * clo_check_range_params -- (internal) validate step and step type
 *
 * Returns 0 when the (step, step_type) pair can generate a finite,
 * advancing range; -1 otherwise.
 */
static int
clo_check_range_params(uint64_t step, char step_type)
{
	int valid;
	switch (step_type) {
	case '+':
	case '-':
		/* additive ranges need a non-zero step to make progress */
		valid = (step != 0);
		break;
	case '*':
	case '/':
		/* multiplicative ranges also stall on a step of 1 */
		valid = (step != 0 && step != 1);
		break;
	default:
		/* unknown step type */
		valid = 0;
		break;
	}
	return valid ? 0 : -1;
}
/*
 * clo_parse_range -- (internal) parse range or value
 *
 * The range may be in the following format:
 * <first>:<step type><step>:<last>
 *
 * Step type must be one of the following: +, -, *, /.
 *
 * A token without ':' separators is treated as a single value.  Parsed
 * values (or the expanded range) are appended to 'vlist'.  Returns 0
 * on success, -1 on any parse or validation failure.
 */
static int
clo_parse_range(struct benchmark_clo *clo, const char *arg,
		clo_parse_single_fn parse_single, clo_eval_range_fn eval_range,
		struct clo_vec_vlist *vlist)
{
	auto *str_first = (char *)malloc(strlen(arg) + 1);
	assert(str_first != nullptr);
	auto *str_step = (char *)malloc(strlen(arg) + 1);
	assert(str_step != nullptr);
	char step_type = '\0';
	auto *str_last = (char *)malloc(strlen(arg) + 1);
	assert(str_last != nullptr);
	/* split "first:<type>step:last"; ret counts matched fields */
	int ret = sscanf(arg, "%[^:]:%c%[^:]:%[^:]", str_first, &step_type,
			 str_step, str_last);
	if (ret == 1) {
		/* single value */
		/* 64-bit scratch; only type_{u,}int.size bytes are used */
		uint64_t value;
		if (parse_single(clo, arg, &value)) {
			ret = -1;
		} else {
			if (clo->type == CLO_TYPE_UINT)
				clo_vec_vlist_add(vlist, &value,
						  clo->type_uint.size);
			else
				clo_vec_vlist_add(vlist, &value,
						  clo->type_int.size);
			ret = 0;
		}
	} else if (ret == 4) {
		/* range */
		uint64_t first = 0;
		uint64_t last = 0;
		uint64_t step = 0;
		if (parse_single(clo, str_first, &first)) {
			ret = -1;
			goto out;
		}
		/* the step itself is always parsed as a decimal number */
		char *end;
		errno = 0;
		step = strtoull(str_step, &end, 10);
		if (errno || !end || *end != '\0') {
			ret = -1;
			goto out;
		}
		if (parse_single(clo, str_last, &last)) {
			ret = -1;
			goto out;
		}
		if (clo_check_range_params(step, step_type)) {
			ret = -1;
			goto out;
		}
		/* evaluate the range */
		if (eval_range(clo, &first, &step, &last, step_type, vlist)) {
			ret = -1;
			goto out;
		}
		ret = 0;
	} else {
		ret = -1;
	}
out:
	free(str_first);
	free(str_step);
	free(str_last);
	return ret;
}
/*
 * clo_parse_ranges -- (internal) parse ranges/values separated by commas
 *
 * Splits 'arg' on commas, parses every token as a single value or a
 * range, and copies the collected values into the CLO vector.
 */
static int
clo_parse_ranges(struct benchmark_clo *clo, const char *arg,
		 struct clo_vec *clovec, clo_parse_single_fn parse_single,
		 clo_eval_range_fn eval_range)
{
	struct clo_vec_vlist *vlist = clo_vec_vlist_alloc();
	assert(vlist != nullptr);
	char *args = strdup(arg);
	assert(args != nullptr);
	int ret = 0;
	char *seg = args;
	/* parse each comma-separated token as a range or single value */
	for (;;) {
		char *comma = strchr(seg, ',');
		if (comma)
			*comma = '\0';
		ret = clo_parse_range(clo, seg, parse_single, eval_range,
				      vlist);
		if (ret || !comma)
			break;
		seg = comma + 1;
	}
	/* on success, flush the collected values into the CLO vector */
	if (!ret) {
		if (clo->type == CLO_TYPE_UINT)
			ret = clo_vec_memcpy_list(clovec, clo->off,
						  clo->type_uint.size, vlist);
		else
			ret = clo_vec_memcpy_list(clovec, clo->off,
						  clo->type_int.size, vlist);
	}
	free(args);
	clo_vec_vlist_free(vlist);
	return ret;
}
/*
 * clo_parse_int -- (internal) parse int value
 *
 * Thin wrapper binding the signed single-value parser and range
 * evaluator to clo_parse_ranges.
 */
static int
clo_parse_int(struct benchmark_clo *clo, const char *arg,
	      struct clo_vec *clovec)
{
	return clo_parse_ranges(clo, arg, clovec, clo_parse_single_int,
				clo_eval_range_int);
}
/*
 * clo_parse_uint -- (internal) parse uint value
 *
 * Thin wrapper binding the unsigned single-value parser and range
 * evaluator to clo_parse_ranges.
 */
static int
clo_parse_uint(struct benchmark_clo *clo, const char *arg,
	       struct clo_vec *clovec)
{
	return clo_parse_ranges(clo, arg, clovec, clo_parse_single_uint,
				clo_eval_range_uint);
}
/*
 * clo_str_flag -- (internal) convert flag value to string
 *
 * Reads the bool at offset clo->off inside the arguments structure
 * 'addr' (of total size 'size') and returns a static literal, or
 * nullptr when the offset is out of bounds.
 */
static const char *
clo_str_flag(struct benchmark_clo *clo, void *addr, size_t size)
{
	if (clo->off + sizeof(bool) > size)
		return nullptr;
	bool flag = *(bool *)((char *)addr + clo->off);
	return flag ? "true" : "false";
}
/*
 * clo_str_str -- (internal) convert str value to string
 *
 * Returns the char pointer stored at offset clo->off inside the
 * arguments structure, or nullptr when the offset is out of bounds.
 */
static const char *
clo_str_str(struct benchmark_clo *clo, void *addr, size_t size)
{
	if (clo->off + sizeof(char *) > size)
		return nullptr;
	return *(char **)((char *)addr + clo->off);
}
/*
 * clo_str_int -- (internal) convert int value to string
 *
 * Formats the signed integer of clo->type_int.size bytes stored at
 * offset clo->off.  Returns a pointer into the shared static str_buff
 * (valid only until the next clo_str_* call; not thread-safe), or
 * nullptr on bounds/size/format errors.
 */
static const char *
clo_str_int(struct benchmark_clo *clo, void *addr, size_t size)
{
	if (clo->off + clo->type_int.size > size)
		return nullptr;
	void *val = (char *)addr + clo->off;
	int ret = 0;
	/* the format specifier must match the field width exactly */
	switch (clo->type_int.size) {
		case 1:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRId8,
				       *(int8_t *)val);
			break;
		case 2:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRId16,
				       *(int16_t *)val);
			break;
		case 4:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRId32,
				       *(int32_t *)val);
			break;
		case 8:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRId64,
				       *(int64_t *)val);
			break;
		default:
			return nullptr;
	}
	if (ret < 0)
		return nullptr;
	return str_buff;
}
/*
 * clo_str_uint -- (internal) convert uint value to string
 *
 * Formats the unsigned integer of clo->type_uint.size bytes stored at
 * offset clo->off.  Returns a pointer into the shared static str_buff
 * (valid only until the next clo_str_* call; not thread-safe), or
 * nullptr on bounds/size/format errors.
 */
static const char *
clo_str_uint(struct benchmark_clo *clo, void *addr, size_t size)
{
	if (clo->off + clo->type_uint.size > size)
		return nullptr;
	void *val = (char *)addr + clo->off;
	int ret = 0;
	/* the format specifier must match the field width exactly */
	switch (clo->type_uint.size) {
		case 1:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu8,
				       *(uint8_t *)val);
			break;
		case 2:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu16,
				       *(uint16_t *)val);
			break;
		case 4:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu32,
				       *(uint32_t *)val);
			break;
		case 8:
			ret = snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu64,
				       *(uint64_t *)val);
			break;
		default:
			return nullptr;
	}
	if (ret < 0)
		return nullptr;
	return str_buff;
}
/*
 * clo_parse -- (internal) array with functions for parsing CLOs
 *
 * Indexed by the CLO_TYPE_* enum value of the option being parsed.
 */
static clo_parse_fn clo_parse[CLO_TYPE_MAX] = {
	/* [CLO_TYPE_FLAG] = */ clo_parse_flag,
	/* [CLO_TYPE_STR] = */ clo_parse_str,
	/* [CLO_TYPE_INT] = */ clo_parse_int,
	/* [CLO_TYPE_UINT] = */ clo_parse_uint,
};
/*
 * clo_str -- (internal) array with functions for converting to string
 *
 * Indexed by the CLO_TYPE_* enum value, mirroring clo_parse above.
 */
static clo_str_fn clo_str[CLO_TYPE_MAX] = {
	/* [CLO_TYPE_FLAG] = */ clo_str_flag,
	/* [CLO_TYPE_STR] = */ clo_str_str,
	/* [CLO_TYPE_INT] = */ clo_str_int,
	/* [CLO_TYPE_UINT] = */ clo_str_uint,
};
/*
 * clo_get_by_short -- (internal) return CLO with specified short opt
 *
 * Linear search over the CLO array; returns nullptr when not found.
 */
static struct benchmark_clo *
clo_get_by_short(struct benchmark_clo *clos, size_t nclo, char opt_short)
{
	for (size_t idx = 0; idx < nclo; ++idx) {
		if (opt_short == clos[idx].opt_short)
			return &clos[idx];
	}
	return nullptr;
}
/*
 * clo_get_by_long -- (internal) return CLO with specified long opt
 *
 * Linear search over the CLO array; returns nullptr when not found.
 */
static struct benchmark_clo *
clo_get_by_long(struct benchmark_clo *clos, size_t nclo, const char *opt_long)
{
	for (size_t idx = 0; idx < nclo; ++idx) {
		if (strcmp(opt_long, clos[idx].opt_long) == 0)
			return &clos[idx];
	}
	return nullptr;
}
/*
 * clo_get_optstr -- (internal) returns option string from CLOs
 *
 * This function returns option string which contains all short
 * options from CLO structure.
 * The returned value must be freed by caller.
 */
static char *
clo_get_optstr(struct benchmark_clo *clos, size_t nclo)
{
	size_t i;
	char *optstr;
	char *ptr;
	/*
	 * In worst case every option requires an argument
	 * so we need space for ':' character + terminating
	 * NULL.
	 */
	size_t optstrlen = nclo * 2 + 1;
	optstr = (char *)calloc(1, optstrlen);
	assert(optstr != nullptr);
	ptr = optstr;
	for (i = 0; i < nclo; i++) {
		/* options without a short name are long-only */
		if (clos[i].opt_short) {
			*(ptr++) = clos[i].opt_short;
			/* everything except a flag takes an argument */
			if (clos[i].type != CLO_TYPE_FLAG)
				*(ptr++) = ':';
		}
	}
	return optstr;
}
/*
 * clo_get_long_options -- (internal) allocate long options structure
 *
 * This function allocates structure for long options and fills all
 * entries according to values from benchmark_clo. This is essentially
 * conversion from struct benchmark_clo to struct option.
 * The returned value must be freed by caller.
 */
static struct option *
clo_get_long_options(struct benchmark_clo *clos, size_t nclo)
{
	size_t i;
	struct option *options;
	/* +1: getopt_long() requires a zeroed terminator entry */
	options = (struct option *)calloc(nclo + 1, sizeof(struct option));
	assert(options != nullptr);
	for (i = 0; i < nclo; i++) {
		options[i].name = clos[i].opt_long;
		options[i].val = clos[i].opt_short;
		/* no optional arguments */
		if (clos[i].type == CLO_TYPE_FLAG) {
			options[i].has_arg = no_argument;
		} else {
			options[i].has_arg = required_argument;
		}
	}
	return options;
}
/*
 * clo_set_defaults -- (internal) set default values
 *
 * Default values are stored as strings in CLO
 * structure so this function parses default values in
 * the same manner as values passed by user. Returns -1
 * if argument was not passed by user and default value
 * is missing.
 */
static int
clo_set_defaults(struct benchmark_clo *clos, size_t nclo,
		 struct clo_vec *clovec)
{
	size_t i;
	for (i = 0; i < nclo; i++) {
		/* skip options the user supplied explicitly */
		if (clos[i].used)
			continue;
		/*
		 * If option was not used and default value
		 * is not specified, return error. Otherwise
		 * parse the default value in the same way as
		 * values passed by user. Except for the flag.
		 * If the flag default value was not specified
		 * assign "false" value to it.
		 */
		if (clos[i].def) {
			if (clo_parse[clos[i].type](&clos[i], clos[i].def,
						    clovec))
				return -1;
		} else if (clos[i].type == CLO_TYPE_FLAG) {
			if (clo_parse[clos[i].type](&clos[i], "false", clovec))
				return -1;
		} else {
			printf("'%s' is required option\n", clos[i].opt_long);
			return -1;
		}
	}
	return 0;
}
/*
 * benchmark_clo_parse -- parse CLOs and store values in desired structure
 *
 * This function parses command line arguments according to information
 * from CLOs structure. The parsed values are stored in CLO vector
 * pointed by clovec. If any of command line options are not passed by user,
 * the default value is stored if exists. Otherwise it means the argument is
 * required and error is returned.
 *
 * - argc - number of command line options passed by user
 * - argv - command line options passed by user
 * - clos - description of available command line options
 * - nclos - number of available command line options
 * - clovec - vector of arguments
 */
int
benchmark_clo_parse(int argc, char *argv[], struct benchmark_clo *clos,
		    ssize_t nclos, struct clo_vec *clovec)
{
	char *optstr;
	struct option *options;
	int ret = 0;
	int opt;
	int optindex;
	/* convert CLOs to option string and long options structure */
	optstr = clo_get_optstr(clos, nclos);
	options = clo_get_long_options(clos, nclos);
	/* parse CLOs as long and/or short options */
	while ((opt = getopt_long(argc, argv, optstr, options, &optindex)) !=
	       -1) {
		struct benchmark_clo *clo = nullptr;
		/*
		 * opt != 0: a short option (or a long one with a short
		 * alias) matched; opt == 0: a long-only option matched
		 * and 'optindex' identifies it.
		 */
		if (opt) {
			clo = clo_get_by_short(clos, nclos, opt);
		} else {
			assert(optindex < nclos);
			clo = &clos[optindex];
		}
		if (!clo) {
			/* e.g. getopt returned '?' for an unknown option */
			ret = -1;
			goto out;
		}
		/* invoke parser according to type of CLO */
		assert(clo->type < CLO_TYPE_MAX);
		ret = clo_parse[clo->type](clo, optarg, clovec);
		if (ret)
			goto out;
		/* mark CLO as used (flags count even without an argument) */
		clo->used = optarg != nullptr || clo->type == CLO_TYPE_FLAG;
	}
	/* any leftover positional argument is an error */
	if (optind < argc) {
		fprintf(stderr, "Unknown option: %s\n", argv[optind]);
		ret = -1;
		goto out;
	}
	/* parse unused CLOs with default values */
	ret = clo_set_defaults(clos, nclos, clovec);
out:
	free(options);
	free(optstr);
	if (ret)
		errno = EINVAL;
	return ret;
}
/*
* benchmark_clo_parse_scenario -- parse CLOs from scenario
*
* This function parses command line arguments according to information
* from CLOs structure. The parsed values are stored in CLO vector
* pointed by clovec. If any of command line options are not passed by user,
* the default value is stored if exists. Otherwise it means the argument is
* required and error is returned.
*
* - scenario - scenario with key value arguments
* - clos - description of available command line options
* - nclos - number of available command line options
* - clovec - vector of arguments
*/
int
benchmark_clo_parse_scenario(struct scenario *scenario,
			     struct benchmark_clo *clos, size_t nclos,
			     struct clo_vec *clovec)
{
	struct kv *kv;
	/* each key-value pair of the scenario maps onto one long option */
	FOREACH_KV(kv, scenario)
	{
		struct benchmark_clo *clo =
			clo_get_by_long(clos, nclos, kv->key);
		if (!clo) {
			fprintf(stderr, "unrecognized option -- '%s'\n",
				kv->key);
			return -1;
		}
		/* parse the value with the type-specific parser */
		assert(clo->type < CLO_TYPE_MAX);
		if (clo_parse[clo->type](clo, kv->value, clovec)) {
			fprintf(stderr, "parsing option -- '%s' failed\n",
				kv->value);
			return -1;
		}
		/* mark CLO as used */
		clo->used = 1;
	}
	/* fill in defaults for options the scenario did not set */
	return clo_set_defaults(clos, nclos, clovec);
}
/*
* benchmark_override_clos_in_scenario - parse the command line arguments and
* override/add the parameters in/to the scenario by replacing/adding the kv
* struct in/to the scenario.
*
* - scenario - scenario with key value arguments
* - argc - command line arguments number
* - argv - command line arguments vector
* - clos - description of available command line options
* - nclos - number of available command line options
*/
int
benchmark_override_clos_in_scenario(struct scenario *scenario, int argc,
				    char *argv[], struct benchmark_clo *clos,
				    int nclos)
{
	char *optstr;
	struct option *options;
	int ret = 0;
	int opt;
	int optindex;
	const char *true_str = "true";
	/* convert CLOs to option string and long options structure */
	optstr = clo_get_optstr(clos, nclos);
	options = clo_get_long_options(clos, nclos);
	/* parse CLOs as long and/or short options */
	while ((opt = getopt_long(argc, argv, optstr, options, &optindex)) !=
	       -1) {
		struct benchmark_clo *clo = nullptr;
		/* non-zero opt is a short option; zero means long-only */
		if (opt) {
			clo = clo_get_by_short(clos, nclos, opt);
		} else {
			assert(optindex < nclos);
			clo = &clos[optindex];
		}
		if (!clo) {
			ret = -1;
			goto out;
		}
		/* Check if the given clo is defined in the scenario */
		struct kv *kv = find_kv_in_scenario(clo->opt_long, scenario);
		if (kv) { /* replace the value in the scenario */
			if (optarg != nullptr && clo->type != CLO_TYPE_FLAG) {
				free(kv->value);
				kv->value = strdup(optarg);
			} else if (optarg == nullptr &&
				   clo->type == CLO_TYPE_FLAG) {
				/* a present flag forces the value "true" */
				free(kv->value);
				kv->value = strdup(true_str);
			} else {
				/* argument/flag mismatch for this option */
				ret = -1;
				goto out;
			}
		} else { /* add a new param to the scenario */
			if (optarg != nullptr && clo->type != CLO_TYPE_FLAG) {
				kv = kv_alloc(clo->opt_long, optarg);
				TAILQ_INSERT_TAIL(&scenario->head, kv, next);
			} else if (optarg == nullptr &&
				   clo->type == CLO_TYPE_FLAG) {
				kv = kv_alloc(clo->opt_long, true_str);
				TAILQ_INSERT_TAIL(&scenario->head, kv, next);
			} else {
				ret = -1;
				goto out;
			}
		}
	}
	/* leftover non-option arguments are an error */
	if (optind < argc) {
		fprintf(stderr, "Unknown option: %s\n", argv[optind]);
		ret = -1;
		goto out;
	}
out:
	free(options);
	free(optstr);
	if (ret)
		errno = EINVAL;
	return ret;
}
/*
* benchmark_clo_str -- converts command line option to string
*
* According to command line option type and parameters, converts
* the value from structure pointed by args of size size.
*/
const char *
benchmark_clo_str(struct benchmark_clo *clo, void *args, size_t size)
{
	/* dispatch to the stringifier registered for this CLO type */
	assert(clo->type < CLO_TYPE_MAX);
	return clo_str[clo->type](clo, args, size);
}
/*
* clo_get_scenarios - search the command line arguments for scenarios listed in
* available_scenarios and put them in found_scenarios. Returns the number of
* found scenarios in the cmd line or -1 on error. The passed cmd line
* args should contain the scenario name(s) as the first argument(s) - starting
* from index 0
*/
int
clo_get_scenarios(int argc, char *argv[], struct scenarios *available_scenarios,
		  struct scenarios *found_scenarios)
{
	assert(argv != nullptr);
	assert(available_scenarios != nullptr);
	assert(found_scenarios != nullptr);
	if (argc <= 0) {
		fprintf(stderr, "clo get scenarios, argc invalid value: %d\n",
			argc);
		return -1;
	}
	int tmp_argc = argc;
	char **tmp_argv = argv;
	/* consume leading arguments for as long as they name scenarios */
	do {
		struct scenario *scenario =
			scenarios_get_scenario(available_scenarios, *tmp_argv);
		if (!scenario) {
			fprintf(stderr, "unknown scenario: %s\n", *tmp_argv);
			return -1;
		}
		/* store a private clone so callers may modify it freely */
		struct scenario *new_scenario = clone_scenario(scenario);
		assert(new_scenario != nullptr);
		TAILQ_INSERT_TAIL(&found_scenarios->head, new_scenario, next);
		tmp_argc--;
		tmp_argv++;
	} while (tmp_argc &&
		 contains_scenarios(tmp_argc, tmp_argv, available_scenarios));
	/* number of scenario arguments consumed from the front of argv */
	return argc - tmp_argc;
}
| 24,613 | 22.4196 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/poolset_util.cpp
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cassert>
#include <fcntl.h>
#include <file.h>
#include "os.h"
#include "poolset_util.hpp"
#include "set.h"
#define PART_TEMPLATE "part."
#define POOL_PART_SIZE (1UL << 30)
/*
* dynamic_poolset_clear -- clears header in first part if it exists
*/
static int
dynamic_poolset_clear(const char *dir)
{
	char path[PATH_MAX];
	int count = snprintf(path, sizeof(path),
			     "%s" OS_DIR_SEP_STR PART_TEMPLATE "0", dir);
	assert(count > 0);
	/* snprintf returns the would-be length -- detect truncation */
	if ((size_t)count >= sizeof(path)) {
		fprintf(stderr, "path to a poolset part too long\n");
		return -1;
	}
	int exists = util_file_exists(path);
	if (exists < 0)
		return -1;
	/* nothing to clear if the first part was never created */
	if (!exists)
		return 0;
	/* zero the pool header so the leftover pool is treated as new */
	return util_file_zero(path, 0, POOL_HDR_SIZE);
}
/*
* dynamic_poolset_create -- clear pool's header and create new poolset
*/
int
dynamic_poolset_create(const char *path, size_t size)
{
	/* buffer for part's path and size */
	char buff[PATH_MAX + 20];
	int ret;
	int fd;
	int count;
	int curr_part = 0;
	/* wipe the header of a leftover first part, if any */
	ret = dynamic_poolset_clear(path);
	if (ret == -1)
		return -1;
	fd = os_open(POOLSET_PATH, O_RDWR | O_CREAT, 0644);
	if (fd == -1) {
		perror("open");
		return -1;
	}
	/* sizeof(header) - 1: do not write the terminating NUL */
	char header[] = "PMEMPOOLSET\nOPTION SINGLEHDR\n";
	ret = util_write_all(fd, header, sizeof(header) - 1);
	if (ret == -1)
		goto err;
	/*
	 * Emit 1 GiB (POOL_PART_SIZE) part entries until their total
	 * covers the requested size plus room for the pool header.
	 */
	while (curr_part * POOL_PART_SIZE < size + POOL_HDR_SIZE) {
		count = snprintf(buff, sizeof(buff),
				 "%lu %s" OS_DIR_SEP_STR PART_TEMPLATE "%d\n",
				 POOL_PART_SIZE, path, curr_part);
		assert(count > 0);
		if ((size_t)count >= sizeof(buff)) {
			fprintf(stderr, "path to a poolset part too long\n");
			goto err;
		}
		ret = util_write_all(fd, buff, count);
		if (ret == -1)
			goto err;
		curr_part++;
	}
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
| 3,312 | 25.934959 | 73 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/benchmark_time.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* benchmark_time.hpp -- declarations of benchmark_time module
*/
#include <ctime>
/*
 * NOTE(review): this header has no include guard / #pragma once --
 * confirm whether multiple inclusion is prevented elsewhere.
 */
/* benchmark time point -- wraps POSIX struct timespec */
typedef struct timespec benchmark_time_t;
/* read the current time into *time */
void benchmark_time_get(benchmark_time_t *time);
/* store the difference of *t1 and *t2 in *d (operand order: confirm) */
void benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1,
			 benchmark_time_t *t2);
/* convert *t to seconds */
double benchmark_time_get_secs(benchmark_time_t *t);
/* convert *t to nanoseconds */
unsigned long long benchmark_time_get_nsecs(benchmark_time_t *t);
/* compare *t1 with *t2; presumably a three-way comparison -- confirm */
int benchmark_time_compare(const benchmark_time_t *t1,
			   const benchmark_time_t *t2);
/* initialize *time from a nanosecond count */
void benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs);
/* average cost of one time query (used to compensate measurements?) */
unsigned long long benchmark_get_avg_get_time(void);
| 2,213 | 45.125 | 74 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/blk.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk.cpp -- pmemblk benchmarks definitions
*/
#include "benchmark.hpp"
#include "file.h"
#include "libpmem.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "os.h"
#include "poolset_util.hpp"
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
struct blk_bench;
struct blk_worker;
/*
 * op_type -- type of operation
 */
enum op_type {
	OP_TYPE_UNKNOWN,
	OP_TYPE_BLK,	/* pmemblk_read()/pmemblk_write() */
	OP_TYPE_FILE,	/* plain file I/O via pread()/pwrite() */
	OP_TYPE_MEMCPY, /* direct pmem_memcpy_persist()/memcpy() */
};
/*
 * op_mode -- mode of the copy process
 */
enum op_mode {
	OP_MODE_UNKNOWN,
	OP_MODE_STAT, /* read/write always the same chunk */
	OP_MODE_SEQ,  /* read/write chunk by chunk */
	OP_MODE_RAND  /* read/write to chunks selected randomly */
};
/*
 * typedef for the worker function
 */
typedef int (*worker_fn)(struct blk_bench *, struct benchmark_args *,
			 struct blk_worker *, os_off_t);
/*
 * blk_args -- benchmark specific arguments
 */
struct blk_args {
	size_t fsize;    /* requested file size */
	bool no_warmup;  /* don't do warmup */
	unsigned seed;   /* seed for randomization */
	char *type_str;  /* type: blk, file, memcpy */
	char *mode_str;  /* mode: stat, seq, rand */
};
/*
 * blk_bench -- pmemblk benchmark context
 */
struct blk_bench {
	PMEMblkpool *pbp;         /* pmemblk handle */
	char *addr;               /* address of user data (memcpy) */
	int fd;                   /* file descr. for file io */
	size_t nblocks;           /* actual number of blocks */
	size_t blocks_per_thread; /* number of blocks per thread */
	worker_fn worker;         /* worker function */
	enum op_type type;        /* selected operation backend */
	enum op_mode mode;        /* selected access pattern */
};
/*
 * struct blk_worker -- pmemblk worker context
 */
struct blk_worker {
	os_off_t *blocks; /* array with block numbers */
	char *buff;       /* buffer for read/write */
	unsigned seed;    /* worker seed */
};
/*
* parse_op_type -- parse command line "--operation" argument
*
* Returns proper operation type.
*/
/*
 * parse_op_type -- parse command line "--operation" argument
 *
 * Maps the argument string onto the corresponding operation type;
 * any unrecognized string yields OP_TYPE_UNKNOWN.
 */
static enum op_type
parse_op_type(const char *arg)
{
	static const struct {
		const char *name;
		enum op_type type;
	} mapping[] = {
		{"blk", OP_TYPE_BLK},
		{"file", OP_TYPE_FILE},
		{"memcpy", OP_TYPE_MEMCPY},
	};

	for (size_t i = 0; i < sizeof(mapping) / sizeof(mapping[0]); ++i) {
		if (strcmp(arg, mapping[i].name) == 0)
			return mapping[i].type;
	}

	return OP_TYPE_UNKNOWN;
}
/*
* parse_op_mode -- parse command line "--mode" argument
*
* Returns proper operation mode.
*/
/*
 * parse_op_mode -- parse command line "--mode" argument
 *
 * Maps the argument string onto the corresponding access mode;
 * any unrecognized string yields OP_MODE_UNKNOWN.
 */
static enum op_mode
parse_op_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum op_mode mode;
	} mapping[] = {
		{"stat", OP_MODE_STAT},
		{"seq", OP_MODE_SEQ},
		{"rand", OP_MODE_RAND},
	};

	for (size_t i = 0; i < sizeof(mapping) / sizeof(mapping[0]); ++i) {
		if (strcmp(arg, mapping[i].name) == 0)
			return mapping[i].mode;
	}

	return OP_MODE_UNKNOWN;
}
/*
* blk_do_warmup -- perform warm-up by writing to each block
*/
static int
blk_do_warmup(struct blk_bench *bb, struct benchmark_args *args)
{
	size_t lba;
	int ret = 0;
	/* one zero-filled, block-sized buffer written to every block */
	auto *buff = (char *)calloc(1, args->dsize);
	if (!buff) {
		perror("calloc");
		return -1;
	}
	for (lba = 0; lba < bb->nblocks; ++lba) {
		/* use the same backend the benchmark itself will use */
		switch (bb->type) {
			case OP_TYPE_FILE: {
				size_t off = lba * args->dsize;
				if (pwrite(bb->fd, buff, args->dsize, off) !=
				    (ssize_t)args->dsize) {
					perror("pwrite");
					ret = -1;
					goto out;
				}
			} break;
			case OP_TYPE_BLK:
				if (pmemblk_write(bb->pbp, buff, lba) < 0) {
					perror("pmemblk_write");
					ret = -1;
					goto out;
				}
				break;
			case OP_TYPE_MEMCPY: {
				size_t off = lba * args->dsize;
				pmem_memcpy_persist((char *)bb->addr + off,
						    buff, args->dsize);
			} break;
			default:
				perror("unknown type");
				ret = -1;
				goto out;
		}
	}
out:
	free(buff);
	return ret;
}
/*
* blk_read -- read function for pmemblk
*/
/*
 * blk_read -- read function for pmemblk
 *
 * Reads block `off` of the pool into the worker's buffer.
 */
static int
blk_read(struct blk_bench *bb, struct benchmark_args *ba,
	 struct blk_worker *bworker, os_off_t off)
{
	int rv = pmemblk_read(bb->pbp, bworker->buff, off);
	if (rv >= 0)
		return 0;

	perror("pmemblk_read");
	return -1;
}
/*
* fileio_read -- read function for file io
*/
static int
fileio_read(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
if (pread(bb->fd, bworker->buff, ba->dsize, file_off) !=
(ssize_t)ba->dsize) {
perror("pread");
return -1;
}
return 0;
}
/*
* memcpy_read -- read function for memcpy
*/
/*
 * memcpy_read -- read function for memcpy
 *
 * Copies one block-sized chunk at block index `off` from the mapped
 * pool into the worker's buffer.
 */
static int
memcpy_read(struct blk_bench *bb, struct benchmark_args *ba,
	    struct blk_worker *bworker, os_off_t off)
{
	const char *src = (char *)bb->addr + off * ba->dsize;
	memcpy(bworker->buff, src, ba->dsize);
	return 0;
}
/*
* blk_write -- write function for pmemblk
*/
/*
 * blk_write -- write function for pmemblk
 *
 * Writes the worker's buffer into block `off` of the pool.
 */
static int
blk_write(struct blk_bench *bb, struct benchmark_args *ba,
	  struct blk_worker *bworker, os_off_t off)
{
	int rv = pmemblk_write(bb->pbp, bworker->buff, off);
	if (rv >= 0)
		return 0;

	perror("pmemblk_write");
	return -1;
}
/*
* memcpy_write -- write function for memcpy
*/
/*
 * memcpy_write -- write function for memcpy
 *
 * Copies the worker's buffer into the mapped pool at block index
 * `off` and persists it.
 */
static int
memcpy_write(struct blk_bench *bb, struct benchmark_args *ba,
	     struct blk_worker *bworker, os_off_t off)
{
	char *dst = (char *)bb->addr + off * ba->dsize;
	pmem_memcpy_persist(dst, bworker->buff, ba->dsize);
	return 0;
}
/*
* fileio_write -- write function for file io
*/
static int
fileio_write(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
if (pwrite(bb->fd, bworker->buff, ba->dsize, file_off) !=
(ssize_t)ba->dsize) {
perror("pwrite");
return -1;
}
return 0;
}
/*
* blk_operation -- main operations for blk_read and blk_write benchmark
*/
static int
blk_operation(struct benchmark *bench, struct operation_info *info)
{
auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
auto *bworker = (struct blk_worker *)info->worker->priv;
os_off_t off = bworker->blocks[info->index];
return bb->worker(bb, info->args, bworker, off);
}
/*
* blk_init_worker -- initialize worker
*/
static int
blk_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
struct blk_worker *bworker =
(struct blk_worker *)malloc(sizeof(*bworker));
if (!bworker) {
perror("malloc");
return -1;
}
auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
auto *bargs = (struct blk_args *)args->opts;
bworker->seed = os_rand_r(&bargs->seed);
bworker->buff = (char *)malloc(args->dsize);
if (!bworker->buff) {
perror("malloc");
goto err_buff;
}
/* fill buffer with some random data */
memset(bworker->buff, bworker->seed, args->dsize);
assert(args->n_ops_per_thread != 0);
bworker->blocks = (os_off_t *)malloc(sizeof(*bworker->blocks) *
args->n_ops_per_thread);
if (!bworker->blocks) {
perror("malloc");
goto err_blocks;
}
switch (bb->mode) {
case OP_MODE_RAND:
for (size_t i = 0; i < args->n_ops_per_thread; i++) {
bworker->blocks[i] =
worker->index * bb->blocks_per_thread +
os_rand_r(&bworker->seed) %
bb->blocks_per_thread;
}
break;
case OP_MODE_SEQ:
for (size_t i = 0; i < args->n_ops_per_thread; i++)
bworker->blocks[i] = i % bb->blocks_per_thread;
break;
case OP_MODE_STAT:
for (size_t i = 0; i < args->n_ops_per_thread; i++)
bworker->blocks[i] = 0;
break;
default:
perror("unknown mode");
goto err_blocks;
}
worker->priv = bworker;
return 0;
err_blocks:
free(bworker->buff);
err_buff:
free(bworker);
return -1;
}
/*
* blk_free_worker -- cleanup worker
*/
/*
 * blk_free_worker -- cleanup worker
 *
 * Releases everything allocated by blk_init_worker().
 */
static void
blk_free_worker(struct benchmark *bench, struct benchmark_args *args,
		struct worker_info *worker)
{
	auto *w = (struct blk_worker *)worker->priv;

	free(w->buff);
	free(w->blocks);
	free(w);
}
/*
* blk_init -- function for initialization benchmark
*/
static int
blk_init(struct blk_bench *bb, struct benchmark_args *args)
{
	auto *ba = (struct blk_args *)args->opts;
	assert(ba != nullptr);
	char path[PATH_MAX];
	if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
		return -1;
	/* validate the requested backend */
	bb->type = parse_op_type(ba->type_str);
	if (bb->type == OP_TYPE_UNKNOWN) {
		fprintf(stderr, "Invalid operation argument '%s'",
			ba->type_str);
		return -1;
	}
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	if (bb->type == OP_TYPE_FILE && type == TYPE_DEVDAX) {
		fprintf(stderr, "fileio not supported on device dax\n");
		return -1;
	}
	/* validate the requested access pattern */
	bb->mode = parse_op_mode(ba->mode_str);
	if (bb->mode == OP_MODE_UNKNOWN) {
		fprintf(stderr, "Invalid mode argument '%s'", ba->mode_str);
		return -1;
	}
	if (ba->fsize == 0)
		ba->fsize = PMEMBLK_MIN_POOL;
	/* remember the requested size before it may be zeroed below */
	size_t req_fsize = ba->fsize;
	/* every thread needs at least one block */
	if (ba->fsize / args->dsize < args->n_threads ||
	    ba->fsize < PMEMBLK_MIN_POOL) {
		fprintf(stderr, "too small file size\n");
		return -1;
	}
	if (args->dsize >= ba->fsize) {
		fprintf(stderr, "block size bigger than file size\n");
		return -1;
	}
	if (args->is_poolset || type == TYPE_DEVDAX) {
		if (args->fsize < ba->fsize) {
			fprintf(stderr, "file size too large\n");
			return -1;
		}
		/* size 0: pool size comes from the existing poolset/DAX */
		ba->fsize = 0;
	} else if (args->is_dynamic_poolset) {
		int ret = dynamic_poolset_create(args->fname, ba->fsize);
		if (ret == -1)
			return -1;
		if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
			return -1;
		ba->fsize = 0;
	}
	bb->fd = -1;
	/*
	 * Create pmemblk in order to get the number of blocks
	 * even for file-io mode.
	 */
	bb->pbp = pmemblk_create(path, args->dsize, ba->fsize, args->fmode);
	if (bb->pbp == nullptr) {
		perror("pmemblk_create");
		return -1;
	}
	bb->nblocks = pmemblk_nblock(bb->pbp);
	/* limit the number of used blocks */
	if (bb->nblocks > req_fsize / args->dsize)
		bb->nblocks = req_fsize / args->dsize;
	if (bb->nblocks < args->n_threads) {
		fprintf(stderr, "too small file size");
		goto out_close;
	}
	if (bb->type == OP_TYPE_FILE) {
		/* file-io path: drop the pool, reopen as a plain file */
		pmemblk_close(bb->pbp);
		bb->pbp = nullptr;
		int flags = O_RDWR | O_CREAT | O_SYNC;
#ifdef _WIN32
		flags |= O_BINARY;
#endif
		bb->fd = os_open(args->fname, flags, args->fmode);
		if (bb->fd < 0) {
			perror("open");
			return -1;
		}
	} else if (bb->type == OP_TYPE_MEMCPY) {
		/*
		 * skip pool header, so addr points to the first block
		 * NOTE(review): 8192 is presumably the blk pool header
		 * size -- confirm against libpmemblk internals
		 */
		bb->addr = (char *)bb->pbp + 8192;
	}
	bb->blocks_per_thread = bb->nblocks / args->n_threads;
	if (!ba->no_warmup) {
		if (blk_do_warmup(bb, args) != 0)
			goto out_close;
	}
	return 0;
out_close:
	/* release whichever resource the selected backend holds */
	if (bb->type == OP_TYPE_FILE)
		os_close(bb->fd);
	else
		pmemblk_close(bb->pbp);
	return -1;
}
/*
* blk_read_init - function for initializing blk_read benchmark
*/
/*
 * blk_read_init - function for initializing blk_read benchmark
 *
 * Allocates the benchmark context, initializes the pool/file through
 * blk_init() and selects the read worker matching the operation type.
 *
 * Fix vs. original: the context is published via pmembench_set_priv()
 * only after full success. The original set it before blk_init() and,
 * in the (defensive) unknown-type branch, returned -1 without freeing
 * bb -- leaking it and leaving the framework holding a stale pointer.
 */
static int
blk_read_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	int ret;
	auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench));
	if (bb == nullptr) {
		perror("malloc");
		return -1;
	}
	ret = blk_init(bb, args);
	if (ret != 0) {
		free(bb);
		return ret;
	}
	switch (bb->type) {
		case OP_TYPE_FILE:
			bb->worker = fileio_read;
			break;
		case OP_TYPE_BLK:
			bb->worker = blk_read;
			break;
		case OP_TYPE_MEMCPY:
			bb->worker = memcpy_read;
			break;
		default:
			/* unreachable: blk_init() rejects unknown types */
			perror("unknown operation type");
			free(bb);
			return -1;
	}
	/* publish the fully initialized context */
	pmembench_set_priv(bench, bb);
	return ret;
}
/*
* blk_write_init - function for initializing blk_write benchmark
*/
/*
 * blk_write_init - function for initializing blk_write benchmark
 *
 * Allocates the benchmark context, initializes the pool/file through
 * blk_init() and selects the write worker matching the operation type.
 *
 * Fix vs. original: the context is published via pmembench_set_priv()
 * only after full success. The original set it before blk_init() and,
 * in the (defensive) unknown-type branch, returned -1 without freeing
 * bb -- leaking it and leaving the framework holding a stale pointer.
 */
static int
blk_write_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	int ret;
	auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench));
	if (bb == nullptr) {
		perror("malloc");
		return -1;
	}
	ret = blk_init(bb, args);
	if (ret != 0) {
		free(bb);
		return ret;
	}
	switch (bb->type) {
		case OP_TYPE_FILE:
			bb->worker = fileio_write;
			break;
		case OP_TYPE_BLK:
			bb->worker = blk_write;
			break;
		case OP_TYPE_MEMCPY:
			bb->worker = memcpy_write;
			break;
		default:
			/* unreachable: blk_init() rejects unknown types */
			perror("unknown operation type");
			free(bb);
			return -1;
	}
	/* publish the fully initialized context */
	pmembench_set_priv(bench, bb);
	return ret;
}
/*
* blk_exit -- function for de-initialization benchmark
*/
static int
blk_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
	char path[PATH_MAX];
	if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
		return -1;
	/* dynamic poolsets were created under POOLSET_PATH, not fname */
	if (args->is_dynamic_poolset) {
		if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
			return -1;
	}
	int result;
	switch (bb->type) {
		case OP_TYPE_FILE:
			os_close(bb->fd);
			break;
		case OP_TYPE_BLK:
			pmemblk_close(bb->pbp);
			/* verify pool consistency after the benchmark run */
			result = pmemblk_check(path, args->dsize);
			if (result < 0) {
				perror("pmemblk_check error");
				return -1;
			} else if (result == 0) {
				perror("pmemblk_check: not consistent");
				return -1;
			}
			break;
		case OP_TYPE_MEMCPY:
			pmemblk_close(bb->pbp);
			break;
		default:
			perror("unknown operation type");
			return -1;
	}
	free(bb);
	return 0;
}
/* shared CLO table and benchmark descriptors for blk_read/blk_write */
static struct benchmark_clo blk_clo[5];
static struct benchmark_info blk_read_info;
static struct benchmark_info blk_write_info;
CONSTRUCTOR(blk_constructor)
void
blk_constructor(void)
{
	/* -o/--operation: backend selection (blk, file, memcpy) */
	blk_clo[0].opt_short = 'o';
	blk_clo[0].opt_long = "operation";
	blk_clo[0].descr = "Operation type - blk, file, memcpy";
	blk_clo[0].type = CLO_TYPE_STR;
	blk_clo[0].off = clo_field_offset(struct blk_args, type_str);
	blk_clo[0].def = "blk";
	/* -w/--no-warmup: skip the initial write of every block */
	blk_clo[1].opt_short = 'w';
	blk_clo[1].opt_long = "no-warmup";
	blk_clo[1].descr = "Don't do warmup";
	blk_clo[1].type = CLO_TYPE_FLAG;
	blk_clo[1].off = clo_field_offset(struct blk_args, no_warmup);
	/* -m/--mode: access pattern (stat, seq, rand) */
	blk_clo[2].opt_short = 'm';
	blk_clo[2].opt_long = "mode";
	blk_clo[2].descr = "Reading/writing mode - stat, seq, rand";
	blk_clo[2].type = CLO_TYPE_STR;
	blk_clo[2].off = clo_field_offset(struct blk_args, mode_str);
	blk_clo[2].def = "seq";
	/* -S/--seed: randomization seed, must be >= 1 */
	blk_clo[3].opt_short = 'S';
	blk_clo[3].opt_long = "seed";
	blk_clo[3].descr = "Random seed";
	blk_clo[3].off = clo_field_offset(struct blk_args, seed);
	blk_clo[3].def = "1";
	blk_clo[3].type = CLO_TYPE_UINT;
	blk_clo[3].type_uint.size = clo_field_size(struct blk_args, seed);
	blk_clo[3].type_uint.base = CLO_INT_BASE_DEC;
	blk_clo[3].type_uint.min = 1;
	blk_clo[3].type_uint.max = UINT_MAX;
	/* -s/--file-size: 0 means PMEMBLK_MIN_POOL (see blk_init) */
	blk_clo[4].opt_short = 's';
	blk_clo[4].opt_long = "file-size";
	blk_clo[4].descr = "Requested file size in bytes - 0 means minimum";
	blk_clo[4].type = CLO_TYPE_UINT;
	blk_clo[4].off = clo_field_offset(struct blk_args, fsize);
	blk_clo[4].def = "0";
	blk_clo[4].type_uint.size = clo_field_size(struct blk_args, fsize);
	blk_clo[4].type_uint.base = CLO_INT_BASE_DEC;
	blk_clo[4].type_uint.min = 0;
	blk_clo[4].type_uint.max = ~0;
	/* register the read benchmark */
	blk_read_info.name = "blk_read";
	blk_read_info.brief = "Benchmark for blk_read() operation";
	blk_read_info.init = blk_read_init;
	blk_read_info.exit = blk_exit;
	blk_read_info.multithread = true;
	blk_read_info.multiops = true;
	blk_read_info.init_worker = blk_init_worker;
	blk_read_info.free_worker = blk_free_worker;
	blk_read_info.operation = blk_operation;
	blk_read_info.clos = blk_clo;
	blk_read_info.nclos = ARRAY_SIZE(blk_clo);
	blk_read_info.opts_size = sizeof(struct blk_args);
	blk_read_info.rm_file = true;
	blk_read_info.allow_poolset = true;
	REGISTER_BENCHMARK(blk_read_info);
	/* register the write benchmark (same CLOs and workers' frame) */
	blk_write_info.name = "blk_write";
	blk_write_info.brief = "Benchmark for blk_write() operation";
	blk_write_info.init = blk_write_init;
	blk_write_info.exit = blk_exit;
	blk_write_info.multithread = true;
	blk_write_info.multiops = true;
	blk_write_info.init_worker = blk_init_worker;
	blk_write_info.free_worker = blk_free_worker;
	blk_write_info.operation = blk_operation;
	blk_write_info.clos = blk_clo;
	blk_write_info.nclos = ARRAY_SIZE(blk_clo);
	blk_write_info.opts_size = sizeof(struct blk_args);
	blk_write_info.rm_file = true;
	blk_write_info.allow_poolset = true;
	REGISTER_BENCHMARK(blk_write_info);
}
| 17,282 | 22.904564 | 74 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/benchmark_worker.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* benchmark_worker.cpp -- benchmark_worker module definitions
*/
#include <cassert>
#include <err.h>
#include "benchmark_worker.hpp"
#include "os_thread.h"
/*
* worker_state_wait_for_transition -- wait for transition from and to
* specified states
*/
static void
worker_state_wait_for_transition(struct benchmark_worker *worker,
				 enum benchmark_worker_state state,
				 enum benchmark_worker_state new_state)
{
	/*
	 * Caller holds worker->lock (all call sites in this module do);
	 * os_cond_wait releases it while sleeping and reacquires it.
	 */
	while (worker->state == state)
		os_cond_wait(&worker->cond, &worker->lock);
	/* the protocol permits only one successor state */
	assert(worker->state == new_state);
}
/*
* worker_state_transition -- change worker state from and to specified states
*/
static void
worker_state_transition(struct benchmark_worker *worker,
			enum benchmark_worker_state old_state,
			enum benchmark_worker_state new_state)
{
	assert(worker->state == old_state);
	worker->state = new_state;
	/* wake the peer blocked in worker_state_wait_for_transition() */
	os_cond_signal(&worker->cond);
}
/*
* thread_func -- (internal) callback for os_thread
*/
static void *
thread_func(void *arg)
{
	assert(arg != nullptr);
	auto *worker = (struct benchmark_worker *)arg;
	os_mutex_lock(&worker->lock);
	/* wait until the controller requests initialization */
	worker_state_wait_for_transition(worker, WORKER_STATE_IDLE,
					 WORKER_STATE_INIT);
	if (worker->init)
		worker->ret_init = worker->init(worker->bench, worker->args,
						&worker->info);
	worker_state_transition(worker, WORKER_STATE_INIT,
				WORKER_STATE_INITIALIZED);
	/* wait for the controller's run request, then do the work */
	worker_state_wait_for_transition(worker, WORKER_STATE_INITIALIZED,
					 WORKER_STATE_RUN);
	worker->ret = worker->func(worker->bench, &worker->info);
	worker_state_transition(worker, WORKER_STATE_RUN, WORKER_STATE_END);
	/* wait for the controller's exit request, then clean up */
	worker_state_wait_for_transition(worker, WORKER_STATE_END,
					 WORKER_STATE_EXIT);
	if (worker->exit)
		worker->exit(worker->bench, worker->args, &worker->info);
	worker_state_transition(worker, WORKER_STATE_EXIT, WORKER_STATE_DONE);
	os_mutex_unlock(&worker->lock);
	return nullptr;
}
/*
* benchmark_worker_alloc -- allocate benchmark worker
*/
struct benchmark_worker *
benchmark_worker_alloc(void)
{
	/* calloc zeroes the struct, so state starts at 0 (the idle state) */
	struct benchmark_worker *w =
		(struct benchmark_worker *)calloc(1, sizeof(*w));
	if (!w)
		return nullptr;
	if (os_mutex_init(&w->lock))
		goto err_free_worker;
	if (os_cond_init(&w->cond))
		goto err_destroy_mutex;
	/* the thread blocks in thread_func() until states are driven */
	if (os_thread_create(&w->thread, nullptr, thread_func, w))
		goto err_destroy_cond;
	return w;
err_destroy_cond:
	os_cond_destroy(&w->cond);
err_destroy_mutex:
	os_mutex_destroy(&w->lock);
err_free_worker:
	free(w);
	return nullptr;
}
/*
* benchmark_worker_free -- release benchmark worker
*/
void
benchmark_worker_free(struct benchmark_worker *w)
{
	/* join first -- the thread must not outlive its cond/mutex */
	os_thread_join(&w->thread, nullptr);
	os_cond_destroy(&w->cond);
	os_mutex_destroy(&w->lock);
	free(w);
}
/*
* benchmark_worker_init -- call init function for worker
*/
int
benchmark_worker_init(struct benchmark_worker *worker)
{
	os_mutex_lock(&worker->lock);
	/* request initialization and wait for the thread to finish it */
	worker_state_transition(worker, WORKER_STATE_IDLE, WORKER_STATE_INIT);
	worker_state_wait_for_transition(worker, WORKER_STATE_INIT,
					 WORKER_STATE_INITIALIZED);
	/* result of the worker's init callback, set in thread_func() */
	int ret = worker->ret_init;
	os_mutex_unlock(&worker->lock);
	return ret;
}
/*
* benchmark_worker_exit -- call exit function for worker
*/
void
benchmark_worker_exit(struct benchmark_worker *worker)
{
	os_mutex_lock(&worker->lock);
	/* let the thread run its exit callback and wait until it is done */
	worker_state_transition(worker, WORKER_STATE_END, WORKER_STATE_EXIT);
	worker_state_wait_for_transition(worker, WORKER_STATE_EXIT,
					 WORKER_STATE_DONE);
	os_mutex_unlock(&worker->lock);
}
/*
* benchmark_worker_run -- run benchmark worker
*/
int
benchmark_worker_run(struct benchmark_worker *worker)
{
	/* kick the initialized thread into its RUN phase */
	os_mutex_lock(&worker->lock);
	worker_state_transition(worker, WORKER_STATE_INITIALIZED,
				WORKER_STATE_RUN);
	os_mutex_unlock(&worker->lock);
	return 0;
}
/*
* benchmark_worker_join -- join benchmark worker
*/
int
benchmark_worker_join(struct benchmark_worker *worker)
{
	os_mutex_lock(&worker->lock);
	/* block until the thread's main function finishes (RUN -> END) */
	worker_state_wait_for_transition(worker, WORKER_STATE_RUN,
					 WORKER_STATE_END);
	os_mutex_unlock(&worker->lock);
	return 0;
}
| 5,629 | 24.36036 | 78 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/vmem.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* vmem.cpp -- vmem_malloc, vmem_free and vmem_realloc multithread benchmarks
*/
#include "benchmark.hpp"
#include "file.h"
#include <cassert>
#include <libvmem.h>
#include <sys/stat.h>
#define DIR_MODE 0700
#define MAX_POOLS 8
#define FACTOR 2
#define RRAND(max, min) (rand() % ((max) - (min)) + (min))
struct vmem_bench;
typedef int (*operation)(struct vmem_bench *vb, unsigned worker_idx,
size_t info_idx);
/*
* vmem_args -- additional properties set as arguments opts
*/
struct vmem_args {
bool stdlib_alloc; /* use stdlib allocator instead of vmem */
bool no_warmup; /* do not perform warmup */
bool pool_per_thread; /* create single pool per thread */
ssize_t min_size; /* size of min allocation in range mode */
ssize_t rsize; /* size of reallocation */
ssize_t min_rsize; /* size of min reallocation in range mode */
/* perform operation on object allocated by other thread */
bool mix;
};
/*
* item -- structure representing single allocated object
*/
struct item {
void *buf; /* buffer for operations */
/* number of pool to which object is assigned */
unsigned pool_num;
};
/*
* vmem_worker -- additional properties set as worker private
*/
struct vmem_worker {
/* array to store objects used in operations performed by worker */
struct item *objs;
unsigned pool_number; /* number of pool used by worker */
};
/*
* vmem_bench -- additional properties set as benchmark private
*/
struct vmem_bench {
VMEM **pools; /* handle for VMEM pools */
struct vmem_worker *workers; /* array with private workers data */
size_t pool_size; /* size of each pool */
unsigned npools; /* number of created pools */
size_t *alloc_sizes; /* array with allocation sizes */
size_t *realloc_sizes; /* array with reallocation sizes */
unsigned *mix_ops; /* array with random indexes */
bool rand_alloc; /* use range mode in allocation */
bool rand_realloc; /* use range mode in reallocation */
int lib_mode; /* library mode - vmem or stdlib */
};
/*
* lib_mode -- enumeration used to determine mode of the benchmark
*/
enum lib_mode { VMEM_MODE, STDLIB_MODE };
/*
* vmem_malloc_op -- malloc operation using vmem
*/
static int
vmem_malloc_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
struct item *item = &vb->workers[worker_idx].objs[info_idx];
item->buf = vmem_malloc(vb->pools[item->pool_num],
vb->alloc_sizes[info_idx]);
if (item->buf == nullptr) {
perror("vmem_malloc");
return -1;
}
return 0;
}
/*
* stdlib_malloc_op -- malloc operation using stdlib
*/
static int
stdlib_malloc_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
	/* allocate the buffer for this (worker, operation) slot via malloc */
	struct item *obj = &vb->workers[worker_idx].objs[info_idx];
	obj->buf = malloc(vb->alloc_sizes[info_idx]);
	if (obj->buf != nullptr)
		return 0;
	perror("malloc");
	return -1;
}
/*
* vmem_free_op -- free operation using vmem
*/
static int
vmem_free_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
	/* return the slot's buffer (if any) to its pool and clear the slot */
	struct item *obj = &vb->workers[worker_idx].objs[info_idx];
	void *buf = obj->buf;
	if (buf != nullptr) {
		vmem_free(vb->pools[obj->pool_num], buf);
	}
	obj->buf = nullptr;
	return 0;
}
/*
* stdlib_free_op -- free operation using stdlib
*/
static int
stdlib_free_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
	struct item *item = &vb->workers[worker_idx].objs[info_idx];
	/* free(NULL) is a defined no-op, so no null guard is required */
	free(item->buf);
	/* clear the slot so later free/realloc ops see no stale pointer */
	item->buf = nullptr;
	return 0;
}
/*
* vmem_realloc_op -- realloc operation using vmem
*/
static int
vmem_realloc_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
	struct item *item = &vb->workers[worker_idx].objs[info_idx];
	/*
	 * Do not overwrite item->buf before checking the result: on failure
	 * vmem_realloc() leaves the original allocation valid, and keeping
	 * the old pointer lets the exit path free it instead of leaking it.
	 */
	void *newbuf = vmem_realloc(vb->pools[item->pool_num], item->buf,
				    vb->realloc_sizes[info_idx]);
	if (vb->realloc_sizes[info_idx] != 0 && newbuf == nullptr) {
		perror("vmem_realloc");
		return -1;
	}
	/* size 0 frees the buffer and may return nullptr -- store that too */
	item->buf = newbuf;
	return 0;
}
/*
* stdlib_realloc_op -- realloc operation using stdlib
*/
static int
stdlib_realloc_op(struct vmem_bench *vb, unsigned worker_idx, size_t info_idx)
{
	struct item *item = &vb->workers[worker_idx].objs[info_idx];
	/*
	 * Keep the old pointer until the result is known: realloc() leaves
	 * the original block valid on failure, and overwriting item->buf
	 * with nullptr would leak it (it could never be freed later).
	 */
	void *newbuf = realloc(item->buf, vb->realloc_sizes[info_idx]);
	if (vb->realloc_sizes[info_idx] != 0 && newbuf == nullptr) {
		perror("realloc");
		return -1;
	}
	item->buf = newbuf;
	return 0;
}
/* dispatch tables indexed by enum lib_mode (VMEM_MODE = 0, STDLIB_MODE = 1) */
static operation malloc_op[2] = {vmem_malloc_op, stdlib_malloc_op};
static operation free_op[2] = {vmem_free_op, stdlib_free_op};
static operation realloc_op[2] = {vmem_realloc_op, stdlib_realloc_op};
/*
* vmem_create_pools -- use vmem_create to create pools
*/
static int
vmem_create_pools(struct vmem_bench *vb, struct benchmark_args *args)
{
	unsigned i;
	auto *va = (struct vmem_args *)args->opts;
	/* worst-case per-op footprint: allocation plus a later realloc */
	size_t dsize = args->dsize + va->rsize;
	vb->pool_size =
		dsize * args->n_ops_per_thread * args->n_threads / vb->npools;
	vb->pools = (VMEM **)calloc(vb->npools, sizeof(VMEM *));
	if (vb->pools == nullptr) {
		perror("calloc");
		return -1;
	}
	if (vb->pool_size < VMEM_MIN_POOL * args->n_threads)
		vb->pool_size = VMEM_MIN_POOL * args->n_threads;
	/* multiply pool size to prevent out of memory error */
	vb->pool_size *= FACTOR;
	for (i = 0; i < vb->npools; i++) {
		vb->pools[i] = vmem_create(args->fname, vb->pool_size);
		if (vb->pools[i] == nullptr) {
			perror("vmem_create");
			goto err;
		}
	}
	return 0;
err:
	/* destroy only the pools created so far; i is the failed index
	 * (signed j so the loop terminates even when i == 0) */
	for (int j = i - 1; j >= 0; j--)
		vmem_delete(vb->pools[j]);
	free(vb->pools);
	return -1;
}
/*
* random_values -- calculates values for random sizes
*/
static void
random_values(size_t *alloc_sizes, struct benchmark_args *args, size_t max,
	      size_t min)
{
	/* reseed only when an explicit seed was given, for reproducibility */
	if (args->seed != 0)
		srand(args->seed);
	/* NOTE: RRAND is rand() % (max - min) + min, so callers must ensure
	 * max > min; results are in [min, max) with modulo bias */
	for (size_t i = 0; i < args->n_ops_per_thread; i++)
		alloc_sizes[i] = RRAND(max, min);
}
/*
 * static_values -- fills an array with the same value
*/
static void
static_values(size_t *alloc_sizes, size_t dsize, size_t nops)
{
	/* every one of the nops operations uses the same fixed size */
	size_t *end = alloc_sizes + nops;
	while (alloc_sizes != end)
		*alloc_sizes++ = dsize;
}
/*
* vmem_do_warmup -- perform warm-up by malloc and free for every thread
*/
static int
vmem_do_warmup(struct vmem_bench *vb, struct benchmark_args *args)
{
	unsigned i;
	size_t j;
	int ret = 0;
	/* touch every slot once per thread: allocate all, then free all */
	for (i = 0; i < args->n_threads; i++) {
		for (j = 0; j < args->n_ops_per_thread; j++) {
			if (malloc_op[vb->lib_mode](vb, i, j) != 0) {
				ret = -1;
				fprintf(stderr, "warmup failed");
				break;
			}
		}
		/* free whatever was allocated so far, in reverse order;
		 * runs even after a failure (j marks the failed slot) */
		for (; j > 0; j--)
			free_op[vb->lib_mode](vb, i, j - 1);
	}
	return ret;
}
/*
* malloc_main_op -- main operations for vmem_malloc benchmark
*/
static int
malloc_main_op(struct benchmark *bench, struct operation_info *info)
{
auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
return malloc_op[vb->lib_mode](vb, info->worker->index, info->index);
}
/*
* free_main_op -- main operations for vmem_free benchmark
*/
static int
free_main_op(struct benchmark *bench, struct operation_info *info)
{
auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
return free_op[vb->lib_mode](vb, info->worker->index, info->index);
}
/*
* realloc_main_op -- main operations for vmem_realloc benchmark
*/
static int
realloc_main_op(struct benchmark *bench, struct operation_info *info)
{
auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
return realloc_op[vb->lib_mode](vb, info->worker->index, info->index);
}
/*
* vmem_mix_op -- main operations for vmem_mix benchmark
*/
static int
vmem_mix_op(struct benchmark *bench, struct operation_info *info)
{
	auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
	/* operate on a pre-shuffled slot so the free/malloc pair hits
	 * objects in randomized order */
	unsigned idx = vb->mix_ops[info->index];
	/* free result is deliberately ignored; only malloc can fail here */
	free_op[vb->lib_mode](vb, info->worker->index, idx);
	return malloc_op[vb->lib_mode](vb, info->worker->index, idx);
}
/*
* vmem_init_worker_alloc -- initialize worker for vmem_free and
* vmem_realloc benchmark when mix flag set to false
*/
static int
vmem_init_worker_alloc(struct vmem_bench *vb, struct benchmark_args *args,
		       struct worker_info *worker)
{
	/* pre-allocate one object per operation for this worker */
	size_t i;
	for (i = 0; i < args->n_ops_per_thread; i++) {
		if (malloc_op[vb->lib_mode](vb, worker->index, i) != 0)
			goto out;
	}
	return 0;
out:
	/*
	 * Roll back every allocation that succeeded (slots 0..i-1).  The
	 * previous cleanup freed slot 'i' repeatedly instead of iterating
	 * over 'j', leaking all earlier slots (and mixed a signed counter
	 * with the size_t index).
	 */
	while (i-- > 0)
		free_op[vb->lib_mode](vb, worker->index, i);
	return -1;
}
/*
* vmem_init_worker_alloc_mix -- initialize worker for vmem_free and
* vmem_realloc benchmark when mix flag set to true
*/
static int
vmem_init_worker_alloc_mix(struct vmem_bench *vb, struct benchmark_args *args,
			   struct worker_info *worker)
{
	unsigned i = 0;
	uint64_t j = 0;
	size_t idx = 0;
	/* each thread owns an equal slice of every worker's slot array,
	 * so later mix operations touch objects allocated by others */
	size_t ops_per_thread = args->n_ops_per_thread / args->n_threads;
	/* populate slice [worker->index * ops_per_thread, +ops_per_thread)
	 * in every worker's array, binding each slot to that worker's pool */
	for (i = 0; i < args->n_threads; i++) {
		for (j = 0; j < ops_per_thread; j++) {
			idx = ops_per_thread * worker->index + j;
			vb->workers[i].objs[idx].pool_num =
				vb->workers[i].pool_number;
			if (malloc_op[vb->lib_mode](vb, i, idx) != 0)
				goto out;
		}
	}
	/* remainder slots (n_ops_per_thread not divisible by n_threads) are
	 * allocated in this worker's own array only */
	for (idx = ops_per_thread * args->n_threads;
	     idx < args->n_ops_per_thread; idx++) {
		if (malloc_op[vb->lib_mode](vb, worker->index, idx) != 0)
			goto out_ops;
	}
	return 0;
out_ops:
	/* NOTE(review): this unwinds down to ops_per_thread, not down to
	 * ops_per_thread * n_threads where the remainder starts, so it also
	 * frees slots populated by the loop above -- verify intent */
	for (idx--; idx >= ops_per_thread; idx--)
		free_op[vb->lib_mode](vb, worker->index, idx);
out:
	/* NOTE(review): j only reflects the iteration that failed; earlier
	 * workers' completed slices are unwound with a stale j -- verify */
	for (; i > 0; i--) {
		for (; j > 0; j--) {
			idx = ops_per_thread * worker->index + j - 1;
			free_op[vb->lib_mode](vb, i - 1, idx);
		}
	}
	return -1;
}
/*
 * vmem_init_worker -- initialize worker for vmem_free and
 * vmem_realloc benchmark (dispatches on the mix flag)
 */
static int
vmem_init_worker(struct benchmark *bench, struct benchmark_args *args,
		 struct worker_info *worker)
{
	/* pre-populate this worker's slots; the layout depends on whether
	 * the mix-thread option was requested */
	auto *va = (struct vmem_args *)args->opts;
	auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
	if (va->mix)
		return vmem_init_worker_alloc_mix(vb, args, worker);
	return vmem_init_worker_alloc(vb, args, worker);
}
/*
* vmem_exit -- function for de-initialization benchmark
*/
static int
vmem_exit(struct benchmark *bench, struct benchmark_args *args)
{
	unsigned i;
	auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
	auto *va = (struct vmem_args *)args->opts;
	/* pools exist only when the vmem allocator was selected */
	if (!va->stdlib_alloc) {
		for (i = 0; i < vb->npools; i++) {
			vmem_delete(vb->pools[i]);
		}
		free(vb->pools);
	}
	for (i = 0; i < args->n_threads; i++)
		free(vb->workers[i].objs);
	free(vb->workers);
	free(vb->alloc_sizes);
	/*
	 * realloc_sizes/mix_ops are allocated only by the realloc/mix
	 * variants; free(nullptr) is a no-op, so the previous null guards
	 * were redundant.
	 */
	free(vb->realloc_sizes);
	free(vb->mix_ops);
	free(vb);
	return 0;
}
/*
* vmem_exit_free -- frees worker with freeing elements
*/
static int
vmem_exit_free(struct benchmark *bench, struct benchmark_args *args)
{
auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
for (unsigned i = 0; i < args->n_threads; i++) {
for (size_t j = 0; j < args->n_ops_per_thread; j++) {
free_op[vb->lib_mode](vb, i, j);
}
}
return vmem_exit(bench, args);
}
/*
* vmem_init -- function for initialization benchmark
*/
static int
vmem_init(struct benchmark *bench, struct benchmark_args *args)
{
	unsigned i;
	size_t j;
	assert(bench != nullptr);
	assert(args != nullptr);
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	auto *vb = (struct vmem_bench *)calloc(1, sizeof(struct vmem_bench));
	if (vb == nullptr) {
		perror("malloc");
		return -1;
	}
	pmembench_set_priv(bench, vb);
	struct vmem_worker *vw;
	auto *va = (struct vmem_args *)args->opts;
	vb->alloc_sizes = nullptr;
	vb->lib_mode = va->stdlib_alloc ? STDLIB_MODE : VMEM_MODE;
	if (type == TYPE_DEVDAX && va->pool_per_thread) {
		fprintf(stderr, "cannot use device dax for multiple pools\n");
		goto err;
	}
	/* vmem_create() needs a directory, not an existing regular file */
	if (type == TYPE_NORMAL) {
		fprintf(stderr, "Path cannot point to existing file\n");
		goto err;
	}
	if (type == NOT_EXISTS && !va->stdlib_alloc &&
	    mkdir(args->fname, DIR_MODE) != 0)
		goto err;
	vb->npools = va->pool_per_thread ? args->n_threads : 1;
	/* -1 means "no minimum given", i.e. fixed allocation size */
	vb->rand_alloc = va->min_size != -1;
	if (vb->rand_alloc && (size_t)va->min_size > args->dsize) {
		fprintf(stderr, "invalid allocation size\n");
		goto err;
	}
	/* the vmem library can create only a limited number of pools */
	if (va->pool_per_thread && args->n_threads > MAX_POOLS) {
		fprintf(stderr, "Maximum number of threads is %d for"
				"pool-per-thread option\n",
			MAX_POOLS);
		goto err;
	}
	/* initializes buffers for operations for every thread */
	vb->workers = (struct vmem_worker *)calloc(args->n_threads,
						   sizeof(struct vmem_worker));
	if (vb->workers == nullptr) {
		perror("calloc");
		goto err;
	}
	for (i = 0; i < args->n_threads; i++) {
		vw = &vb->workers[i];
		vw->objs = (struct item *)calloc(args->n_ops_per_thread,
						 sizeof(struct item));
		if (vw->objs == nullptr) {
			perror("calloc");
			/*
			 * Jump to err_free_buf (not past it): the previous
			 * code skipped the objs cleanup loop here, leaking
			 * the objs arrays of workers 0..i-1.
			 */
			goto err_free_buf;
		}
		vw->pool_number = va->pool_per_thread ? i : 0;
		for (j = 0; j < args->n_ops_per_thread; j++)
			vw->objs[j].pool_num = vw->pool_number;
	}
	if ((vb->alloc_sizes = (size_t *)malloc(
		     sizeof(size_t) * args->n_ops_per_thread)) == nullptr) {
		perror("malloc");
		goto err_free_buf;
	}
	if (vb->rand_alloc)
		random_values(vb->alloc_sizes, args, args->dsize,
			      (size_t)va->min_size);
	else
		static_values(vb->alloc_sizes, args->dsize,
			      args->n_ops_per_thread);
	if (!va->stdlib_alloc && vmem_create_pools(vb, args) != 0)
		goto err_free_sizes;
	if (!va->no_warmup && vmem_do_warmup(vb, args) != 0)
		goto err_free_all;
	return 0;
err_free_all:
	if (!va->stdlib_alloc) {
		for (i = 0; i < vb->npools; i++)
			vmem_delete(vb->pools[i]);
		free(vb->pools);
	}
err_free_sizes:
	free(vb->alloc_sizes);
err_free_buf:
	/* frees objs of workers [0, i); i == n_threads unless a calloc
	 * failed mid-loop (that worker's objs slot is nullptr) */
	for (j = i; j > 0; j--)
		free(vb->workers[j - 1].objs);
	free(vb->workers);
err:
	free(vb);
	return -1;
}
/*
* vmem_realloc_init -- function for initialization vmem_realloc benchmark
*/
static int
vmem_realloc_init(struct benchmark *bench, struct benchmark_args *args)
{
	/* reuse the common setup (pools, workers, allocation sizes) */
	if (vmem_init(bench, args) != 0)
		return -1;
	auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
	auto *va = (struct vmem_args *)args->opts;
	/* -1 means "no minimum given", i.e. fixed reallocation size */
	vb->rand_realloc = va->min_rsize != -1;
	if (vb->rand_realloc && va->min_rsize > va->rsize) {
		fprintf(stderr, "invalid reallocation size\n");
		goto err;
	}
	if ((vb->realloc_sizes = (size_t *)calloc(args->n_ops_per_thread,
						  sizeof(size_t))) == nullptr) {
		perror("calloc");
		goto err;
	}
	if (vb->rand_realloc)
		random_values(vb->realloc_sizes, args, (size_t)va->rsize,
			      (size_t)va->min_rsize);
	else
		static_values(vb->realloc_sizes, (size_t)va->rsize,
			      args->n_ops_per_thread);
	return 0;
err:
	/* vmem_exit tears down everything vmem_init created */
	vmem_exit(bench, args);
	return -1;
}
/*
 * vmem_mix_init -- function for initialization of the vmem_mix benchmark
 */
static int
vmem_mix_init(struct benchmark *bench, struct benchmark_args *args)
{
	/* reuse the common setup, then build a shuffled index table */
	if (vmem_init(bench, args) != 0)
		return -1;
	size_t i;
	unsigned idx, tmp;
	auto *vb = (struct vmem_bench *)pmembench_get_priv(bench);
	if ((vb->mix_ops = (unsigned *)calloc(args->n_ops_per_thread,
					      sizeof(unsigned))) == nullptr) {
		perror("calloc");
		goto err;
	}
	/* start from the identity permutation */
	for (i = 0; i < args->n_ops_per_thread; i++)
		vb->mix_ops[i] = (unsigned)i;
	if (args->seed != 0)
		srand(args->seed);
	/*
	 * Fisher-Yates shuffle.  The previous code copied mix_ops[idx] into
	 * mix_ops[i] before writing tmp back, which duplicated entries
	 * instead of swapping them, so mix_ops was not a permutation and
	 * some slots were never exercised.
	 */
	for (i = args->n_ops_per_thread; i > 1; i--) {
		idx = (unsigned)RRAND(i, 0); /* uniform in [0, i) */
		tmp = vb->mix_ops[idx];
		vb->mix_ops[idx] = vb->mix_ops[i - 1];
		vb->mix_ops[i - 1] = tmp;
	}
	return 0;
err:
	vmem_exit(bench, args);
	return -1;
}
/* benchmark descriptors, filled in and registered by the constructor below */
static struct benchmark_info vmem_malloc_bench;
static struct benchmark_info vmem_mix_bench;
static struct benchmark_info vmem_free_bench;
static struct benchmark_info vmem_realloc_bench;
/* shared CLO table; each benchmark registers a prefix of it (see nclos) */
static struct benchmark_clo vmem_clo[7];
CONSTRUCTOR(vmem_persist_constructor)
void
vmem_persist_constructor(void)
{
	/*
	 * vmem_persist_constructor -- describe the command line options and
	 * register the four vmem benchmarks at library load time.
	 */
	vmem_clo[0].opt_short = 'a';
	vmem_clo[0].opt_long = "stdlib-alloc";
	vmem_clo[0].descr = "Use stdlib allocator";
	vmem_clo[0].type = CLO_TYPE_FLAG;
	vmem_clo[0].off = clo_field_offset(struct vmem_args, stdlib_alloc);
	vmem_clo[1].opt_short = 'w';
	vmem_clo[1].opt_long = "no-warmup";
	vmem_clo[1].descr = "Do not perform warmup";
	vmem_clo[1].type = CLO_TYPE_FLAG;
	vmem_clo[1].off = clo_field_offset(struct vmem_args, no_warmup);
	vmem_clo[2].opt_short = 'p';
	vmem_clo[2].opt_long = "pool-per-thread";
	vmem_clo[2].descr = "Create separate pool per thread";
	vmem_clo[2].type = CLO_TYPE_FLAG;
	vmem_clo[2].off = clo_field_offset(struct vmem_args, pool_per_thread);
	vmem_clo[3].opt_short = 'm';
	vmem_clo[3].opt_long = "alloc-min";
	vmem_clo[3].type = CLO_TYPE_INT;
	vmem_clo[3].descr = "Min allocation size";
	vmem_clo[3].off = clo_field_offset(struct vmem_args, min_size);
	vmem_clo[3].def = "-1";
	vmem_clo[3].type_int.size = clo_field_size(struct vmem_args, min_size);
	vmem_clo[3].type_int.base = CLO_INT_BASE_DEC;
	vmem_clo[3].type_int.min = (-1);
	vmem_clo[3].type_int.max = INT_MAX;
	/*
	 * number of command line arguments is decremented to make below
	 * options available only for vmem_free and vmem_realloc benchmark
	 */
	vmem_clo[4].opt_short = 'T';
	vmem_clo[4].opt_long = "mix-thread";
	vmem_clo[4].descr = "Reallocate object allocated "
			    "by another thread";
	vmem_clo[4].type = CLO_TYPE_FLAG;
	vmem_clo[4].off = clo_field_offset(struct vmem_args, mix);
	/*
	 * number of command line arguments is decremented to make below
	 * options available only for vmem_realloc benchmark
	 */
	vmem_clo[5].opt_short = 'r';
	vmem_clo[5].opt_long = "realloc-size";
	vmem_clo[5].type = CLO_TYPE_UINT;
	vmem_clo[5].descr = "Reallocation size";
	vmem_clo[5].off = clo_field_offset(struct vmem_args, rsize);
	vmem_clo[5].def = "512";
	vmem_clo[5].type_uint.size = clo_field_size(struct vmem_args, rsize);
	vmem_clo[5].type_uint.base = CLO_INT_BASE_DEC;
	vmem_clo[5].type_uint.min = 0;
	vmem_clo[5].type_uint.max = ~0;
	vmem_clo[6].opt_short = 'R';
	vmem_clo[6].opt_long = "realloc-min";
	vmem_clo[6].type = CLO_TYPE_INT;
	vmem_clo[6].descr = "Min reallocation size";
	vmem_clo[6].off = clo_field_offset(struct vmem_args, min_rsize);
	vmem_clo[6].def = "-1";
	vmem_clo[6].type_int.size = clo_field_size(struct vmem_args, min_rsize);
	vmem_clo[6].type_int.base = CLO_INT_BASE_DEC;
	vmem_clo[6].type_int.min = -1;
	vmem_clo[6].type_int.max = INT_MAX;
	/* vmem_malloc: allocation only, freed in bulk on exit (nclos - 3) */
	vmem_malloc_bench.name = "vmem_malloc";
	vmem_malloc_bench.brief = "vmem_malloc() benchmark";
	vmem_malloc_bench.init = vmem_init;
	vmem_malloc_bench.exit = vmem_exit_free;
	vmem_malloc_bench.multithread = true;
	vmem_malloc_bench.multiops = true;
	vmem_malloc_bench.init_worker = nullptr;
	vmem_malloc_bench.free_worker = nullptr;
	vmem_malloc_bench.operation = malloc_main_op;
	vmem_malloc_bench.clos = vmem_clo;
	vmem_malloc_bench.nclos = ARRAY_SIZE(vmem_clo) - 3;
	vmem_malloc_bench.opts_size = sizeof(struct vmem_args);
	vmem_malloc_bench.rm_file = true;
	vmem_malloc_bench.allow_poolset = false;
	REGISTER_BENCHMARK(vmem_malloc_bench);
	/* vmem_mix: interleaved free+malloc on pre-allocated objects */
	vmem_mix_bench.name = "vmem_mix";
	vmem_mix_bench.brief = "vmem_malloc() and vmem_free() "
			       "bechmark";
	vmem_mix_bench.init = vmem_mix_init;
	vmem_mix_bench.exit = vmem_exit_free;
	vmem_mix_bench.multithread = true;
	vmem_mix_bench.multiops = true;
	vmem_mix_bench.init_worker = vmem_init_worker;
	vmem_mix_bench.free_worker = nullptr;
	vmem_mix_bench.operation = vmem_mix_op;
	vmem_mix_bench.clos = vmem_clo;
	vmem_mix_bench.nclos = ARRAY_SIZE(vmem_clo) - 3;
	vmem_mix_bench.opts_size = sizeof(struct vmem_args);
	vmem_mix_bench.rm_file = true;
	vmem_mix_bench.allow_poolset = false;
	REGISTER_BENCHMARK(vmem_mix_bench);
	/* vmem_free: objects allocated per-worker, timed frees (nclos - 2) */
	vmem_free_bench.name = "vmem_free";
	vmem_free_bench.brief = "vmem_free() benchmark";
	vmem_free_bench.init = vmem_init;
	vmem_free_bench.exit = vmem_exit;
	vmem_free_bench.multithread = true;
	vmem_free_bench.multiops = true;
	vmem_free_bench.init_worker = vmem_init_worker;
	vmem_free_bench.free_worker = nullptr;
	vmem_free_bench.operation = free_main_op;
	vmem_free_bench.clos = vmem_clo;
	vmem_free_bench.nclos = ARRAY_SIZE(vmem_clo) - 2;
	vmem_free_bench.opts_size = sizeof(struct vmem_args);
	vmem_free_bench.rm_file = true;
	vmem_free_bench.allow_poolset = false;
	REGISTER_BENCHMARK(vmem_free_bench);
	/* vmem_realloc: uses the full CLO set including realloc sizes */
	vmem_realloc_bench.name = "vmem_realloc";
	vmem_realloc_bench.brief = "Multithread benchmark vmem - "
				   "realloc";
	vmem_realloc_bench.init = vmem_realloc_init;
	vmem_realloc_bench.exit = vmem_exit_free;
	vmem_realloc_bench.multithread = true;
	vmem_realloc_bench.multiops = true;
	vmem_realloc_bench.init_worker = vmem_init_worker;
	vmem_realloc_bench.free_worker = nullptr;
	vmem_realloc_bench.operation = realloc_main_op;
	vmem_realloc_bench.clos = vmem_clo;
	vmem_realloc_bench.nclos = ARRAY_SIZE(vmem_clo);
	vmem_realloc_bench.opts_size = sizeof(struct vmem_args);
	vmem_realloc_bench.rm_file = true;
	vmem_realloc_bench.allow_poolset = false;
	REGISTER_BENCHMARK(vmem_realloc_bench);
};
| 22,231 | 27.723514 | 78 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/benchmark_time.cpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* benchmark_time.cpp -- benchmark_time module definitions
*/
#include "benchmark_time.hpp"
#include "os.h"
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#define NSECPSEC 1000000000
/*
* benchmark_time_get -- get timestamp from clock source
*/
void
benchmark_time_get(benchmark_time_t *time)
{
	/* CLOCK_MONOTONIC: unaffected by wall-clock adjustments (NTP etc.) */
	os_clock_gettime(CLOCK_MONOTONIC, time);
}
/*
* benchmark_time_diff -- get time interval
*/
void
benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1,
		    benchmark_time_t *t2)
{
	/* compute d = t2 - t1; asserts t2 is not earlier than t1, so the
	 * argument order matters */
	long long nsecs = (t2->tv_sec - t1->tv_sec) * NSECPSEC + t2->tv_nsec -
		t1->tv_nsec;
	assert(nsecs >= 0);
	d->tv_sec = nsecs / NSECPSEC;
	d->tv_nsec = nsecs % NSECPSEC;
}
/*
* benchmark_time_get_secs -- get total number of seconds
*/
double
benchmark_time_get_secs(benchmark_time_t *t)
{
	/* whole seconds plus the fractional part carried in tv_nsec */
	double secs = (double)t->tv_sec;
	double frac = (double)t->tv_nsec / NSECPSEC;
	return secs + frac;
}
/*
* benchmark_time_get_nsecs -- get total number of nanoseconds
*/
unsigned long long
benchmark_time_get_nsecs(benchmark_time_t *t)
{
	/*
	 * Widen tv_sec before multiplying: on targets where time_t is a
	 * 32-bit type, t->tv_sec * NSECPSEC would overflow in the narrow
	 * type before being stored into the 64-bit result.
	 */
	unsigned long long ret = (unsigned long long)t->tv_sec * NSECPSEC;
	ret += (unsigned long long)t->tv_nsec;
	return ret;
}
/*
* benchmark_time_compare -- compare two moments in time
*/
int
benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2)
{
	/* compare seconds first; nanoseconds only break ties */
	long long diff;
	if (t1->tv_sec == t2->tv_sec)
		diff = (long long)t1->tv_nsec - (long long)t2->tv_nsec;
	else
		diff = (long long)t1->tv_sec - (long long)t2->tv_sec;
	/*
	 * Normalize to -1/0/1: casting the raw long long difference to int
	 * (as the previous code did) could truncate and even flip the sign
	 * for very distant timestamps.
	 */
	return (diff > 0) - (diff < 0);
}
/*
* benchmark_time_set -- set time using number of nanoseconds
*/
void
benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs)
{
	/* split a flat nanosecond count into the sec/nsec pair */
	time->tv_nsec = nsecs % NSECPSEC;
	time->tv_sec = nsecs / NSECPSEC;
}
/*
* number of samples used to calculate average time required to get a current
* time from the system
*/
#define N_PROBES_GET_TIME 10000000UL
/*
* benchmark_get_avg_get_time -- calculates average time required to get the
* current time from the system in nanoseconds
*/
unsigned long long
benchmark_get_avg_get_time(void)
{
	benchmark_time_t time;
	benchmark_time_t start;
	benchmark_time_t stop;
	/* measure N_PROBES_GET_TIME back-to-back clock reads and average
	 * the total elapsed time over the probe count */
	benchmark_time_get(&start);
	for (size_t i = 0; i < N_PROBES_GET_TIME; i++) {
		benchmark_time_get(&time);
	}
	benchmark_time_get(&stop);
	/* 'time' is reused here as the start/stop interval */
	benchmark_time_diff(&time, &start, &stop);
	unsigned long long avg =
		benchmark_time_get_nsecs(&time) / N_PROBES_GET_TIME;
	return avg;
}
| 3,926 | 26.851064 | 78 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/obj_lanes.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_lanes.cpp -- lane benchmark definition
*/
#include <cassert>
#include <cerrno>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
/* an internal libpmemobj code */
#include "lane.h"
/*
* The number of times to repeat the operation, used to get more accurate
* results, because the operation time was minimal compared to the framework
* overhead.
*/
#define OPERATION_REPEAT_COUNT 10000
/*
* obj_bench - variables used in benchmark, passed within functions
*/
struct obj_bench {
	PMEMobjpool *pop;     /* persistent pool handle */
	struct prog_args *pa; /* benchmark-specific args (cast of args->opts) */
};
/*
* lanes_init -- benchmark initialization
*/
static int
lanes_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench));
	if (ob == nullptr) {
		perror("malloc");
		return -1;
	}
	pmembench_set_priv(bench, ob);
	ob->pa = (struct prog_args *)args->opts;
	size_t psize;
	/* poolset / device dax files carry their own size; presumably
	 * psize = 0 tells pmemobj_create to use it -- verify */
	if (args->is_poolset || type == TYPE_DEVDAX)
		psize = 0;
	else
		psize = PMEMOBJ_MIN_POOL;
	/* create pmemobj pool */
	ob->pop = pmemobj_create(args->fname, "obj_lanes", psize, args->fmode);
	if (ob->pop == nullptr) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		goto err;
	}
	return 0;
err:
	free(ob);
	return -1;
}
/*
* lanes_exit -- benchmark clean up
*/
static int
lanes_exit(struct benchmark *bench, struct benchmark_args *args)
{
	/* tear down the state created in lanes_init */
	auto *priv = (struct obj_bench *)pmembench_get_priv(bench);
	pmemobj_close(priv->pop);
	free(priv);
	return 0;
}
/*
* lanes_op -- performs the lane hold and release operations
*/
static int
lanes_op(struct benchmark *bench, struct operation_info *info)
{
	auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
	struct lane *lane;
	/* repeat OPERATION_REPEAT_COUNT times to amortize framework
	 * overhead; the acquired lane is never used -- only the
	 * hold/release pair is being measured */
	for (int i = 0; i < OPERATION_REPEAT_COUNT; i++) {
		lane_hold(ob->pop, &lane);
		lane_release(ob->pop);
	}
	return 0;
}
static struct benchmark_info lanes_info;
CONSTRUCTOR(obj_lines_constructor)
void
obj_lines_constructor(void)
{
	/* describe and register the obj_lanes benchmark at load time */
	lanes_info.name = "obj_lanes";
	lanes_info.brief = "Benchmark for internal lanes "
			   "operation";
	lanes_info.init = lanes_init;
	lanes_info.exit = lanes_exit;
	lanes_info.multithread = true;
	lanes_info.multiops = true;
	lanes_info.operation = lanes_op;
	lanes_info.measure_time = true;
	lanes_info.clos = NULL;  /* no benchmark-specific options */
	lanes_info.nclos = 0;
	lanes_info.opts_size = 0;
	lanes_info.rm_file = true;
	lanes_info.allow_poolset = true;
	REGISTER_BENCHMARK(lanes_info);
}
| 4,333 | 25.753086 | 76 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/benchmark.hpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* benchmark.hpp -- This file contains interface for creating benchmarks to the
* pmembench framework. The _most_ important data structure is
* struct benchmark_info which should be properly filled and registered by the
* benchmark. Some fields should be filled by meta-data and information about
* the benchmark like: name, brief description, supported operation modes etc.
* The other group of fields are function callbacks which may be implemented by
* the benchmark. Some callbacks are required, others are optional. This is
* indicated in the structure description.
*
* To register a benchmark you can use the special macro
* REGISTER_BENCHMARK() which takes static benchmark_info data structure as an
* argument. You can also use the pmembench_register() function. Please note
* that registering a benchmark should be done at initialization time. You can
* achieve this by specifying pmembench_init macro in function attributes:
*
* static void pmembench_init my_benchmark_init()
* {
* pmembench_register(&my_benchmark);
* }
*
* However using the REGISTER_BENCHMARK() macro is recommended.
*/
#ifndef _BENCHMARK_H
#define _BENCHMARK_H
#include <climits>
#include <cstdbool>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <util.h>
#include "benchmark_time.hpp"
#include "os.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
#define RRAND(max, min) (rand() % ((max) - (min)) + (min))
#define RRAND_R(seed, max, min) (os_rand_r(seed) % ((max) - (min)) + (min))
struct benchmark;
/*
* benchmark_args - Arguments for benchmark.
*
* It contains set of common arguments and pointer to benchmark's specific
* arguments which are automatically processed by framework according to
* clos, nclos and opt_size in benchmark_info structure.
*/
struct benchmark_args {
const char *fname; /* path to test file */
size_t fsize; /* size of test file */
bool is_poolset; /* test file is a poolset */
bool is_dynamic_poolset; /* test file is directory in which
benchmark creates reusable files */
mode_t fmode; /* test file's permissions */
unsigned n_threads; /* number of working threads */
size_t n_ops_per_thread; /* number of operations per thread */
bool thread_affinity; /* set worker threads CPU affinity mask */
ssize_t main_affinity; /* main thread affinity */
char *affinity_list; /* set CPU affinity order */
size_t dsize; /* data size */
unsigned seed; /* PRNG seed */
unsigned repeats; /* number of repeats of one scenario */
unsigned min_exe_time; /* minimal execution time */
bool help; /* print help for benchmark */
void *opts; /* benchmark specific arguments */
};
/*
* benchmark_results - Benchmark's execution results.
*/
struct benchmark_results {
uint64_t nbytes; /* number of bytes processed */
uint64_t nops; /* number of operations executed */
benchmark_time_t time; /* total execution time */
};
/*
* struct results -- statistics for total measurements
*/
struct results {
double min;
double max;
double avg;
double std_dev;
double med;
};
/*
* struct latency -- statistics for latency measurements
*/
struct latency {
uint64_t max;
uint64_t min;
uint64_t avg;
double std_dev;
uint64_t pctl50_0p;
uint64_t pctl99_0p;
uint64_t pctl99_9p;
};
/*
* struct thread_results -- results of a single thread
*/
struct thread_results {
benchmark_time_t beg;
benchmark_time_t end;
benchmark_time_t end_op[];
};
/*
* struct bench_results -- results of the whole benchmark
*/
struct bench_results {
struct thread_results **thres;
};
/*
* struct total_results -- results and statistics of the whole benchmark
*/
struct total_results {
size_t nrepeats;
size_t nthreads;
size_t nops;
double nopsps;
struct results total;
struct latency latency;
struct bench_results *res;
};
/*
* Command Line Option integer value base.
*/
#define CLO_INT_BASE_NONE 0x0
#define CLO_INT_BASE_DEC 0x1
#define CLO_INT_BASE_HEX 0x2
#define CLO_INT_BASE_OCT 0x4
/*
* Command Line Option type.
*/
enum clo_type {
CLO_TYPE_FLAG,
CLO_TYPE_STR,
CLO_TYPE_INT,
CLO_TYPE_UINT,
CLO_TYPE_MAX,
};
/*
* Description of command line option.
*
* This structure is used to declare command line options by the benchmark
* which will be automatically parsed by the framework.
*
* opt_short : Short option char. If there is no short option write 0.
* opt_long : Long option string.
* descr : Description of command line option.
* off : Offset in data structure in which the value should be stored.
* type : Type of command line option.
* def : Default value. If set to NULL, this options is required.
* ignore_in_res: Do not print in results.
* check : Optional callback for checking the command line option value.
* type_int : Parameters for signed integer.
* type_uint : Parameters for unsigned integer.
* type_str : Parameters for string.
*
* size : Size of integer value. Valid values: 1, 2, 4, 8.
* base : Integer base system from which the parsing should be
* performed. This field may be used as bit mask by logically
* adding different base types.
* limit_min : Indicates whether value should be limited by the minimum
* value.
* limit_max : Indicates whether value should be limited by the maximum
* value.
* min : Minimum value when limit_min is set.
* max : Maximum value when limit_min is set.
*
* alloc : If set to true the framework should allocate memory for the
* value. The memory will be freed by the framework at the end of
* execution. Otherwise benchmark must provide valid pointer in
* opt_var and max_size parameter must be set properly.
* max_size : Maximum size of string.
*/
struct benchmark_clo {
int opt_short;
const char *opt_long;
enum clo_type type;
const char *descr;
size_t off;
const char *def;
bool ignore_in_res;
struct {
size_t size;
int base;
int64_t min;
int64_t max;
} type_int;
struct {
size_t size;
int base;
uint64_t min;
uint64_t max;
} type_uint;
int used;
};
/*
 * clo_field_offset -- byte offset of field f within struct s, used to tell
 * the CLO parser where to store a parsed value. Implemented with offsetof()
 * instead of the classic ((size_t)&((s *)0)->f) idiom, which dereferences a
 * null pointer and is undefined behavior in both C and C++.
 */
#define clo_field_offset(s, f) offsetof(s, f)
/* clo_field_size -- size of field f within struct s (sizeof is unevaluated) */
#define clo_field_size(s, f) (sizeof(((s *)0)->f))
/*
* worker_info - Worker thread's information structure.
*/
struct worker_info {
size_t index; /* index of worker thread */
struct operation_info *opinfo; /* operation info structure */
size_t nops; /* number of operations */
void *priv; /* worker's private data */
benchmark_time_t beg; /* start time */
benchmark_time_t end; /* end time */
};
/*
* operation_info - Information about operation.
*/
struct operation_info {
struct worker_info *worker; /* worker's info */
struct benchmark_args *args; /* benchmark arguments */
size_t index; /* operation's index */
benchmark_time_t end; /* operation's end time */
};
/*
* struct benchmark_info -- benchmark descriptor
* name : Name of benchmark.
* brief : Brief description of benchmark.
* clos : Command line options which will be automatically parsed by
* framework.
* nclos : Number of command line options.
* opts_size : Size of data structure where the parsed values should be
* stored in.
* print_help : Callback for printing help message.
* pre_init : Function for initialization of the benchmark before parsing
* command line arguments.
* init : Function for initialization of the benchmark after parsing
* command line arguments.
* exit : Function for de-initialization of the benchmark.
* multithread : Indicates whether the benchmark operation function may be
* run in many threads.
* multiops : Indicates whether the benchmark operation function may be
* run many time in a loop.
* measure_time : Indicates whether the benchmark framework should measure the
* execution time of operation function. If set to false, the
* benchmark must report the execution time by itself.
* init_worker : Callback for initialization thread specific data. Invoked in
* a single thread for every thread worker.
* operation : Callback function which does the main job of benchmark.
* rm_file : Indicates whether the test file should be removed by
* framework before the init function will be called.
* allow_poolset: Indicates whether benchmark may use poolset files.
* If set to false and fname points to a poolset, an error
* will be returned.
* According to multithread and single_operation flags it may be
* invoked in different ways:
* +-------------+----------+-------------------------------------+
* | multithread | multiops | description |
* +-------------+----------+-------------------------------------+
* | false | false | invoked once, in one thread |
* +-------------+----------+-------------------------------------+
* | false | true | invoked many times, in one thread |
* +-------------+----------+-------------------------------------+
* | true | false | invoked once, in many threads |
* +-------------+----------+-------------------------------------+
* | true | true | invoked many times, in many threads |
* +-------------+----------+-------------------------------------+
*
*/
struct benchmark_info {
const char *name;
const char *brief;
struct benchmark_clo *clos;
size_t nclos;
size_t opts_size;
void (*print_help)(struct benchmark *bench);
int (*pre_init)(struct benchmark *bench);
int (*init)(struct benchmark *bench, struct benchmark_args *args);
int (*exit)(struct benchmark *bench, struct benchmark_args *args);
int (*init_worker)(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker);
void (*free_worker)(struct benchmark *bench,
struct benchmark_args *args,
struct worker_info *worker);
int (*operation)(struct benchmark *bench, struct operation_info *info);
void (*print_extra_headers)();
void (*print_extra_values)(struct benchmark *bench,
struct benchmark_args *args,
struct total_results *res);
bool multithread;
bool multiops;
bool measure_time;
bool rm_file;
bool allow_poolset;
bool print_bandwidth;
};
void *pmembench_get_priv(struct benchmark *bench);
void pmembench_set_priv(struct benchmark *bench, void *priv);
struct benchmark_info *pmembench_get_info(struct benchmark *bench);
int pmembench_register(struct benchmark_info *bench_info);
#define REGISTER_BENCHMARK(bench) \
if (pmembench_register(&(bench))) { \
fprintf(stderr, "Unable to register benchmark '%s'\n", \
(bench).name); \
}
#endif /* _BENCHMARK_H */
| 12,541 | 34.429379 | 80 |
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/obj_locks.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_locks.cpp -- main source file for PMEM locks benchmark
*/
#include <cassert>
#include <cerrno>
#include "benchmark.hpp"
#include "libpmemobj.h"
#include "file.h"
#include "lane.h"
#include "list.h"
#include "memops.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "pmalloc.h"
#include "sync.h"
struct prog_args {
bool use_system_threads; /* use system locks instead of PMEM locks */
unsigned n_locks; /* number of mutex/rwlock objects */
bool run_id_increment; /* increment run_id after each lock/unlock */
uint64_t runid_initial_value; /* initial value of run_id */
char *lock_mode; /* "1by1" or "all-lock" */
char *lock_type; /* "mutex", "rwlock" or "ram-mutex" */
bool use_rdlock; /* use read lock, instead of write lock */
};
/*
* mutex similar to PMEMmutex, but with os_mutex_t in RAM
*/
typedef union padded_volatile_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
os_mutex_t *mutexp; /* pointer to os_thread mutex in RAM */
} volatile_pmemmutex;
} PMEM_volatile_mutex;
typedef union lock_union {
PMEMmutex pm_mutex;
PMEMrwlock pm_rwlock;
PMEM_volatile_mutex pm_vmutex;
os_mutex_t pt_mutex;
os_rwlock_t pt_rwlock;
} lock_t;
POBJ_LAYOUT_BEGIN(pmembench_lock_layout);
POBJ_LAYOUT_ROOT(pmembench_lock_layout, struct my_root);
POBJ_LAYOUT_TOID(pmembench_lock_layout, lock_t);
POBJ_LAYOUT_END(pmembench_lock_layout);
/*
* my_root -- root object structure
*/
struct my_root {
TOID(lock_t) locks; /* an array of locks */
};
/*
* lock usage
*/
enum operation_mode {
OP_MODE_1BY1, /* lock and unlock one lock at a time */
OP_MODE_ALL_LOCK, /* grab all locks, then unlock them all */
OP_MODE_MAX,
};
/*
* lock type
*/
enum benchmark_mode {
BENCH_MODE_MUTEX, /* PMEMmutex vs. os_mutex_t */
BENCH_MODE_RWLOCK, /* PMEMrwlock vs. os_rwlock_t */
BENCH_MODE_VOLATILE_MUTEX, /* PMEMmutex with os_thread mutex in RAM */
BENCH_MODE_MAX
};
struct mutex_bench;
struct bench_ops {
int (*bench_init)(struct mutex_bench *);
int (*bench_exit)(struct mutex_bench *);
int (*bench_op)(struct mutex_bench *);
};
/*
* mutex_bench -- stores variables used in benchmark, passed within functions
*/
struct mutex_bench {
PMEMobjpool *pop; /* pointer to the persistent pool */
TOID(struct my_root) root; /* OID of the root object */
struct prog_args *pa; /* prog_args structure */
enum operation_mode lock_mode; /* lock usage mode */
enum benchmark_mode lock_type; /* lock type */
lock_t *locks; /* pointer to the array of locks */
struct bench_ops *ops;
};
#define GET_VOLATILE_MUTEX(pop, mutexp) \
(os_mutex_t *)get_lock( \
(pop)->run_id, &(mutexp)->volatile_pmemmutex.runid, \
(mutexp)->volatile_pmemmutex.mutexp, \
(int (*)(void **lock, void *arg))volatile_mutex_init)
typedef int (*lock_fun_wrapper)(PMEMobjpool *pop, void *lock);
/*
* bench_operation_1by1 -- acquire lock and unlock release locks
*/
static void
bench_operation_1by1(lock_fun_wrapper flock, lock_fun_wrapper funlock,
		     struct mutex_bench *mb, PMEMobjpool *pop)
{
	/* take and immediately release each lock in turn */
	for (unsigned idx = 0; idx < mb->pa->n_locks; ++idx) {
		void *lock = &mb->locks[idx];

		flock(pop, lock);
		funlock(pop, lock);
	}
}
/*
* bench_operation_all_lock -- acquire all locks and release all locks
*/
static void
bench_operation_all_lock(lock_fun_wrapper flock, lock_fun_wrapper funlock,
			 struct mutex_bench *mb, PMEMobjpool *pop)
{
	const unsigned nlocks = mb->pa->n_locks;

	/* first grab every lock ... */
	for (unsigned idx = 0; idx < nlocks; ++idx)
		flock(pop, (void *)&mb->locks[idx]);

	/* ... then release them all, in the same order */
	for (unsigned idx = 0; idx < nlocks; ++idx)
		funlock(pop, (void *)&mb->locks[idx]);
}
/*
* get_lock -- atomically initialize and return a lock
*/
static void *
get_lock(uint64_t pop_runid, volatile uint64_t *runid, void *lock,
	 int (*init_lock)(void **lock, void *arg))
{
	uint64_t tmp_runid;
	/*
	 * Spin until *runid matches the pool's run_id, i.e. until the lock
	 * is known to be initialized for the current run of the pool.
	 */
	while ((tmp_runid = *runid) != pop_runid) {
		/*
		 * (pop_runid - 1) acts as an "initialization in progress"
		 * marker; any other stale value means this thread may try to
		 * claim the initialization job itself.
		 */
		if ((tmp_runid != (pop_runid - 1))) {
			/* claim the job by CAS-ing in the in-progress marker */
			if (util_bool_compare_and_swap64(runid, tmp_runid,
							 (pop_runid - 1))) {
				if (init_lock(&lock, nullptr)) {
					/* init failed: clear marker, bail */
					util_fetch_and_and64(runid, 0);
					return nullptr;
				}
				/* publish the fully initialized lock */
				if (util_bool_compare_and_swap64(
					    runid, (pop_runid - 1),
					    pop_runid) == 0) {
					return nullptr;
				}
			}
		}
	}
	return lock;
}
/*
* volatile_mutex_init -- initialize the volatile mutex object
*
* Allocate memory for the os_thread mutex and initialize it.
* Set the runid to the same value as in the memory pool.
*/
/*
 * Lazily allocates the RAM-resident os_thread mutex (if not already
 * allocated) and initializes it. Returns 0 on success or an errno value.
 * The second parameter exists to match the init callback signature used by
 * get_lock() and is ignored.
 */
static int
volatile_mutex_init(os_mutex_t **mutexp, void *attr)
{
	bool allocated = false;

	if (*mutexp == nullptr) {
		*mutexp = (os_mutex_t *)malloc(sizeof(os_mutex_t));
		if (*mutexp == nullptr) {
			perror("volatile_mutex_init alloc");
			return ENOMEM;
		}
		allocated = true;
	}

	int ret = os_mutex_init(*mutexp);
	if (ret != 0 && allocated) {
		/*
		 * Fix: the original leaked the freshly allocated mutex and
		 * left *mutexp pointing at uninitialized memory when
		 * os_mutex_init failed.
		 */
		free(*mutexp);
		*mutexp = nullptr;
	}

	return ret;
}
/*
* volatile_mutex_lock -- initialize the mutex object if needed and lock it
*/
static int
volatile_mutex_lock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
	/* lazily (re)initialize the RAM mutex for this pool run */
	os_mutex_t *m = GET_VOLATILE_MUTEX(pop, mutexp);
	if (m == nullptr)
		return EINVAL;

	return os_mutex_lock(m);
}
/*
* volatile_mutex_unlock -- unlock the mutex
*/
static int
volatile_mutex_unlock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
	/* GET_VOLATILE_MUTEX already yields an os_mutex_t * */
	os_mutex_t *m = GET_VOLATILE_MUTEX(pop, mutexp);
	if (m == nullptr)
		return EINVAL;

	return os_mutex_unlock(m);
}
/*
* volatile_mutex_destroy -- destroy os_thread mutex and release memory
*/
static int
volatile_mutex_destroy(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
	os_mutex_t *m = GET_VOLATILE_MUTEX(pop, mutexp);
	if (m == nullptr)
		return EINVAL;

	int ret = os_mutex_destroy(m);
	if (ret == 0)
		free(m); /* release the RAM copy only after a clean destroy */

	return ret;
}
/*
* os_mutex_lock_wrapper -- wrapper for os_mutex_lock
*/
static int
os_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
	(void)pop; /* unused; required by the lock_fun_wrapper signature */
	return os_mutex_lock(static_cast<os_mutex_t *>(lock));
}
/*
* os_mutex_unlock_wrapper -- wrapper for os_mutex_unlock
*/
static int
os_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
	(void)pop; /* unused; required by the lock_fun_wrapper signature */
	return os_mutex_unlock(static_cast<os_mutex_t *>(lock));
}
/*
* pmemobj_mutex_lock_wrapper -- wrapper for pmemobj_mutex_lock
*/
static int
pmemobj_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *m = static_cast<PMEMmutex *>(lock);
	return pmemobj_mutex_lock(pop, m);
}
/*
* pmemobj_mutex_unlock_wrapper -- wrapper for pmemobj_mutex_unlock
*/
static int
pmemobj_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *m = static_cast<PMEMmutex *>(lock);
	return pmemobj_mutex_unlock(pop, m);
}
/*
* os_rwlock_wrlock_wrapper -- wrapper for os_rwlock_wrlock
*/
static int
os_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock)
{
	(void)pop; /* unused; required by the lock_fun_wrapper signature */
	return os_rwlock_wrlock(static_cast<os_rwlock_t *>(lock));
}
/*
* os_rwlock_rdlock_wrapper -- wrapper for os_rwlock_rdlock
*/
static int
os_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock)
{
	(void)pop; /* unused; required by the lock_fun_wrapper signature */
	return os_rwlock_rdlock(static_cast<os_rwlock_t *>(lock));
}
/*
* os_rwlock_unlock_wrapper -- wrapper for os_rwlock_unlock
*/
static int
os_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
	(void)pop; /* unused; required by the lock_fun_wrapper signature */
	return os_rwlock_unlock(static_cast<os_rwlock_t *>(lock));
}
/*
* pmemobj_rwlock_wrlock_wrapper -- wrapper for pmemobj_rwlock_wrlock
*/
static int
pmemobj_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *rw = static_cast<PMEMrwlock *>(lock);
	return pmemobj_rwlock_wrlock(pop, rw);
}
/*
* pmemobj_rwlock_rdlock_wrapper -- wrapper for pmemobj_rwlock_rdlock
*/
static int
pmemobj_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *rw = static_cast<PMEMrwlock *>(lock);
	return pmemobj_rwlock_rdlock(pop, rw);
}
/*
* pmemobj_rwlock_unlock_wrapper -- wrapper for pmemobj_rwlock_unlock
*/
static int
pmemobj_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *rw = static_cast<PMEMrwlock *>(lock);
	return pmemobj_rwlock_unlock(pop, rw);
}
/*
* volatile_mutex_lock_wrapper -- wrapper for volatile_mutex_lock
*/
static int
volatile_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *vm = static_cast<PMEM_volatile_mutex *>(lock);
	return volatile_mutex_lock(pop, vm);
}
/*
* volatile_mutex_unlock_wrapper -- wrapper for volatile_mutex_unlock
*/
static int
volatile_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
	auto *vm = static_cast<PMEM_volatile_mutex *>(lock);
	return volatile_mutex_unlock(pop, vm);
}
/*
* init_bench_mutex -- allocate and initialize mutex objects
*/
static int
init_bench_mutex(struct mutex_bench *mb)
{
	struct my_root *root = D_RW(mb->root);
	assert(root != nullptr);

	/* persistent array holding one lock_t per benchmarked mutex */
	POBJ_ZALLOC(mb->pop, &root->locks, lock_t,
		    mb->pa->n_locks * sizeof(lock_t));
	if (TOID_IS_NULL(root->locks)) {
		perror("POBJ_ZALLOC");
		return -1;
	}

	mb->locks = D_RW(root->locks);
	assert(mb->locks != nullptr);

	if (mb->pa->use_system_threads) {
		/* plain os_thread mutexes */
		for (unsigned i = 0; i < mb->pa->n_locks; i++)
			os_mutex_init((os_mutex_t *)&mb->locks[i]);
	} else {
		/* PMEM mutexes, stamped with the configured initial run_id */
		for (unsigned i = 0; i < mb->pa->n_locks; i++) {
			auto *p = (PMEMmutex_internal *)&mb->locks[i];
			p->pmemmutex.runid = mb->pa->runid_initial_value;
			os_mutex_init(&p->PMEMmutex_lock);
		}
	}

	return 0;
}
/*
* exit_bench_mutex -- destroy the mutex objects and release memory
*/
static int
exit_bench_mutex(struct mutex_bench *mb)
{
	/* PMEM mutexes need no explicit destruction here */
	if (mb->pa->use_system_threads) {
		for (unsigned i = 0; i < mb->pa->n_locks; i++)
			os_mutex_destroy((os_mutex_t *)&mb->locks[i]);
	}

	POBJ_FREE(&D_RW(mb->root)->locks);

	return 0;
}
/*
* op_bench_mutex -- lock and unlock the mutex object
*
* If requested, increment the run_id of the memory pool. In case of PMEMmutex
* this will force the rwlock object(s) reinitialization at the lock operation.
*/
static int
op_bench_mutex(struct mutex_bench *mb)
{
	if (!mb->pa->use_system_threads) {
		/* PMEM mutex path */
		if (mb->lock_mode == OP_MODE_1BY1) {
			bench_operation_1by1(pmemobj_mutex_lock_wrapper,
					     pmemobj_mutex_unlock_wrapper, mb,
					     mb->pop);
		} else {
			bench_operation_all_lock(pmemobj_mutex_lock_wrapper,
						 pmemobj_mutex_unlock_wrapper,
						 mb, mb->pop);
		}
		/*
		 * Bumping run_id makes every PMEMmutex appear stale, forcing
		 * its reinitialization on the next lock operation.
		 */
		if (mb->pa->run_id_increment)
			mb->pop->run_id += 2; /* must be a multiple of 2 */
	} else {
		/* plain os_thread mutex path; no pool needed */
		if (mb->lock_mode == OP_MODE_1BY1) {
			bench_operation_1by1(os_mutex_lock_wrapper,
					     os_mutex_unlock_wrapper, mb,
					     nullptr);
		} else {
			bench_operation_all_lock(os_mutex_lock_wrapper,
						 os_mutex_unlock_wrapper, mb,
						 nullptr);
		}
	}
	return 0;
}
/*
* init_bench_rwlock -- allocate and initialize rwlock objects
*/
static int
init_bench_rwlock(struct mutex_bench *mb)
{
	struct my_root *root = D_RW(mb->root);
	assert(root != nullptr);
	/* persistent array holding one lock_t per benchmarked rwlock */
	POBJ_ZALLOC(mb->pop, &root->locks, lock_t,
		    mb->pa->n_locks * sizeof(lock_t));
	if (TOID_IS_NULL(root->locks)) {
		perror("POBJ_ZALLOC");
		return -1;
	}
	mb->locks = D_RW(root->locks);
	assert(mb->locks != nullptr);
	if (!mb->pa->use_system_threads) {
		/* initialize PMEM rwlocks, stamped with the initial run_id */
		for (unsigned i = 0; i < mb->pa->n_locks; i++) {
			auto *p = (PMEMrwlock_internal *)&mb->locks[i];
			p->pmemrwlock.runid = mb->pa->runid_initial_value;
			os_rwlock_init(&p->PMEMrwlock_lock);
		}
	} else {
		/* initialize os_thread rwlocks */
		for (unsigned i = 0; i < mb->pa->n_locks; i++) {
			auto *p = (os_rwlock_t *)&mb->locks[i];
			os_rwlock_init(p);
		}
	}
	return 0;
}
/*
* exit_bench_rwlock -- destroy the rwlocks and release memory
*/
static int
exit_bench_rwlock(struct mutex_bench *mb)
{
	/* PMEM rwlocks need no explicit destruction here */
	if (mb->pa->use_system_threads) {
		for (unsigned i = 0; i < mb->pa->n_locks; i++)
			os_rwlock_destroy((os_rwlock_t *)&mb->locks[i]);
	}

	POBJ_FREE(&D_RW(mb->root)->locks);

	return 0;
}
/*
* op_bench_rwlock -- lock and unlock the rwlock object
*
* If requested, increment the run_id of the memory pool. In case of PMEMrwlock
* this will force the rwlock object(s) reinitialization at the lock operation.
*/
static int
op_bench_rwlock(struct mutex_bench *mb)
{
	if (!mb->pa->use_system_threads) {
		/* PMEM rwlock path; rdlock vs wrlock per --rdlock flag */
		if (mb->lock_mode == OP_MODE_1BY1) {
			bench_operation_1by1(
				!mb->pa->use_rdlock
					? pmemobj_rwlock_wrlock_wrapper
					: pmemobj_rwlock_rdlock_wrapper,
				pmemobj_rwlock_unlock_wrapper, mb, mb->pop);
		} else {
			bench_operation_all_lock(
				!mb->pa->use_rdlock
					? pmemobj_rwlock_wrlock_wrapper
					: pmemobj_rwlock_rdlock_wrapper,
				pmemobj_rwlock_unlock_wrapper, mb, mb->pop);
		}
		/*
		 * Bumping run_id makes every PMEMrwlock appear stale, forcing
		 * its reinitialization on the next lock operation.
		 */
		if (mb->pa->run_id_increment)
			mb->pop->run_id += 2; /* must be a multiple of 2 */
	} else {
		/* plain os_thread rwlock path; no pool needed */
		if (mb->lock_mode == OP_MODE_1BY1) {
			bench_operation_1by1(
				!mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper
						    : os_rwlock_rdlock_wrapper,
				os_rwlock_unlock_wrapper, mb, nullptr);
		} else {
			bench_operation_all_lock(
				!mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper
						    : os_rwlock_rdlock_wrapper,
				os_rwlock_unlock_wrapper, mb, nullptr);
		}
	}
	return 0;
}
/*
* init_bench_vmutex -- allocate and initialize mutexes
*/
static int
init_bench_vmutex(struct mutex_bench *mb)
{
struct my_root *root = D_RW(mb->root);
assert(root != nullptr);
POBJ_ZALLOC(mb->pop, &root->locks, lock_t,
mb->pa->n_locks * sizeof(lock_t));
if (TOID_IS_NULL(root->locks)) {
perror("POBJ_ZALLOC");
return -1;
}
mb->locks = D_RW(root->locks);
assert(mb->locks != nullptr);
/* initialize PMEM volatile mutexes */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (PMEM_volatile_mutex *)&mb->locks[i];
p->volatile_pmemmutex.runid = mb->pa->runid_initial_value;
volatile_mutex_init(&p->volatile_pmemmutex.mutexp, nullptr);
}
return 0;
}
/*
* exit_bench_vmutex -- destroy the mutex objects and release their
* memory
*/
static int
exit_bench_vmutex(struct mutex_bench *mb)
{
	/* destroy each RAM-backed mutex and free its heap allocation */
	for (unsigned i = 0; i < mb->pa->n_locks; i++) {
		auto *p = (PMEM_volatile_mutex *)&mb->locks[i];
		volatile_mutex_destroy(mb->pop, p);
	}

	POBJ_FREE(&D_RW(mb->root)->locks);

	return 0;
}
/*
* op_bench_volatile_mutex -- lock and unlock the mutex object
*/
static int
op_bench_vmutex(struct mutex_bench *mb)
{
	if (mb->lock_mode == OP_MODE_1BY1) {
		bench_operation_1by1(volatile_mutex_lock_wrapper,
				     volatile_mutex_unlock_wrapper, mb,
				     mb->pop);
	} else {
		bench_operation_all_lock(volatile_mutex_lock_wrapper,
					 volatile_mutex_unlock_wrapper, mb,
					 mb->pop);
	}

	/*
	 * Bumping run_id makes every volatile mutex appear stale, forcing
	 * its reinitialization on the next lock operation.
	 */
	if (mb->pa->run_id_increment)
		mb->pop->run_id += 2; /* must be a multiple of 2 */

	return 0;
}
/*
 * benchmark_ops -- per-lock-type callback table; row order must match
 * enum benchmark_mode (MUTEX, RWLOCK, VOLATILE_MUTEX), since
 * parse_benchmark_mode() indexes this array by that enum.
 */
struct bench_ops benchmark_ops[BENCH_MODE_MAX] = {
	{init_bench_mutex, exit_bench_mutex, op_bench_mutex},
	{init_bench_rwlock, exit_bench_rwlock, op_bench_rwlock},
	{init_bench_vmutex, exit_bench_vmutex, op_bench_vmutex}};
/*
* operation_mode -- parses command line "--mode" and returns
* proper operation mode
*/
static enum operation_mode
parse_op_mode(const char *arg)
{
	/* OP_MODE_MAX doubles as the "unrecognized" sentinel */
	if (strcmp(arg, "1by1") == 0)
		return OP_MODE_1BY1;
	if (strcmp(arg, "all-lock") == 0)
		return OP_MODE_ALL_LOCK;
	return OP_MODE_MAX;
}
/*
* benchmark_mode -- parses command line "--bench_type" and returns
* proper benchmark ops
*/
static struct bench_ops *
parse_benchmark_mode(const char *arg)
{
	/* nullptr signals an unrecognized benchmark type */
	if (strcmp(arg, "mutex") == 0)
		return &benchmark_ops[BENCH_MODE_MUTEX];
	if (strcmp(arg, "rwlock") == 0)
		return &benchmark_ops[BENCH_MODE_RWLOCK];
	if (strcmp(arg, "volatile-mutex") == 0)
		return &benchmark_ops[BENCH_MODE_VOLATILE_MUTEX];
	return nullptr;
}
/*
* locks_init -- allocates persistent memory, maps it, creates the appropriate
* objects in the allocated memory and initializes them
*/
static int
locks_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);

	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}

	/*
	 * Fix: ret was initialized to 0, so the goto err_free_mb paths below
	 * (invalid mode, invalid type, too-small file) returned success.
	 * Start pessimistic; only the happy path returns 0.
	 */
	int ret = -1;
	size_t poolsize;

	struct mutex_bench *mb = (struct mutex_bench *)malloc(sizeof(*mb));
	if (mb == nullptr) {
		perror("malloc");
		return -1;
	}

	mb->pa = (struct prog_args *)args->opts;

	mb->lock_mode = parse_op_mode(mb->pa->lock_mode);
	if (mb->lock_mode >= OP_MODE_MAX) {
		fprintf(stderr, "Invalid mutex mode: %s\n", mb->pa->lock_mode);
		errno = EINVAL;
		goto err_free_mb;
	}

	mb->ops = parse_benchmark_mode(mb->pa->lock_type);
	if (mb->ops == nullptr) {
		fprintf(stderr, "Invalid benchmark type: %s\n",
			mb->pa->lock_type);
		errno = EINVAL;
		goto err_free_mb;
	}

	/* reserve some space for metadata */
	poolsize = mb->pa->n_locks * sizeof(lock_t) + PMEMOBJ_MIN_POOL;

	if (args->is_poolset || type == TYPE_DEVDAX) {
		if (args->fsize < poolsize) {
			/* fix: message said "too large" for a too-small file */
			fprintf(stderr, "file size too small\n");
			goto err_free_mb;
		}
		poolsize = 0; /* use the existing file/device size */
	}

	mb->pop = pmemobj_create(args->fname,
				 POBJ_LAYOUT_NAME(pmembench_lock_layout),
				 poolsize, args->fmode);
	if (mb->pop == nullptr) {
		perror("pmemobj_create");
		goto err_free_mb;
	}

	mb->root = POBJ_ROOT(mb->pop, struct my_root);
	assert(!TOID_IS_NULL(mb->root));

	/* delegate lock-type-specific setup to the selected ops table */
	ret = mb->ops->bench_init(mb);
	if (ret != 0)
		goto err_free_pop;

	pmembench_set_priv(bench, mb);

	return 0;

err_free_pop:
	pmemobj_close(mb->pop);
err_free_mb:
	free(mb);
	return ret;
}
/*
* locks_exit -- destroys allocated objects and release memory
*/
static int
locks_exit(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
auto *mb = (struct mutex_bench *)pmembench_get_priv(bench);
assert(mb != nullptr);
mb->ops->bench_exit(mb);
pmemobj_close(mb->pop);
free(mb);
return 0;
}
/*
* locks_op -- actual benchmark operation
*
* Performs lock and unlock as by the program arguments.
*/
static int
locks_op(struct benchmark *bench, struct operation_info *info)
{
	/* info is unused; the per-op work is fixed by the parsed arguments */
	auto *mb = (struct mutex_bench *)pmembench_get_priv(bench);
	assert(mb != nullptr);
	assert(mb->pop != nullptr);
	assert(!TOID_IS_NULL(mb->root));
	assert(mb->locks != nullptr);
	assert(mb->lock_mode < OP_MODE_MAX);

	/* dispatch to the lock-type-specific operation */
	mb->ops->bench_op(mb);

	return 0;
}
/* structure to define command line arguments */
static struct benchmark_clo locks_clo[7];
static struct benchmark_info locks_info;
/*
 * pmem_locks_constructor -- fills in the command line option table and the
 * benchmark descriptor, then registers the benchmark at load time.
 */
CONSTRUCTOR(pmem_locks_constructor)
void
pmem_locks_constructor(void)
{
	/* -p, --use_system_threads */
	locks_clo[0].opt_short = 'p';
	locks_clo[0].opt_long = "use_system_threads";
	locks_clo[0].descr = "Use os_thread locks instead of PMEM, "
			     "does not matter for volatile mutex";
	locks_clo[0].def = "false";
	locks_clo[0].off =
		clo_field_offset(struct prog_args, use_system_threads);
	locks_clo[0].type = CLO_TYPE_FLAG;
	/* -m, --numlocks */
	locks_clo[1].opt_short = 'm';
	locks_clo[1].opt_long = "numlocks";
	locks_clo[1].descr = "The number of lock objects used "
			     "for benchmark";
	locks_clo[1].def = "1";
	locks_clo[1].off = clo_field_offset(struct prog_args, n_locks);
	locks_clo[1].type = CLO_TYPE_UINT;
	locks_clo[1].type_uint.size = clo_field_size(struct prog_args, n_locks);
	locks_clo[1].type_uint.base = CLO_INT_BASE_DEC;
	locks_clo[1].type_uint.min = 1;
	locks_clo[1].type_uint.max = UINT_MAX;
	/* --mode (see parse_op_mode) */
	locks_clo[2].opt_short = 0;
	locks_clo[2].opt_long = "mode";
	locks_clo[2].descr = "Locking mode";
	locks_clo[2].type = CLO_TYPE_STR;
	locks_clo[2].off = clo_field_offset(struct prog_args, lock_mode);
	locks_clo[2].def = "1by1";
	/* -r, --run_id */
	locks_clo[3].opt_short = 'r';
	locks_clo[3].opt_long = "run_id";
	locks_clo[3].descr = "Increment the run_id of PMEM object "
			     "pool after each operation";
	locks_clo[3].def = "false";
	locks_clo[3].off = clo_field_offset(struct prog_args, run_id_increment);
	locks_clo[3].type = CLO_TYPE_FLAG;
	/* -i, --run_id_init_val */
	locks_clo[4].opt_short = 'i';
	locks_clo[4].opt_long = "run_id_init_val";
	locks_clo[4].descr = "Use this value for initializing the "
			     "run_id of each PMEMmutex object";
	locks_clo[4].def = "2";
	locks_clo[4].off =
		clo_field_offset(struct prog_args, runid_initial_value);
	locks_clo[4].type = CLO_TYPE_UINT;
	locks_clo[4].type_uint.size =
		clo_field_size(struct prog_args, runid_initial_value);
	locks_clo[4].type_uint.base = CLO_INT_BASE_DEC;
	locks_clo[4].type_uint.min = 0;
	locks_clo[4].type_uint.max = UINT64_MAX;
	/* -b, --bench_type (see parse_benchmark_mode) */
	locks_clo[5].opt_short = 'b';
	locks_clo[5].opt_long = "bench_type";
	locks_clo[5].descr = "The Benchmark type: mutex, "
			     "rwlock or volatile-mutex";
	locks_clo[5].type = CLO_TYPE_STR;
	locks_clo[5].off = clo_field_offset(struct prog_args, lock_type);
	locks_clo[5].def = "mutex";
	/* -R, --rdlock */
	locks_clo[6].opt_short = 'R';
	locks_clo[6].opt_long = "rdlock";
	locks_clo[6].descr = "Select read over write lock, only "
			     "valid when lock_type is \"rwlock\"";
	locks_clo[6].type = CLO_TYPE_FLAG;
	locks_clo[6].off = clo_field_offset(struct prog_args, use_rdlock);
	/* benchmark descriptor */
	locks_info.name = "obj_locks";
	locks_info.brief = "Benchmark for pmem locks operations";
	locks_info.init = locks_init;
	locks_info.exit = locks_exit;
	locks_info.multithread = false;
	locks_info.multiops = true;
	locks_info.operation = locks_op;
	locks_info.measure_time = true;
	locks_info.clos = locks_clo;
	locks_info.nclos = ARRAY_SIZE(locks_clo);
	locks_info.opts_size = sizeof(struct prog_args);
	locks_info.rm_file = true;
	locks_info.allow_poolset = true;
	REGISTER_BENCHMARK(locks_info);
};
| 22,726 | 24.855518 | 80 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/pmem_memset.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_memset.cpp -- benchmark for pmem_memset function
*/
#include <cassert>
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <libpmem.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "os.h"
#define MAX_OFFSET 63
#define CONST_B 0xFF
struct memset_bench;

/* signature shared by all memset variants benchmarked below */
typedef int (*operation_fn)(void *dest, int c, size_t len);

/*
 * memset_args -- benchmark specific command line options
 */
struct memset_args {
	char *mode;        /* operation mode: stat, seq, rand */
	bool memset;       /* use libc memset function */
	bool persist;      /* perform persist operation */
	bool msync;        /* perform msync operation */
	bool no_warmup;    /* do not do warmup */
	size_t chunk_size; /* elementary chunk size */
	size_t dest_off;   /* destination address offset */
	unsigned seed;     /* seed for random numbers */
};

/*
 * memset_bench -- benchmark context
 */
struct memset_bench {
	struct memset_args *pargs; /* benchmark specific arguments */
	uint64_t *offsets;    /* per-op byte offsets, filled by init_offsets() */
	size_t n_offsets;     /* number of entries in offsets[] */
	int const_b;          /* memset() fill value (CONST_B) */
	size_t fsize;         /* file size */
	void *pmem_addr;      /* mapped file address */
	operation_fn func_op; /* selected memset variant */
};
/*
 * operation_mode -- mode of operation of memset()
 */
enum operation_mode {
	OP_MODE_UNKNOWN,
	OP_MODE_STAT, /* always use the same chunk */
	OP_MODE_SEQ,  /* use consecutive chunks */
	OP_MODE_RAND  /* use random chunks */
};

/*
 * parse_op_mode -- map a mode string ("stat"/"seq"/"rand") to its enum
 * value; anything else yields OP_MODE_UNKNOWN.
 */
static enum operation_mode
parse_op_mode(const char *arg)
{
	static const struct {
		const char *name;
		enum operation_mode mode;
	} modes[] = {
		{"stat", OP_MODE_STAT},
		{"seq", OP_MODE_SEQ},
		{"rand", OP_MODE_RAND},
	};

	for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); ++i) {
		if (strcmp(arg, modes[i].name) == 0)
			return modes[i].mode;
	}

	return OP_MODE_UNKNOWN;
}
/*
 * init_offsets -- precompute the per-operation chunk offsets (in bytes)
 * according to the selected mode; allocates mb->offsets (n_threads *
 * n_ops_per_thread entries). Returns 0 on success, -1 on failure.
 */
static int
init_offsets(struct benchmark_args *args, struct memset_bench *mb,
	     enum operation_mode op_mode)
{
	size_t ops_per_thread = args->n_ops_per_thread;
	unsigned n_threads = args->n_threads;

	mb->n_offsets = ops_per_thread * n_threads;
	assert(mb->n_offsets != 0);
	mb->offsets = (uint64_t *)malloc(mb->n_offsets * sizeof(*mb->offsets));
	if (mb->offsets == NULL) {
		perror("malloc");
		return -1;
	}

	unsigned rng_state = mb->pargs->seed;

	for (size_t idx = 0; idx < mb->n_offsets; ++idx) {
		size_t t = idx / ops_per_thread; /* owning thread */
		size_t chunk;

		switch (op_mode) {
		case OP_MODE_STAT:
			/* every op of a thread hits the thread's one chunk */
			chunk = t;
			break;
		case OP_MODE_SEQ:
			/* consecutive chunks within the thread's region */
			chunk = idx;
			break;
		case OP_MODE_RAND:
			/* random chunk within the thread's region */
			chunk = t * ops_per_thread +
				os_rand_r(&rng_state) % ops_per_thread;
			break;
		default:
			assert(0);
			return -1;
		}

		mb->offsets[idx] = chunk * mb->pargs->chunk_size;
	}

	return 0;
}
/*
 * libpmem_memset_persist -- perform operation using libpmem
 * pmem_memset_persist(). Matches the operation_fn signature; always
 * reports success.
 */
static int
libpmem_memset_persist(void *dest, int c, size_t len)
{
	pmem_memset_persist(dest, c, len);
	return 0;
}
/*
 * libpmem_memset_nodrain -- perform operation using libpmem
 * pmem_memset_nodrain(). Matches the operation_fn signature; always
 * reports success.
 */
static int
libpmem_memset_nodrain(void *dest, int c, size_t len)
{
	pmem_memset_nodrain(dest, c, len);
	return 0;
}
/*
 * libc_memset_persist -- perform operation using libc memset() function
 * followed by pmem_persist() on the same range. Always reports success.
 */
static int
libc_memset_persist(void *dest, int c, size_t len)
{
	memset(dest, c, len);
	pmem_persist(dest, len);
	return 0;
}
/*
 * libc_memset_msync -- perform operation using libc memset() function
 * followed by pmem_msync(); unlike the other variants this propagates
 * the pmem_msync() return value to the caller.
 */
static int
libc_memset_msync(void *dest, int c, size_t len)
{
	memset(dest, c, len);
	return pmem_msync(dest, len);
}
/*
 * libc_memset -- perform operation using libc memset() function
 * followed by pmem_flush() (no drain). Always reports success.
 */
static int
libc_memset(void *dest, int c, size_t len)
{
	memset(dest, c, len);
	pmem_flush(dest, len);
	return 0;
}
/*
 * warmup_persist -- does the warmup by writing (and persisting) the
 * whole pool area with the benchmark's fill value.
 */
static int
warmup_persist(struct memset_bench *mb)
{
	void *dest = mb->pmem_addr;
	int c = mb->const_b;
	size_t len = mb->fsize;
	pmem_memset_persist(dest, c, len);
	return 0;
}
/*
 * warmup_msync -- does the warmup by writing the whole pool area; uses
 * the msync-based variant so the result matches libc_memset_msync().
 */
static int
warmup_msync(struct memset_bench *mb)
{
	void *dest = mb->pmem_addr;
	int c = mb->const_b;
	size_t len = mb->fsize;
	return libc_memset_msync(dest, c, len);
}
/*
 * memset_op -- actual benchmark operation; invokes whichever memset
 * variant was selected during memset_init() (mb->func_op) on the
 * precomputed offset for this worker/operation pair.
 */
static int
memset_op(struct benchmark *bench, struct operation_info *info)
{
	auto *mb = (struct memset_bench *)pmembench_get_priv(bench);

	assert(info->index < mb->n_offsets);

	/* slot of this (worker, op) pair in the flat offsets[] array */
	size_t slot = info->worker->index * info->args->n_ops_per_thread +
		info->index;

	char *base = (char *)mb->pmem_addr;
	void *target = base + mb->offsets[slot] + mb->pargs->dest_off;

	mb->func_op(target, mb->const_b, mb->pargs->chunk_size);

	return 0;
}
/*
 * memset_init -- initialization function
 *
 * Parses benchmark options, computes the required pool size, maps the
 * pmem file, selects the memset variant to benchmark and optionally
 * warms the pool up. On success the context is stored as the bench's
 * private data; on failure partially acquired resources are released
 * via the goto-cleanup chain at the bottom.
 */
static int
memset_init(struct benchmark *bench, struct benchmark_args *args)
{
	assert(bench != nullptr);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	int ret = 0;
	size_t size;
	size_t large;
	size_t little;
	size_t file_size = 0;
	int flags = 0;
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	/* default warmup; replaced below when the msync variant is chosen */
	int (*warmup_func)(struct memset_bench *) = warmup_persist;
	auto *mb = (struct memset_bench *)malloc(sizeof(struct memset_bench));
	if (!mb) {
		perror("malloc");
		return -1;
	}
	mb->pargs = (struct memset_args *)args->opts;
	mb->pargs->chunk_size = args->dsize;
	enum operation_mode op_mode = parse_op_mode(mb->pargs->mode);
	if (op_mode == OP_MODE_UNKNOWN) {
		fprintf(stderr, "Invalid operation mode argument '%s'\n",
			mb->pargs->mode);
		ret = -1;
		goto err_free_mb;
	}
	/*
	 * One chunk plus room for the destination alignment offset;
	 * stat mode needs one chunk per thread, the other modes one
	 * chunk per operation.
	 */
	size = MAX_OFFSET + mb->pargs->chunk_size;
	large = size * args->n_ops_per_thread * args->n_threads;
	little = size * args->n_threads;
	mb->fsize = (op_mode == OP_MODE_STAT) ? little : large;
	/* initialize offsets[] array depending on benchmark args */
	if (init_offsets(args, mb, op_mode) < 0) {
		ret = -1;
		goto err_free_mb;
	}
	/* initialize memset() value */
	mb->const_b = CONST_B;
	/* device dax is mapped as-is; regular files are created/sized here */
	if (type != TYPE_DEVDAX) {
		file_size = mb->fsize;
		flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL;
	}
	/* create a pmem file and memory map it */
	if ((mb->pmem_addr = pmem_map_file(args->fname, file_size, flags,
					   args->fmode, nullptr, nullptr)) ==
	    nullptr) {
		perror(args->fname);
		ret = -1;
		goto err_free_offsets;
	}
	/* pick the memset variant; see the operation_fn implementations */
	if (mb->pargs->memset) {
		if (mb->pargs->persist && mb->pargs->msync) {
			fprintf(stderr, "Invalid benchmark parameters: "
					"persist and msync cannot be specified "
					"together\n");
			ret = -1;
			goto err_free_offsets;
		}
		if (mb->pargs->persist) {
			mb->func_op = libc_memset_persist;
		} else if (mb->pargs->msync) {
			mb->func_op = libc_memset_msync;
			warmup_func = warmup_msync;
		} else {
			mb->func_op = libc_memset;
		}
	} else {
		mb->func_op = (mb->pargs->persist) ? libpmem_memset_persist
						   : libpmem_memset_nodrain;
	}
	/* no warmup on device dax (the mapping was not created above) */
	if (!mb->pargs->no_warmup && type != TYPE_DEVDAX) {
		ret = warmup_func(mb);
		if (ret) {
			perror("Pool warmup failed");
			goto err_free_offsets;
		}
	}
	pmembench_set_priv(bench, mb);
	return ret;
err_free_offsets:
	free(mb->offsets);
err_free_mb:
	free(mb);
	return ret;
}
/*
 * memset_exit -- benchmark cleanup: unmap the pool and release the
 * context allocated in memset_init().
 */
static int
memset_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *ctx = (struct memset_bench *)pmembench_get_priv(bench);

	pmem_unmap(ctx->pmem_addr, ctx->fsize);
	free(ctx->offsets);
	free(ctx);

	return 0;
}
/* command line option definitions for the pmem_memset benchmark */
static struct benchmark_clo memset_clo[7];
/* Stores information about benchmark. */
static struct benchmark_info memset_info;

/*
 * pmem_memset_constructor -- fills in the option table and benchmark
 * descriptor at load time and registers the benchmark.
 */
CONSTRUCTOR(pmem_memset_constructor)
void
pmem_memset_constructor(void)
{
	/* -M/--mem-mode: chunk selection mode (see parse_op_mode) */
	memset_clo[0].opt_short = 'M';
	memset_clo[0].opt_long = "mem-mode";
	memset_clo[0].descr = "Memory writing mode - "
			      "stat, seq, rand";
	memset_clo[0].def = "seq";
	memset_clo[0].off = clo_field_offset(struct memset_args, mode);
	memset_clo[0].type = CLO_TYPE_STR;
	/* -m/--memset: libc memset() instead of the libpmem variants */
	memset_clo[1].opt_short = 'm';
	memset_clo[1].opt_long = "memset";
	memset_clo[1].descr = "Use libc memset()";
	memset_clo[1].def = "false";
	memset_clo[1].off = clo_field_offset(struct memset_args, memset);
	memset_clo[1].type = CLO_TYPE_FLAG;
	/* -p/--persist: persist after each operation (default true) */
	memset_clo[2].opt_short = 'p';
	memset_clo[2].opt_long = "persist";
	memset_clo[2].descr = "Use pmem_persist()";
	memset_clo[2].def = "true";
	memset_clo[2].off = clo_field_offset(struct memset_args, persist);
	memset_clo[2].type = CLO_TYPE_FLAG;
	/* -D/--dest-offset: cache-line misalignment of the destination */
	memset_clo[3].opt_short = 'D';
	memset_clo[3].opt_long = "dest-offset";
	memset_clo[3].descr = "Destination cache line alignment "
			      "offset";
	memset_clo[3].def = "0";
	memset_clo[3].off = clo_field_offset(struct memset_args, dest_off);
	memset_clo[3].type = CLO_TYPE_UINT;
	memset_clo[3].type_uint.size =
		clo_field_size(struct memset_args, dest_off);
	memset_clo[3].type_uint.base = CLO_INT_BASE_DEC;
	memset_clo[3].type_uint.min = 0;
	memset_clo[3].type_uint.max = MAX_OFFSET;
	/* -w/--no-warmup: skip the pool warmup pass */
	memset_clo[4].opt_short = 'w';
	memset_clo[4].opt_long = "no-warmup";
	memset_clo[4].descr = "Don't do warmup";
	memset_clo[4].def = "false";
	memset_clo[4].type = CLO_TYPE_FLAG;
	memset_clo[4].off = clo_field_offset(struct memset_args, no_warmup);
	/* -S/--seed: PRNG seed for rand mode */
	memset_clo[5].opt_short = 'S';
	memset_clo[5].opt_long = "seed";
	memset_clo[5].descr = "seed for random numbers";
	memset_clo[5].def = "1";
	memset_clo[5].off = clo_field_offset(struct memset_args, seed);
	memset_clo[5].type = CLO_TYPE_UINT;
	memset_clo[5].type_uint.size = clo_field_size(struct memset_args, seed);
	memset_clo[5].type_uint.base = CLO_INT_BASE_DEC;
	memset_clo[5].type_uint.min = 1;
	memset_clo[5].type_uint.max = UINT_MAX;
	/* -s/--msync: msync variant (mutually exclusive with -p) */
	memset_clo[6].opt_short = 's';
	memset_clo[6].opt_long = "msync";
	memset_clo[6].descr = "Use pmem_msync()";
	memset_clo[6].def = "false";
	memset_clo[6].off = clo_field_offset(struct memset_args, msync);
	memset_clo[6].type = CLO_TYPE_FLAG;

	/* benchmark descriptor */
	memset_info.name = "pmem_memset";
	memset_info.brief = "Benchmark for pmem_memset_persist() "
			    "and pmem_memset_nodrain() operations";
	memset_info.init = memset_init;
	memset_info.exit = memset_exit;
	memset_info.multithread = true;
	memset_info.multiops = true;
	memset_info.operation = memset_op;
	memset_info.measure_time = true;
	memset_info.clos = memset_clo;
	memset_info.nclos = ARRAY_SIZE(memset_clo);
	memset_info.opts_size = sizeof(struct memset_args);
	memset_info.rm_file = true;
	memset_info.allow_poolset = false;
	memset_info.print_bandwidth = true;
	REGISTER_BENCHMARK(memset_info);
};
| 12,566 | 25.018634 | 76 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/scenario.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* scenario.hpp -- scenario module declaration
*/
#include <cstdbool>
/* single key/value option belonging to a scenario (TAILQ list node) */
struct kv {
	TAILQ_ENTRY(kv) next;
	char *key;
	char *value;
};

/* one named benchmark scenario with its list of key/value options */
struct scenario {
	TAILQ_ENTRY(scenario) next;
	TAILQ_HEAD(scenariohead, kv) head; /* kv options of this scenario */
	char *name;
	char *benchmark;
	char *group;
};

/* collection of scenarios (TAILQ list head) */
struct scenarios {
	TAILQ_HEAD(scenarioshead, scenario) head;
};

/* iterate over all scenarios / over all kv pairs of one scenario */
#define FOREACH_SCENARIO(s, ss) TAILQ_FOREACH((s), &(ss)->head, next)
#define FOREACH_KV(kv, s) TAILQ_FOREACH((kv), &(s)->head, next)

/* allocation/release helpers; callers own the returned objects */
struct kv *kv_alloc(const char *key, const char *value);
void kv_free(struct kv *kv);
struct scenario *scenario_alloc(const char *name, const char *bench);
void scenario_free(struct scenario *s);
void scenario_set_group(struct scenario *s, const char *group);
struct scenarios *scenarios_alloc(void);
void scenarios_free(struct scenarios *scenarios);
/* lookup helpers; return NULL-style results on miss -- see scenario.cpp */
struct scenario *scenarios_get_scenario(struct scenarios *ss, const char *name);
bool contains_scenarios(int argc, char **argv, struct scenarios *ss);
struct scenario *clone_scenario(struct scenario *src_scenario);
struct kv *find_kv_in_scenario(const char *key,
			       const struct scenario *scenario);
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/log.cpp
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* log.cpp -- pmemlog benchmarks definitions
*/
#include <cassert>
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemlog.h"
#include "os.h"
#include "poolset_util.hpp"
/*
* Size of pool header, pool descriptor
* and additional page alignment overhead
*/
#define POOL_HDR_SIZE (3 * 4096)
#define MIN_VEC_SIZE 1
/*
 * prog_args - benchmark's specific command line arguments
 */
struct prog_args {
	unsigned seed;   /* seed for pseudo-random generator */
	bool rand;       /* use random numbers */
	int vec_size;    /* vector size */
	size_t el_size;  /* size of single append */
	size_t min_size; /* minimum size for random mode */
	bool no_warmup;  /* don't do warmup */
	bool fileio;     /* use file io instead of pmemlog */
};

/*
 * log_worker_info - thread specific data
 */
struct log_worker_info {
	unsigned seed;      /* per-thread random seed */
	struct iovec *iov;  /* io vector */
	char *buf;          /* buffer for write/read operations */
	size_t buf_size;    /* buffer size */
	size_t buf_ptr;     /* pointer for read operations */
	size_t *rand_sizes; /* per-op random sizes (random mode only) */
	size_t *vec_sizes;  /* sum of sizes in vector */
};

/*
 * log_bench - main context of benchmark
 */
struct log_bench {
	size_t psize;           /* size of pool */
	PMEMlogpool *plp;       /* pmemlog handle */
	struct prog_args *args; /* benchmark specific arguments */
	int fd;                 /* file descriptor for file io mode */
	unsigned seed;          /* master seed; worker seeds derive from it */
	/*
	 * Pointer to the main benchmark operation. The appropriate function
	 * will be assigned depending on the benchmark specific arguments.
	 */
	int (*func_op)(struct benchmark *, struct operation_info *);
};
/*
 * do_warmup -- do warmup by writing the whole pool area
 *
 * Appends nops zero-filled elements either through pmemlog or plain
 * file writes (depending on args->fileio), then rewinds so the
 * benchmark starts from the beginning. Returns 0 on success, -1 on
 * failure; on a file-io failure the descriptor is closed here.
 */
static int
do_warmup(struct log_bench *lb, size_t nops)
{
	int ret = 0;
	size_t bsize = lb->args->vec_size * lb->args->el_size;
	auto *buf = (char *)calloc(1, bsize);
	if (!buf) {
		perror("calloc");
		return -1;
	}
	if (!lb->args->fileio) {
		for (size_t i = 0; i < nops; i++) {
			if (pmemlog_append(lb->plp, buf, lb->args->el_size) <
			    0) {
				ret = -1;
				perror("pmemlog_append");
				goto out;
			}
		}
		pmemlog_rewind(lb->plp);
	} else {
		for (size_t i = 0; i < nops; i++) {
			if (write(lb->fd, buf, (unsigned)lb->args->el_size) !=
			    (ssize_t)lb->args->el_size) {
				ret = -1;
				perror("write");
				os_close(lb->fd);
				goto out;
			}
		}
		/* rewind the file offset, mirroring pmemlog_rewind() above */
		if (os_lseek(lb->fd, 0, SEEK_SET) < 0) {
			ret = -1;
			perror("lseek");
			os_close(lb->fd);
		}
	}
out:
	free(buf);
	return ret;
}
/*
 * log_append -- benchmark operation: append one element with
 * pmemlog_append(); element size is fixed, or per-op in random mode.
 */
static int
log_append(struct benchmark *bench, struct operation_info *info)
{
	auto *ctx = (struct log_bench *)pmembench_get_priv(bench);
	assert(ctx);

	auto *winfo = (struct log_worker_info *)info->worker->priv;
	assert(winfo);

	size_t nbytes = ctx->args->rand
		? winfo->rand_sizes[info->index]
		: ctx->args->el_size;

	if (pmemlog_append(ctx->plp, winfo->buf, nbytes) >= 0)
		return 0;

	perror("pmemlog_append");
	return -1;
}
/*
 * log_appendv -- benchmark operation: append this op's io vector with
 * pmemlog_appendv().
 */
static int
log_appendv(struct benchmark *bench, struct operation_info *info)
{
	auto *ctx = (struct log_bench *)pmembench_get_priv(bench);
	assert(ctx);

	auto *winfo = (struct log_worker_info *)info->worker->priv;
	assert(winfo);

	/* each operation owns vec_size consecutive iovec entries */
	struct iovec *vec = &winfo->iov[info->index * ctx->args->vec_size];

	if (pmemlog_appendv(ctx->plp, vec, ctx->args->vec_size) >= 0)
		return 0;

	perror("pmemlog_appendv");
	return -1;
}
/*
 * fileio_append -- benchmark operation: append one element with a
 * plain write(); element size is fixed, or per-op in random mode.
 */
static int
fileio_append(struct benchmark *bench, struct operation_info *info)
{
	auto *ctx = (struct log_bench *)pmembench_get_priv(bench);
	assert(ctx);

	auto *winfo = (struct log_worker_info *)info->worker->priv;
	assert(winfo);

	size_t nbytes = ctx->args->rand
		? winfo->rand_sizes[info->index]
		: ctx->args->el_size;

	if (write(ctx->fd, winfo->buf, (unsigned)nbytes) == (ssize_t)nbytes)
		return 0;

	perror("write");
	return -1;
}
/*
 * fileio_appendv -- benchmark operation: append this op's io vector
 * with writev(); a short write counts as failure.
 */
static int
fileio_appendv(struct benchmark *bench, struct operation_info *info)
{
	auto *ctx = (struct log_bench *)pmembench_get_priv(bench);
	assert(ctx != nullptr);

	auto *winfo = (struct log_worker_info *)info->worker->priv;
	assert(winfo);

	/* each operation owns vec_size consecutive iovec entries */
	struct iovec *vec = &winfo->iov[info->index * ctx->args->vec_size];
	size_t expected = winfo->vec_sizes[info->index];

	if (os_writev(ctx->fd, vec, ctx->args->vec_size) == (ssize_t)expected)
		return 0;

	perror("writev");
	return -1;
}
/*
 * log_process_data -- pmemlog_walk() callback; copies each walked
 * chunk into the worker buffer, wrapping to the start when the chunk
 * would overflow the remaining space.
 */
static int
log_process_data(const void *buf, size_t len, void *arg)
{
	auto *winfo = (struct log_worker_info *)arg;
	size_t room = winfo->buf_size - winfo->buf_ptr;

	/* wrap around when the chunk does not fit in the remaining space */
	if (len > room) {
		winfo->buf_ptr = 0;
		room = winfo->buf_size;
	}

	if (len > room)
		len = room;
	assert(len <= room);

	memcpy(&winfo->buf[winfo->buf_ptr], buf, len);
	winfo->buf_ptr += len;

	return 1; /* nonzero: keep walking (libpmemlog convention) */
}
/*
 * fileio_read -- perform single fileio read into the worker buffer,
 * wrapping to the start of the buffer when the request would overflow.
 *
 * Returns 1 when data was read (caller keeps looping), 0 on EOF and
 * -1 on error. The explicit EOF return is required: pread() reports
 * EOF as 0, and treating that as "more to read" would make the
 * while-loop in log_read_op() spin forever at the same offset.
 */
static int
fileio_read(int fd, ssize_t len, struct log_worker_info *worker_info)
{
	ssize_t left = worker_info->buf_size - worker_info->buf_ptr;
	if (len > left) {
		worker_info->buf_ptr = 0;
		left = worker_info->buf_size;
	}
	len = len < left ? len : left;
	assert(len <= left);
	size_t off = worker_info->buf_ptr;
	void *buff = &worker_info->buf[off];
	if ((len = pread(fd, buff, len, off)) < 0)
		return -1;
	if (len == 0)
		return 0; /* EOF: stop the read loop instead of spinning */
	worker_info->buf_ptr += len;
	return 1;
}
/*
 * log_read_op -- perform read operation
 *
 * In pmemlog mode the whole log is walked chunk by chunk through
 * log_process_data(); in file-io mode fileio_read() is called in a
 * loop for as long as it keeps returning 1.
 */
static int
log_read_op(struct benchmark *bench, struct operation_info *info)
{
	auto *lb = (struct log_bench *)pmembench_get_priv(bench);
	assert(lb);
	auto *worker_info = (struct log_worker_info *)info->worker->priv;
	assert(worker_info);
	/* restart the destination buffer for this read pass */
	worker_info->buf_ptr = 0;
	size_t chunk_size = lb->args->rand
		? worker_info->rand_sizes[info->index]
		: lb->args->el_size;
	if (!lb->args->fileio) {
		pmemlog_walk(lb->plp, chunk_size, log_process_data,
			     worker_info);
		return 0;
	}
	int ret;
	while ((ret = fileio_read(lb->fd, chunk_size, worker_info)) == 1)
		;
	return ret;
}
/*
 * log_init_worker -- init benchmark worker
 *
 * Allocates the per-thread buffer, io vectors, random size table (in
 * random mode) and per-op vector-size totals, then wires the iovecs
 * into the buffer. On failure everything already allocated is freed
 * via the goto-cleanup chain at the bottom.
 */
static int
log_init_worker(struct benchmark *bench, struct benchmark_args *args,
		struct worker_info *worker)
{
	int ret = 0;
	auto *lb = (struct log_bench *)pmembench_get_priv(bench);
	size_t i_size, n_vectors;
	assert(lb);
	auto *worker_info = (struct log_worker_info *)malloc(
		sizeof(struct log_worker_info));
	if (!worker_info) {
		perror("malloc");
		return -1;
	}
	/* allocate buffer for append / read */
	worker_info->buf_size = lb->args->el_size * lb->args->vec_size;
	worker_info->buf = (char *)malloc(worker_info->buf_size);
	if (!worker_info->buf) {
		perror("malloc");
		ret = -1;
		goto err_free_worker_info;
	}
	/*
	 * For random mode, each operation has its own vector with
	 * random sizes. Otherwise there is only one vector with
	 * equal sizes.
	 */
	n_vectors = args->n_ops_per_thread;
	worker_info->iov = (struct iovec *)malloc(
		n_vectors * lb->args->vec_size * sizeof(struct iovec));
	if (!worker_info->iov) {
		perror("malloc");
		ret = -1;
		goto err_free_buf;
	}
	if (lb->args->rand) {
		/* each thread has random seed */
		worker_info->seed = (unsigned)os_rand_r(&lb->seed);
		/* each vector element has its own random size */
		size_t n_sizes = args->n_ops_per_thread * lb->args->vec_size;
		worker_info->rand_sizes = (size_t *)malloc(
			n_sizes * sizeof(*worker_info->rand_sizes));
		if (!worker_info->rand_sizes) {
			perror("malloc");
			ret = -1;
			goto err_free_iov;
		}
		/* generate append sizes in [min_size, el_size) */
		for (size_t i = 0; i < n_sizes; i++) {
			/* two 32-bit draws combined into one 64-bit value */
			auto hr = (uint32_t)os_rand_r(&worker_info->seed);
			auto lr = (uint32_t)os_rand_r(&worker_info->seed);
			uint64_t r64 = (uint64_t)hr << 32 | lr;
			size_t width = lb->args->el_size - lb->args->min_size;
			worker_info->rand_sizes[i] =
				r64 % width + lb->args->min_size;
		}
	} else {
		worker_info->rand_sizes = nullptr;
	}
	worker_info->vec_sizes = (size_t *)calloc(
		args->n_ops_per_thread, sizeof(*worker_info->vec_sizes));
	if (!worker_info->vec_sizes) {
		perror("malloc\n");
		ret = -1;
		goto err_free_rand_sizes;
	}
	/* fill up the io vectors; each op gets vec_size consecutive entries */
	i_size = 0;
	for (size_t n = 0; n < args->n_ops_per_thread; n++) {
		size_t buf_ptr = 0;
		size_t vec_off = n * lb->args->vec_size;
		for (int i = 0; i < lb->args->vec_size; ++i) {
			size_t el_size = lb->args->rand
				? worker_info->rand_sizes[i_size++]
				: lb->args->el_size;
			worker_info->iov[vec_off + i].iov_base =
				&worker_info->buf[buf_ptr];
			worker_info->iov[vec_off + i].iov_len = el_size;
			worker_info->vec_sizes[n] += el_size;
			buf_ptr += el_size;
		}
	}
	worker->priv = worker_info;
	return 0;
err_free_rand_sizes:
	free(worker_info->rand_sizes);
err_free_iov:
	free(worker_info->iov);
err_free_buf:
	free(worker_info->buf);
err_free_worker_info:
	free(worker_info);
	return ret;
}
/*
 * log_free_worker -- release everything allocated by log_init_worker()
 * for this worker.
 */
static void
log_free_worker(struct benchmark *bench, struct benchmark_args *args,
		struct worker_info *worker)
{
	auto *winfo = (struct log_worker_info *)worker->priv;
	assert(winfo);

	free(winfo->vec_sizes);
	free(winfo->rand_sizes);
	free(winfo->iov);
	free(winfo->buf);
	free(winfo);
}
/*
 * log_init -- benchmark initialization function
 *
 * Validates options, computes the pool size, creates either a pmemlog
 * pool or a plain file (file-io mode), assigns the operation callback
 * (scalar vs vector append), optionally warms up the pool, and stores
 * the context as the bench's private data. Returns 0 on success, -1
 * on failure (with all acquired resources released).
 */
static int
log_init(struct benchmark *bench, struct benchmark_args *args)
{
	int ret = 0;
	assert(bench);
	assert(args != nullptr);
	assert(args->opts != nullptr);
	struct benchmark_info *bench_info;
	char path[PATH_MAX];
	if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
		return -1;
	enum file_type type = util_file_get_type(args->fname);
	if (type == OTHER_ERROR) {
		fprintf(stderr, "could not check type of file %s\n",
			args->fname);
		return -1;
	}
	auto *lb = (struct log_bench *)malloc(sizeof(struct log_bench));
	if (!lb) {
		perror("malloc");
		return -1;
	}
	lb->args = (struct prog_args *)args->opts;
	lb->args->el_size = args->dsize;
	if (lb->args->vec_size == 0)
		lb->args->vec_size = 1;
	/* random sizes must fit in [min_size, el_size) */
	if (lb->args->rand && lb->args->min_size > lb->args->el_size) {
		errno = EINVAL;
		ret = -1;
		goto err_free_lb;
	}
	/* degenerate random range -> effectively fixed size */
	if (lb->args->rand && lb->args->min_size == lb->args->el_size)
		lb->args->rand = false;
	/* align pool size to ensure that we have enough usable space */
	lb->psize =
		ALIGN_UP(POOL_HDR_SIZE +
				 args->n_ops_per_thread * args->n_threads *
					 lb->args->vec_size * lb->args->el_size,
			 Mmap_align);
	/* calculate a required pool size */
	if (lb->psize < PMEMLOG_MIN_POOL)
		lb->psize = PMEMLOG_MIN_POOL;
	if (args->is_poolset || type == TYPE_DEVDAX) {
		if (lb->args->fileio) {
			fprintf(stderr, "fileio not supported on device dax "
					"nor poolset\n");
			ret = -1;
			goto err_free_lb;
		}
		if (args->fsize < lb->psize) {
			fprintf(stderr, "file size too large\n");
			ret = -1;
			goto err_free_lb;
		}
		/* psize == 0 tells pmemlog_create to use the existing size */
		lb->psize = 0;
	} else if (args->is_dynamic_poolset) {
		if (lb->args->fileio) {
			fprintf(stderr,
				"fileio not supported with dynamic poolset\n");
			ret = -1;
			goto err_free_lb;
		}
		ret = dynamic_poolset_create(args->fname, lb->psize);
		if (ret == -1)
			goto err_free_lb;
		if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
			goto err_free_lb;
		lb->psize = 0;
	}
	bench_info = pmembench_get_info(bench);
	if (!lb->args->fileio) {
		if ((lb->plp = pmemlog_create(path, lb->psize, args->fmode)) ==
		    nullptr) {
			perror("pmemlog_create");
			ret = -1;
			goto err_free_lb;
		}
		/* scalar vs vector append, depending on -v */
		bench_info->operation =
			(lb->args->vec_size > 1) ? log_appendv : log_append;
	} else {
		int flags = O_CREAT | O_RDWR | O_SYNC;
		/* Create a file if it does not exist. */
		if ((lb->fd = os_open(args->fname, flags, args->fmode)) < 0) {
			perror(args->fname);
			ret = -1;
			goto err_free_lb;
		}
		/* allocate the pmem */
		if ((errno = os_posix_fallocate(lb->fd, 0, lb->psize)) != 0) {
			perror("posix_fallocate");
			ret = -1;
			goto err_close;
		}
		bench_info->operation = (lb->args->vec_size > 1)
			? fileio_appendv
			: fileio_append;
	}
	if (!lb->args->no_warmup && type != TYPE_DEVDAX) {
		size_t warmup_nops = args->n_threads * args->n_ops_per_thread;
		if (do_warmup(lb, warmup_nops)) {
			fprintf(stderr, "warmup failed\n");
			ret = -1;
			goto err_close;
		}
	}
	pmembench_set_priv(bench, lb);
	return 0;
err_close:
	if (lb->args->fileio)
		os_close(lb->fd);
	else
		pmemlog_close(lb->plp);
err_free_lb:
	free(lb);
	return ret;
}
/*
 * log_exit -- cleanup: close whichever backend log_init() opened
 * (pmemlog pool or plain file) and release the context.
 */
static int
log_exit(struct benchmark *bench, struct benchmark_args *args)
{
	auto *ctx = (struct log_bench *)pmembench_get_priv(bench);

	if (ctx->args->fileio)
		os_close(ctx->fd);
	else
		pmemlog_close(ctx->plp);

	free(ctx);
	return 0;
}
/* command line options definition */
static struct benchmark_clo log_clo[6];
/* log_append benchmark info */
static struct benchmark_info log_append_info;
/* log_read benchmark info */
static struct benchmark_info log_read_info;

/*
 * log_constructor -- fills in the shared option table and both
 * benchmark descriptors at load time and registers them.
 */
CONSTRUCTOR(log_constructor)
void
log_constructor(void)
{
	/* -r/--random: per-op random append/read sizes */
	log_clo[0].opt_short = 'r';
	log_clo[0].opt_long = "random";
	log_clo[0].descr = "Use random sizes for append/read";
	log_clo[0].off = clo_field_offset(struct prog_args, rand);
	log_clo[0].type = CLO_TYPE_FLAG;
	/*
	 * -S/--seed: PRNG seed.
	 * NOTE(review): descr "Random mode" looks copy-pasted from -r;
	 * compare memset_clo's "seed for random numbers" -- confirm and
	 * fix the help text separately.
	 */
	log_clo[1].opt_short = 'S';
	log_clo[1].opt_long = "seed";
	log_clo[1].descr = "Random mode";
	log_clo[1].off = clo_field_offset(struct prog_args, seed);
	log_clo[1].def = "1";
	log_clo[1].type = CLO_TYPE_UINT;
	log_clo[1].type_uint.size = clo_field_size(struct prog_args, seed);
	log_clo[1].type_uint.base = CLO_INT_BASE_DEC;
	log_clo[1].type_uint.min = 1;
	log_clo[1].type_uint.max = UINT_MAX;
	/* -i/--file-io: plain file I/O instead of pmemlog */
	log_clo[2].opt_short = 'i';
	log_clo[2].opt_long = "file-io";
	log_clo[2].descr = "File I/O mode";
	log_clo[2].off = clo_field_offset(struct prog_args, fileio);
	log_clo[2].type = CLO_TYPE_FLAG;
	/* -w/--no-warmup: skip the pool warmup pass */
	log_clo[3].opt_short = 'w';
	log_clo[3].opt_long = "no-warmup";
	log_clo[3].descr = "Don't do warmup", log_clo[3].type = CLO_TYPE_FLAG;
	log_clo[3].off = clo_field_offset(struct prog_args, no_warmup);
	/* -m/--min-size: lower bound of random sizes */
	log_clo[4].opt_short = 'm';
	log_clo[4].opt_long = "min-size";
	log_clo[4].descr = "Minimum size of append/read for "
			   "random mode";
	log_clo[4].type = CLO_TYPE_UINT;
	log_clo[4].off = clo_field_offset(struct prog_args, min_size);
	log_clo[4].def = "1";
	log_clo[4].type_uint.size = clo_field_size(struct prog_args, min_size);
	log_clo[4].type_uint.base = CLO_INT_BASE_DEC;
	log_clo[4].type_uint.min = 1;
	log_clo[4].type_uint.max = UINT64_MAX;
	/* this one is only for log_append */
	log_clo[5].opt_short = 'v';
	log_clo[5].opt_long = "vector";
	log_clo[5].descr = "Vector size";
	log_clo[5].off = clo_field_offset(struct prog_args, vec_size);
	log_clo[5].def = "1";
	log_clo[5].type = CLO_TYPE_INT;
	log_clo[5].type_int.size = clo_field_size(struct prog_args, vec_size);
	log_clo[5].type_int.base = CLO_INT_BASE_DEC;
	log_clo[5].type_int.min = MIN_VEC_SIZE;
	log_clo[5].type_int.max = INT_MAX;

	/* log_append benchmark descriptor */
	log_append_info.name = "log_append";
	log_append_info.brief = "Benchmark for pmemlog_append() "
				"operation";
	log_append_info.init = log_init;
	log_append_info.exit = log_exit;
	log_append_info.multithread = true;
	log_append_info.multiops = true;
	log_append_info.init_worker = log_init_worker;
	log_append_info.free_worker = log_free_worker;
	/* this will be assigned in log_init */
	log_append_info.operation = nullptr;
	log_append_info.measure_time = true;
	log_append_info.clos = log_clo;
	log_append_info.nclos = ARRAY_SIZE(log_clo);
	log_append_info.opts_size = sizeof(struct prog_args);
	log_append_info.rm_file = true;
	log_append_info.allow_poolset = true;
	REGISTER_BENCHMARK(log_append_info);

	/* log_read benchmark descriptor */
	log_read_info.name = "log_read";
	log_read_info.brief = "Benchmark for pmemlog_walk() "
			      "operation";
	log_read_info.init = log_init;
	log_read_info.exit = log_exit;
	log_read_info.multithread = true;
	log_read_info.multiops = true;
	log_read_info.init_worker = log_init_worker;
	log_read_info.free_worker = log_free_worker;
	log_read_info.operation = log_read_op;
	log_read_info.measure_time = true;
	log_read_info.clos = log_clo;
	/* without vector */
	log_read_info.nclos = ARRAY_SIZE(log_clo) - 1;
	log_read_info.opts_size = sizeof(struct prog_args);
	log_read_info.rm_file = true;
	log_read_info.allow_poolset = true;
	REGISTER_BENCHMARK(log_read_info);
};
| 18,225 | 24.278779 | 74 |
cpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/benchmarks/clo.hpp
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * clo.hpp -- command line options module declarations
 */

/* parse argv against the clos[] table, appending results to clovec */
int benchmark_clo_parse(int argc, char *argv[], struct benchmark_clo *clos,
			ssize_t nclo, struct clo_vec *clovec);
/* parse the key/value pairs of one scenario against the clos[] table */
int benchmark_clo_parse_scenario(struct scenario *scenario,
				 struct benchmark_clo *clos, size_t nclo,
				 struct clo_vec *clovec);
/* format a single option's value (from args) as a string */
const char *benchmark_clo_str(struct benchmark_clo *clo, void *args,
			      size_t size);
/* split argv scenario names into found_scenarios from the available set */
int clo_get_scenarios(int argc, char *argv[],
		      struct scenarios *available_scenarios,
		      struct scenarios *found_scenarios);
/* apply command-line overrides on top of a scenario's options */
int benchmark_override_clos_in_scenario(struct scenario *scenario, int argc,
					char *argv[],
					struct benchmark_clo *clos, int nclos);
hpp
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/ctl.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl.h -- internal declaration of statistics and control related structures
*/
#ifndef PMDK_CTL_H
#define PMDK_CTL_H 1
#include "queue.h"
#include "errno.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
struct ctl;
/*
 * ctl_index -- a single (name, value) index captured while resolving a CTL
 * query path; indexes are collected on a singly-linked list.
 */
struct ctl_index {
	const char *name;	/* name of the indexed node this value belongs to */
	long value;		/* index value extracted from the query string */
	SLIST_ENTRY(ctl_index) entry;	/* list linkage */
};

SLIST_HEAD(ctl_indexes, ctl_index);
enum ctl_query_source {
CTL_UNKNOWN_QUERY_SOURCE,
/* query executed directly from the program */
CTL_QUERY_PROGRAMMATIC,
/* query executed from the config file */
CTL_QUERY_CONFIG_INPUT,
MAX_CTL_QUERY_SOURCE
};
enum ctl_query_type {
CTL_QUERY_READ,
CTL_QUERY_WRITE,
CTL_QUERY_RUNNABLE,
MAX_CTL_QUERY_TYPE
};
typedef int (*node_callback)(void *ctx, enum ctl_query_source type,
void *arg, struct ctl_indexes *indexes);
enum ctl_node_type {
CTL_NODE_UNKNOWN,
CTL_NODE_NAMED,
CTL_NODE_LEAF,
CTL_NODE_INDEXED,
MAX_CTL_NODE
};
typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size);
struct ctl_argument_parser {
size_t dest_offset; /* offset of the field inside of the argument */
size_t dest_size; /* size of the field inside of the argument */
ctl_arg_parser parser;
};
struct ctl_argument {
size_t dest_size; /* sizeof the entire argument */
struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */
};
#define sizeof_member(t, m) sizeof(((t *)0)->m)
#define CTL_ARG_PARSER(t, p)\
{0, sizeof(t), p}
#define CTL_ARG_PARSER_STRUCT(t, m, p)\
{offsetof(t, m), sizeof_member(t, m), p}
#define CTL_ARG_PARSER_END {0, 0, NULL}
/*
 * CTL Tree node structure, do not use directly. All the necessary functionality
* is provided by the included macros.
*/
struct ctl_node {
	const char *name;	/* node name as it appears in a query path */
	enum ctl_node_type type;	/* named, indexed or leaf (see enum above) */
	node_callback cb[MAX_CTL_QUERY_TYPE];	/* read/write/runnable handlers */
	struct ctl_argument *arg;	/* write-argument parser; used by leaves */
	struct ctl_node *children;	/* child array, ended with CTL_NODE_END */
};
struct ctl *ctl_new(void);
void ctl_delete(struct ctl *stats);
int ctl_load_config_from_string(struct ctl *ctl, void *ctx,
const char *cfg_string);
int ctl_load_config_from_file(struct ctl *ctl, void *ctx,
const char *cfg_file);
/* Use through CTL_REGISTER_MODULE, never directly */
void ctl_register_module_node(struct ctl *c,
const char *name, struct ctl_node *n);
int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_BOOLEAN {sizeof(int),\
{{0, sizeof(int), ctl_arg_boolean},\
CTL_ARG_PARSER_END}};
int ctl_arg_integer(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_INT {sizeof(int),\
{{0, sizeof(int), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
#define CTL_ARG_LONG_LONG {sizeof(long long),\
{{0, sizeof(long long), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
int ctl_arg_string(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_STRING(len) {len,\
{{0, len, ctl_arg_string},\
CTL_ARG_PARSER_END}};
#define CTL_STR(name) #name
#define CTL_NODE_END {NULL, CTL_NODE_UNKNOWN, {NULL, NULL, NULL}, NULL, NULL}
#define CTL_NODE(name)\
ctl_node_##name
int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source,
const char *name, enum ctl_query_type type, void *arg);
/* Declaration of a new child node */
#define CTL_CHILD(name)\
{CTL_STR(name), CTL_NODE_NAMED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name)}
/* Declaration of a new indexed node */
#define CTL_INDEXED(name)\
{CTL_STR(name), CTL_NODE_INDEXED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name)}
#define CTL_READ_HANDLER(name)\
ctl_##name##_read
#define CTL_WRITE_HANDLER(name)\
ctl_##name##_write
#define CTL_RUNNABLE_HANDLER(name)\
ctl_##name##_runnable
#define CTL_ARG(name)\
ctl_arg_##name
/*
* Declaration of a new read-only leaf. If used the corresponding read function
* must be declared by CTL_READ_HANDLER macro.
*/
#define CTL_LEAF_RO(name)\
{CTL_STR(name), CTL_NODE_LEAF, {CTL_READ_HANDLER(name), NULL, NULL}, NULL, NULL}
/*
* Declaration of a new write-only leaf. If used the corresponding write
* function must be declared by CTL_WRITE_HANDLER macro.
*/
#define CTL_LEAF_WO(name)\
{CTL_STR(name), CTL_NODE_LEAF, {NULL, CTL_WRITE_HANDLER(name), NULL},\
&CTL_ARG(name), NULL}
/*
* Declaration of a new runnable leaf. If used the corresponding run
* function must be declared by CTL_RUNNABLE_HANDLER macro.
*/
#define CTL_LEAF_RUNNABLE(name)\
{CTL_STR(name), CTL_NODE_LEAF, {NULL, NULL, CTL_RUNNABLE_HANDLER(name)},\
NULL, NULL}
/*
* Declaration of a new read-write leaf. If used both read and write function
* must be declared by CTL_READ_HANDLER and CTL_WRITE_HANDLER macros.
*/
#define CTL_LEAF_RW(name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(name), CTL_WRITE_HANDLER(name), NULL},\
&CTL_ARG(name), NULL}
#define CTL_REGISTER_MODULE(_ctl, name)\
ctl_register_module_node((_ctl), CTL_STR(name),\
(struct ctl_node *)CTL_NODE(name))
#ifdef __cplusplus
}
#endif
#endif
| 6,437 | 27.113537 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/os_deep.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_deep.h -- abstraction layer for common usage of deep_* functions
*/
#ifndef PMDK_OS_DEEP_PERSIST_H
#define PMDK_OS_DEEP_PERSIST_H 1
#include <stdint.h>
#include <stddef.h>
#include "set.h"
#ifdef __cplusplus
extern "C" {
#endif
int os_range_deep_common(uintptr_t addr, size_t len);
int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush);
#ifdef __cplusplus
}
#endif
#endif
| 2,042 | 34.842105 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/ctl_global.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl_global.h -- definitions for the global CTL namespace
*/
#ifndef PMDK_CTL_GLOBAL_H
#define PMDK_CTL_GLOBAL_H 1
#ifdef __cplusplus
extern "C" {
#endif
void ctl_global_register(void);
#ifdef __cplusplus
}
#endif
#endif
| 1,834 | 34.980392 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/file.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* file.h -- internal definitions for file module
*/
#ifndef PMDK_FILE_H
#define PMDK_FILE_H 1
#include <stddef.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <limits.h>
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
#define NAME_MAX _MAX_FNAME
#endif
struct file_info {
char filename[NAME_MAX + 1];
int is_dir;
};
struct dir_handle {
const char *path;
#ifdef _WIN32
HANDLE handle;
char *_file;
#else
DIR *dirp;
#endif
};
/* result of util_fd_get_type()/util_file_get_type() */
enum file_type {
	OTHER_ERROR = -2,	/* type could not be determined (query failed) */
	NOT_EXISTS = -1,	/* file does not exist */
	TYPE_NORMAL = 1,	/* regular file */
	TYPE_DEVDAX = 2		/* device DAX (presumably a char device) */
};
int util_file_dir_open(struct dir_handle *a, const char *path);
int util_file_dir_next(struct dir_handle *a, struct file_info *info);
int util_file_dir_close(struct dir_handle *a);
int util_file_dir_remove(const char *path);
int util_file_exists(const char *path);
enum file_type util_fd_get_type(int fd);
enum file_type util_file_get_type(const char *path);
int util_ddax_region_find(const char *path);
ssize_t util_file_get_size(const char *path);
size_t util_file_device_dax_alignment(const char *path);
void *util_file_map_whole(const char *path);
int util_file_zero(const char *path, os_off_t off, size_t len);
ssize_t util_file_pread(const char *path, void *buffer, size_t size,
os_off_t offset);
ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size,
os_off_t offset);
int util_tmpfile(const char *dir, const char *templ, int flags);
int util_is_absolute_path(const char *path);
int util_file_create(const char *path, size_t size, size_t minsize);
int util_file_open(const char *path, size_t *size, size_t minsize, int flags);
int util_unlink(const char *path);
int util_unlink_flock(const char *path);
int util_file_mkdir(const char *path, mode_t mode);
int util_write_all(int fd, const char *buf, size_t count);
#ifndef _WIN32
#define util_read read
#define util_write write
#else
/* XXX - consider adding an assertion on (count <= UINT_MAX) */
#define util_read(fd, buf, count) read(fd, buf, (unsigned)(count))
#define util_write(fd, buf, count) write(fd, buf, (unsigned)(count))
#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,839 | 31.268908 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/util_pmem.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util_pmem.h -- internal definitions for pmem utils
*/
#ifndef PMDK_UTIL_PMEM_H
#define PMDK_UTIL_PMEM_H 1
#include "libpmem.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* util_persist -- flush to persistence
*/
/*
 * util_persist -- flush the given range to persistence
 *
 * For pmem-resident ranges this uses pmem_persist(); for everything else it
 * falls back to pmem_msync() and aborts via FATAL() if the msync fails.
 */
static inline void
util_persist(int is_pmem, const void *addr, size_t len)
{
	LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len);

	if (!is_pmem) {
		if (pmem_msync(addr, len))
			FATAL("!pmem_msync");
	} else {
		pmem_persist(addr, len);
	}
}
/*
* util_persist_auto -- flush to persistence
*/
/*
 * util_persist_auto -- flush to persistence, auto-detecting pmem
 *
 * Same as util_persist(), but when the caller does not already know the
 * range is pmem, pmem_is_pmem() is consulted (short-circuited if is_pmem
 * is already non-zero).
 */
static inline void
util_persist_auto(int is_pmem, const void *addr, size_t len)
{
	LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len);

	int resolved_pmem = is_pmem || pmem_is_pmem(addr, len);
	util_persist(resolved_pmem, addr, len);
}
#ifdef __cplusplus
}
#endif
#endif /* util_pmem.h */
| 2,398 | 30.155844 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/pmemcommon.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemcommon.h -- definitions for "common" module
*/
#ifndef PMEMCOMMON_H
#define PMEMCOMMON_H 1
#include "util.h"
#include "out.h"
#include "mmap.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * common_init -- initialize the "common" module
 *
 * Runs the util, out (logging) and mmap-tracking initializers, in that
 * order; the out_init() arguments configure the log prefix, the names of
 * the environment variables controlling log level/file, and the library
 * version reported in the log header.
 */
static inline void
common_init(const char *log_prefix, const char *log_level_var,
		const char *log_file_var, int major_version,
		int minor_version)
{
	util_init();
	out_init(log_prefix, log_level_var, log_file_var, major_version,
		minor_version);
	util_mmap_init();
}
/*
 * common_fini -- clean up the "common" module, in reverse order of
 * common_init() (mmap tracking first, then logging)
 */
static inline void
common_fini(void)
{
	util_mmap_fini();
	out_fini();
}
#ifdef __cplusplus
}
#endif
#endif
| 2,182 | 29.746479 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/shutdown_state.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * shutdown_state.h -- unsafe shutdown detection
*/
#ifndef PMDK_SHUTDOWN_STATE_H
#define PMDK_SHUTDOWN_STATE_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct pool_replica;
/* on-media structure used for unsafe-shutdown detection */
struct shutdown_state {
	uint64_t usc;		/* unsafe shutdown count */
	uint64_t uuid;		/* UID checksum */
	uint8_t dirty;		/* set while pool is open, cleared on clean close */
	uint8_t reserved[39];	/* padding */
	uint64_t checksum;	/* checksum of this structure */
};
int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep);
int shutdown_state_add_part(struct shutdown_state *sds, const char *path,
struct pool_replica *rep);
void shutdown_state_set_dirty(struct shutdown_state *sds,
struct pool_replica *rep);
void shutdown_state_clear_dirty(struct shutdown_state *sds,
struct pool_replica *rep);
int shutdown_state_check(struct shutdown_state *curr_sds,
struct shutdown_state *pool_sds, struct pool_replica *rep);
#ifdef __cplusplus
}
#endif
#endif /* shutdown_state.h */
| 2,475 | 33.873239 | 78 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/uuid.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* uuid.h -- internal definitions for uuid module
*/
#ifndef PMDK_UUID_H
#define PMDK_UUID_H 1
#include <stdint.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Structure for binary version of uuid. From RFC4122,
* https://tools.ietf.org/html/rfc4122
*/
struct uuid {
	uint32_t time_low;		/* low field of the timestamp */
	uint16_t time_mid;		/* middle field of the timestamp */
	uint16_t time_hi_and_ver;	/* high timestamp field + version */
	uint8_t clock_seq_hi;		/* high clock sequence + variant */
	uint8_t clock_seq_low;		/* low field of the clock sequence */
	uint8_t node[6];		/* spatially unique node identifier */
};
#define POOL_HDR_UUID_LEN 16 /* uuid byte length */
#define POOL_HDR_UUID_STR_LEN 37 /* uuid string length */
#define POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid"
typedef unsigned char uuid_t[POOL_HDR_UUID_LEN]; /* 16 byte binary uuid value */
int util_uuid_generate(uuid_t uuid);
int util_uuid_to_string(const uuid_t u, char *buf);
int util_uuid_from_string(const char uuid[POOL_HDR_UUID_STR_LEN],
struct uuid *ud);
/*
 * uuidcmp -- compare two uuids
 *
 * Returns 0 when the two 16-byte values are equal; otherwise the sign of
 * the result follows memcmp() semantics (byte-wise comparison).
 */
static inline int
uuidcmp(const uuid_t uuid1, const uuid_t uuid2)
{
	return memcmp(uuid1, uuid2, POOL_HDR_UUID_LEN);
}
#ifdef __cplusplus
}
#endif
#endif
| 2,660 | 30.305882 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/queue.h
|
/*
* Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h)
*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
* A singly-linked list is headed by a single forward pointer. The
* elements are singly linked for minimum space and pointer manipulation
* overhead at the expense of O(n) removal for arbitrary elements. New
* elements can be added to the list after an existing element or at the
* head of the list. Elements being removed from the head of the list
* should use the explicit macro for this purpose for optimum
* efficiency. A singly-linked list may only be traversed in the forward
* direction. Singly-linked lists are ideal for applications with large
* datasets and few or no removals or for implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
/*
* XXX This is a workaround for a bug in the llvm's static analyzer. For more
* info see https://github.com/pmem/issues/issues/309.
*/
#ifdef __clang_analyzer__
static void custom_assert(void)
{
abort();
}
#define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? (void)0 : custom_assert())
#else
#define ANALYZER_ASSERT(x) do {} while (0)
#endif
/*
* List definitions.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#ifdef __cplusplus
#define _CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y;
#else
#define _CAST_AND_ASSIGN(x, y) x = (void *)(y);
#endif
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define LIST_INIT(head) do { \
(head)->lh_first = NULL; \
} while (/*CONSTCOND*/0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)
#define LIST_REMOVE(elm, field) do { \
ANALYZER_ASSERT((elm) != NULL); \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define LIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
(var) = ((var)->field.le_next))
/*
* List access methods.
*/
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
/*
 * Singly-linked List definitions.
 *
 * The head holds one forward pointer; each element carries one
 * forward pointer.  O(1) insert at head/after, O(n) arbitrary
 * removal.
 */
#define SLIST_HEAD(name, type) \
struct name { \
	struct type *slh_first;	/* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
	{ NULL }
#define SLIST_ENTRY(type) \
struct { \
	struct type *sle_next;	/* next element */ \
}
/*
 * Singly-linked List functions.
 */
#define SLIST_INIT(head) do { \
	(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)
/* insert 'elm' right after 'slistelm' */
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	(elm)->field.sle_next = (slistelm)->field.sle_next; \
	(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)
/* insert 'elm' as the new first element */
#define SLIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.sle_next = (head)->slh_first; \
	(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)
/* pop the first element; the list must NOT be empty */
#define SLIST_REMOVE_HEAD(head, field) do { \
	(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
/* unlink 'elm' anywhere in the list -- O(n) walk to find it */
#define SLIST_REMOVE(head, elm, type, field) do { \
	if ((head)->slh_first == (elm)) { \
		SLIST_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = (head)->slh_first; \
		while(curelm->field.sle_next != (elm)) \
			curelm = curelm->field.sle_next; \
		curelm->field.sle_next = \
		    curelm->field.sle_next->field.sle_next; \
	} \
} while (/*CONSTCOND*/0)
/* iterate; NOT safe against removing 'var' mid-loop */
#define SLIST_FOREACH(var, head, field) \
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
/*
 * Singly-linked List access methods.
 */
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
/*
 * Singly-linked Tail queue declarations.
 *
 * Like SLIST, but the head also keeps the address of the last
 * element's next pointer (stqh_last), making tail insertion O(1).
 */
#define STAILQ_HEAD(name, type) \
struct name { \
	struct type *stqh_first;	/* first element */ \
	struct type **stqh_last;	/* addr of last next element */ \
}
#define STAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
	struct type *stqe_next;	/* next element */ \
}
/*
 * Singly-linked Tail queue functions.
 */
#define STAILQ_INIT(head) do { \
	(head)->stqh_first = NULL; \
	(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
/* insert at head; fix stqh_last when the queue was empty */
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(head)->stqh_first = (elm); \
} while (/*CONSTCOND*/0)
/* O(1) tail insertion through the saved stqh_last pointer */
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.stqe_next = NULL; \
	*(head)->stqh_last = (elm); \
	(head)->stqh_last = &(elm)->field.stqe_next; \
} while (/*CONSTCOND*/0)
/* insert after 'listelm'; fix stqh_last when it was the tail */
#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next; \
	(listelm)->field.stqe_next = (elm); \
} while (/*CONSTCOND*/0)
/* pop the first element (queue must be non-empty); reset tail if emptied */
#define STAILQ_REMOVE_HEAD(head, field) do { \
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
/* unlink 'elm' -- O(n) walk; keeps stqh_last consistent */
#define STAILQ_REMOVE(head, elm, type, field) do { \
	if ((head)->stqh_first == (elm)) { \
		STAILQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->stqh_first; \
		while (curelm->field.stqe_next != (elm)) \
			curelm = curelm->field.stqe_next; \
		if ((curelm->field.stqe_next = \
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			    (head)->stqh_last = &(curelm)->field.stqe_next; \
	} \
} while (/*CONSTCOND*/0)
/* iterate; NOT safe against removing 'var' mid-loop */
#define STAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->stqh_first); \
	    (var); \
	    (var) = ((var)->field.stqe_next))
/* append head2 onto head1 and reinitialize head2 to empty */
#define STAILQ_CONCAT(head1, head2) do { \
	if (!STAILQ_EMPTY((head2))) { \
		*(head1)->stqh_last = (head2)->stqh_first; \
		(head1)->stqh_last = (head2)->stqh_last; \
		STAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)
/*
 * Singly-linked Tail queue access methods.
 */
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
/*
 * Simple queue definitions.
 *
 * Structurally identical to STAILQ (singly-linked with an O(1)
 * tail-insert pointer); kept as a separate API for source
 * compatibility.
 */
#define SIMPLEQ_HEAD(name, type) \
struct name { \
	struct type *sqh_first;	/* first element */ \
	struct type **sqh_last;	/* addr of last next element */ \
}
#define SIMPLEQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).sqh_first }
#define SIMPLEQ_ENTRY(type) \
struct { \
	struct type *sqe_next;	/* next element */ \
}
/*
 * Simple queue functions.
 */
#define SIMPLEQ_INIT(head) do { \
	(head)->sqh_first = NULL; \
	(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
/* insert at head; fix sqh_last when the queue was empty */
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)
/* O(1) tail insertion through the saved sqh_last pointer */
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.sqe_next = NULL; \
	*(head)->sqh_last = (elm); \
	(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
/* insert after 'listelm'; fix sqh_last when it was the tail */
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next; \
	(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)
/* pop the first element (queue must be non-empty); reset tail if emptied */
#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
/* unlink 'elm' -- O(n) walk; keeps sqh_last consistent */
#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
	if ((head)->sqh_first == (elm)) { \
		SIMPLEQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->sqh_first; \
		while (curelm->field.sqe_next != (elm)) \
			curelm = curelm->field.sqe_next; \
		if ((curelm->field.sqe_next = \
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			    (head)->sqh_last = &(curelm)->field.sqe_next; \
	} \
} while (/*CONSTCOND*/0)
/* iterate; NOT safe against removing 'var' mid-loop */
#define SIMPLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->sqh_first); \
	    (var); \
	    (var) = ((var)->field.sqe_next))
/*
 * Simple queue access methods.
 */
#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
/*
 * Tail queue definitions.
 *
 * Doubly-linked: tqe_prev stores the ADDRESS of the previous
 * element's tqe_next (or of tqh_first), which makes insertion and
 * removal branch-free on the "previous" side.  The 'qual' parameter
 * of the _TAILQ_* variants lets callers declare const- or
 * volatile-qualified queues.
 */
#define _TAILQ_HEAD(name, type, qual) \
struct name { \
	qual type *tqh_first;		/* first element */ \
	qual type *qual *tqh_last;	/* addr of last next element */ \
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
#define TAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).tqh_first }
#define _TAILQ_ENTRY(type, qual) \
struct { \
	qual type *tqe_next;		/* next element */ \
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
/*
 * Tail queue functions.
 */
#define TAILQ_INIT(head) do { \
	(head)->tqh_first = NULL; \
	(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
/* insert at head; patch the old first element's tqe_prev (or the tail) */
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
		(head)->tqh_first->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(head)->tqh_first = (elm); \
	(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
/* O(1) tail insertion through tqh_last */
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.tqe_next = NULL; \
	(elm)->field.tqe_prev = (head)->tqh_last; \
	*(head)->tqh_last = (elm); \
	(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
/* insert after 'listelm'; fix the tail pointer if listelm was last */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(listelm)->field.tqe_next = (elm); \
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
/* insert before 'listelm'; no head needed thanks to tqe_prev */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
/*
 * TAILQ_REMOVE -- unlink 'elm'; elm's own link fields become stale.
 * NOTE(review): ANALYZER_ASSERT is presumably a static-analysis hint
 * macro defined elsewhere in this file (no-op at runtime) -- confirm.
 */
#define TAILQ_REMOVE(head, elm, field) do { \
	ANALYZER_ASSERT((elm) != NULL); \
	if (((elm)->field.tqe_next) != NULL) \
		(elm)->field.tqe_next->field.tqe_prev = \
		    (elm)->field.tqe_prev; \
	else \
		(head)->tqh_last = (elm)->field.tqe_prev; \
	*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
/* iterate forward; NOT safe against removing 'var' mid-loop */
#define TAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->tqh_first); \
	    (var); \
	    (var) = ((var)->field.tqe_next))
/*
 * Reverse iteration: reinterprets tqh_last/tqe_prev as if they were
 * a head structure ('struct headname') to step backwards -- relies
 * on the layout pun between the head and the entry structures.
 */
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
	    (var); \
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
/* append head2 onto head1 and reinitialize head2 to empty */
#define TAILQ_CONCAT(head1, head2, field) do { \
	if (!TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		TAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)
/*
 * Tail queue access methods.  TAILQ_LAST/TAILQ_PREV use the same
 * head-layout pun as TAILQ_FOREACH_REVERSE above.
 */
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
/*
 * Circular queue definitions.
 *
 * Doubly-linked circular queue in which the head structure itself
 * serves as the sentinel "element": the boundary elements'
 * cqe_next/cqe_prev point back at the head, hence the (void *)
 * casts in the comparisons below.
 */
#define CIRCLEQ_HEAD(name, type) \
struct name { \
	struct type *cqh_first;		/* first element */ \
	struct type *cqh_last;		/* last element */ \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
	{ (void *)&(head), (void *)&(head) }
#define CIRCLEQ_ENTRY(type) \
struct { \
	struct type *cqe_next;		/* next element */ \
	struct type *cqe_prev;		/* previous element */ \
}
/*
 * Circular queue functions.
 *
 * NOTE(review): _CAST_AND_ASSIGN is presumably a project macro that
 * assigns the head pointer through an explicit cast (to silence
 * type warnings) -- confirm where it is defined.
 */
#define CIRCLEQ_INIT(head) do { \
	_CAST_AND_ASSIGN((head)->cqh_first, (head)); \
	_CAST_AND_ASSIGN((head)->cqh_last, (head)); \
} while (/*CONSTCOND*/0)
/* insert 'elm' after 'listelm'; update cqh_last if listelm was last */
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	(elm)->field.cqe_next = (listelm)->field.cqe_next; \
	(elm)->field.cqe_prev = (listelm); \
	if ((listelm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(listelm)->field.cqe_next->field.cqe_prev = (elm); \
	(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)
/* insert 'elm' before 'listelm'; update cqh_first if listelm was first */
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
	(elm)->field.cqe_next = (listelm); \
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
	if ((listelm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(listelm)->field.cqe_prev->field.cqe_next = (elm); \
	(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)
/* insert at the front of the queue */
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.cqe_next = (head)->cqh_first; \
	(elm)->field.cqe_prev = (void *)(head); \
	if ((head)->cqh_last == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(head)->cqh_first->field.cqe_prev = (elm); \
	(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)
/* insert at the back of the queue */
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
	_CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \
	(elm)->field.cqe_prev = (head)->cqh_last; \
	if ((head)->cqh_first == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(head)->cqh_last->field.cqe_next = (elm); \
	(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)
/* unlink 'elm'; either neighbor may be the head sentinel */
#define CIRCLEQ_REMOVE(head, elm, field) do { \
	if ((elm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm)->field.cqe_prev; \
	else \
		(elm)->field.cqe_next->field.cqe_prev = \
		    (elm)->field.cqe_prev; \
	if ((elm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm)->field.cqe_next; \
	else \
		(elm)->field.cqe_prev->field.cqe_next = \
		    (elm)->field.cqe_next; \
} while (/*CONSTCOND*/0)
/* iterate forward; terminates when the walk returns to the head */
#define CIRCLEQ_FOREACH(var, head, field) \
	for ((var) = ((head)->cqh_first); \
	    (var) != (const void *)(head); \
	    (var) = ((var)->field.cqe_next))
/* iterate backward; terminates when the walk returns to the head */
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
	for ((var) = ((head)->cqh_last); \
	    (var) != (const void *)(head); \
	    (var) = ((var)->field.cqe_prev))
/*
 * Circular queue access methods.
 */
#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
/* wrap-around next/prev: skip over the head sentinel */
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
	(((elm)->field.cqe_next == (void *)(head)) \
	    ? ((head)->cqh_first) \
	    : ((elm)->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
	(((elm)->field.cqe_prev == (void *)(head)) \
	    ? ((head)->cqh_last) \
	    : ((elm)->field.cqe_prev))
/*
 * Sorted queue functions.
 *
 * A sorted queue is a circular queue kept ordered on insertion:
 * SORTEDQ_INSERT walks forward until 'comparer' says the new element
 * sorts before the current one and splices it in there (or at the
 * tail if it is the largest).  All other operations delegate to the
 * CIRCLEQ macros above.
 */
#define SORTEDQ_HEAD(name, type) CIRCLEQ_HEAD(name, type)
#define SORTEDQ_HEAD_INITIALIZER(head) CIRCLEQ_HEAD_INITIALIZER(head)
#define SORTEDQ_ENTRY(type) CIRCLEQ_ENTRY(type)
#define SORTEDQ_INIT(head) CIRCLEQ_INIT(head)
/*
 * SORTEDQ_INSERT -- insert 'elm' keeping the queue ordered according
 * to 'comparer' (elements for which comparer(it, elm) < 0 stay in
 * front).  O(n) in the queue length.
 *
 * Wrapped in do { } while (0) -- the original used a bare block,
 * which breaks when the macro is used as the body of an if/else
 * (the caller's trailing ';' becomes a separate empty statement).
 */
#define SORTEDQ_INSERT(head, elm, field, type, comparer) do { \
	type *_elm_it; \
	for (_elm_it = (head)->cqh_first; \
	    ((_elm_it != (void *)(head)) && \
	    (comparer(_elm_it, (elm)) < 0)); \
	    _elm_it = _elm_it->field.cqe_next) \
		/*NOTHING*/; \
	if (_elm_it == (void *)(head)) \
		CIRCLEQ_INSERT_TAIL(head, elm, field); \
	else \
		CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \
} while (/*CONSTCOND*/0)
#define SORTEDQ_REMOVE(head, elm, field) CIRCLEQ_REMOVE(head, elm, field)
#define SORTEDQ_FOREACH(var, head, field) CIRCLEQ_FOREACH(var, head, field)
#define SORTEDQ_FOREACH_REVERSE(var, head, field) \
	CIRCLEQ_FOREACH_REVERSE(var, head, field)
/*
 * Sorted queue access methods.
 */
#define SORTEDQ_EMPTY(head) CIRCLEQ_EMPTY(head)
#define SORTEDQ_FIRST(head) CIRCLEQ_FIRST(head)
#define SORTEDQ_LAST(head) CIRCLEQ_LAST(head)
#define SORTEDQ_NEXT(elm, field) CIRCLEQ_NEXT(elm, field)
#define SORTEDQ_PREV(elm, field) CIRCLEQ_PREV(elm, field)
#endif /* sys/queue.h */
| 21,518 | 32.888189 | 82 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/set.h
|
/*
* Copyright 2014-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* set.h -- internal definitions for set module
*/
#ifndef PMDK_SET_H
#define PMDK_SET_H 1
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "out.h"
#include "vec.h"
#include "pool_hdr.h"
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * pool sets & replicas
 *
 * Section signatures recognized in a poolset descriptor file; the
 * *_LEN constants deliberately exclude the terminating '\0'.
 */
#define POOLSET_HDR_SIG "PMEMPOOLSET"
#define POOLSET_HDR_SIG_LEN 11	/* does NOT include '\0' */
#define POOLSET_REPLICA_SIG "REPLICA"
#define POOLSET_REPLICA_SIG_LEN 7	/* does NOT include '\0' */
#define POOLSET_OPTION_SIG "OPTION"
#define POOLSET_OPTION_SIG_LEN 6	/* does NOT include '\0' */
/* pool set option flags */
enum pool_set_option_flag {
	OPTION_UNKNOWN = 0x0,
	OPTION_SINGLEHDR = 0x1, /* pool headers only in the first part */
	OPTION_NOHDRS = 0x2, /* no pool headers, remote replicas only */
};
/* maps an option name from the poolset file to its flag value */
struct pool_set_option {
	const char *name;
	enum pool_set_option_flag flag;
};
/* replica locality markers */
#define POOL_LOCAL 0
#define POOL_REMOTE 1
#define REPLICAS_DISABLED 0
#define REPLICAS_ENABLED 1
/* util_pool_open flags (bitmask) */
#define POOL_OPEN_COW 1 /* copy-on-write mode */
#define POOL_OPEN_IGNORE_SDS 2 /* ignore shutdown state */
#define POOL_OPEN_IGNORE_BAD_BLOCKS 4 /* ignore bad blocks */
#define POOL_OPEN_CHECK_BAD_BLOCKS 8 /* check bad blocks */
/* how part files are treated when a pool set is closed/deleted */
enum del_parts_mode {
	DO_NOT_DELETE_PARTS, /* do not delete part files */
	DELETE_CREATED_PARTS, /* delete only newly created parts files */
	DELETE_ALL_PARTS /* force delete all parts files */
};
/*
 * pool_set_part -- a single part (regular file or Device DAX) of a
 * replica.  Fields below the 'fd' group are filled in only once the
 * part has been opened/mapped.
 */
struct pool_set_part {
	/* populated by a pool set file parser */
	const char *path;
	size_t filesize;	/* aligned to page size */
	int fd;
	int flags;		/* stores flags used when opening the file */
				/* valid only if fd >= 0 */
	int is_dev_dax;		/* indicates if the part is on device dax */
	size_t alignment;	/* internal alignment (Device DAX only) */
	int created;		/* indicates newly created (zeroed) file */
	/* util_poolset_open/create */
	void *remote_hdr;	/* allocated header for remote replica */
	void *hdr;		/* base address of header */
	size_t hdrsize;		/* size of the header mapping */
	int hdr_map_sync;	/* header mapped with MAP_SYNC */
	void *addr;		/* base address of the mapping */
	size_t size;		/* size of the mapping - page aligned */
	int map_sync;		/* part has been mapped with MAP_SYNC flag */
	int rdonly;		/* is set based on compat features, affects */
				/* the whole poolset */
	uuid_t uuid;
	int has_bad_blocks;	/* part file contains bad blocks */
	int sds_dirty_modified;	/* sds dirty flag was set */
};
/* a directory from which part files are created (directory poolsets) */
struct pool_set_directory {
	const char *path;
	size_t resvsize;	/* size of the address space reservation */
};
/* handle + addressing info for one remote (librpmem) replica */
struct remote_replica {
	void *rpp;		/* RPMEMpool opaque handle */
	char *node_addr;	/* address of a remote node */
	/* poolset descriptor is a pool set file name on a remote node */
	char *pool_desc;	/* descriptor of a poolset */
};
/*
 * pool_replica -- one replica of a pool set; either local (an array
 * of parts) or remote ('remote' != NULL).  Ends with a flexible
 * array member, so it is allocated with space for 'nallocated' parts.
 */
struct pool_replica {
	unsigned nparts;
	unsigned nallocated;
	unsigned nhdrs;		/* should be 0, 1 or nparts */
	size_t repsize;		/* total size of all the parts (mappings) */
	size_t resvsize;	/* min size of the address space reservation */
	int is_pmem;		/* true if all the parts are in PMEM */
	void *mapaddr;		/* base address (libpmemcto only) */
	struct remote_replica *remote;	/* not NULL if the replica */
					/* is a remote one */
	VEC(, struct pool_set_directory) directory;
	struct pool_set_part part[];
};
/*
 * pool_set -- a whole pool set: an array of replicas plus state
 * shared by all of them.  Also ends with a flexible array member.
 */
struct pool_set {
	char *path;		/* path of the poolset file */
	unsigned nreplicas;
	uuid_t uuid;
	int rdonly;
	int zeroed;		/* true if all the parts are new files */
	size_t poolsize;	/* the smallest replica size */
	int has_bad_blocks;	/* pool set contains bad blocks */
	int remote;		/* true if contains a remote replica */
	unsigned options;	/* enabled pool set options */
	int directory_based;
	size_t resvsize;
	unsigned next_id;
	unsigned next_directory_id;
	int ignore_sds;		/* don't use shutdown state */
	struct pool_replica *replica[];
};
/* tagged view of either a local part or a remote replica (iteration) */
struct part_file {
	int is_remote;
	/*
	 * Pointer to the part file structure -
	 * - not-NULL only for a local part file
	 */
	struct pool_set_part *part;
	/*
	 * Pointer to the replica structure -
	 * - not-NULL only for a remote replica
	 */
	struct remote_replica *remote;
};
/* attributes copied between in-memory state and on-media pool headers */
struct pool_attr {
	char signature[POOL_HDR_SIG_LEN];		/* pool signature */
	uint32_t major;			/* format major version number */
	features_t features;		/* features flags */
	unsigned char poolset_uuid[POOL_HDR_UUID_LEN];		/* pool uuid */
	unsigned char first_part_uuid[POOL_HDR_UUID_LEN]; /* first part uuid */
	unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN]; /* prev replica uuid */
	unsigned char next_repl_uuid[POOL_HDR_UUID_LEN]; /* next replica uuid */
	unsigned char arch_flags[POOL_HDR_ARCH_LEN];		/* arch flags */
};
/* index of the (r)th replica, wrapped modulo the replica count */
static inline unsigned
REPidx(const struct pool_set *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);

	unsigned nrep = set->nreplicas;
	return (nrep + r) % nrep;
}
/* index of the (r + 1)th replica */
static inline unsigned
REPNidx(const struct pool_set *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);

	unsigned nrep = set->nreplicas;
	return (nrep + r + 1) % nrep;
}
/* index of the (r - 1)th replica */
static inline unsigned
REPPidx(const struct pool_set *set, unsigned r)
{
	ASSERTne(set->nreplicas, 0);

	unsigned nrep = set->nreplicas;
	return (nrep + r - 1) % nrep;
}
/* index of the (p)th part, wrapped modulo the part count */
static inline unsigned
PARTidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nparts, 0);

	unsigned nparts = rep->nparts;
	return (nparts + p) % nparts;
}
/* index of the (p + 1)th part */
static inline unsigned
PARTNidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nparts, 0);

	unsigned nparts = rep->nparts;
	return (nparts + p + 1) % nparts;
}
/* index of the (p - 1)th part */
static inline unsigned
PARTPidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nparts, 0);

	unsigned nparts = rep->nparts;
	return (nparts + p - 1) % nparts;
}
/* index of the (p)th header, wrapped modulo the header count */
static inline unsigned
HDRidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nhdrs, 0);

	unsigned nhdrs = rep->nhdrs;
	return (nhdrs + p) % nhdrs;
}
/* index of the (p + 1)th header */
static inline unsigned
HDRNidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nhdrs, 0);

	unsigned nhdrs = rep->nhdrs;
	return (nhdrs + p + 1) % nhdrs;
}
/* index of the (p - 1)th header */
static inline unsigned
HDRPidx(const struct pool_replica *rep, unsigned p)
{
	ASSERTne(rep->nhdrs, 0);

	unsigned nhdrs = rep->nhdrs;
	return (nhdrs + p - 1) % nhdrs;
}
/* get the (r)th replica */
static inline struct pool_replica *
REP(const struct pool_set *set, unsigned r)
{
	unsigned idx = REPidx(set, r);
	return set->replica[idx];
}
/* get the (r + 1)th replica */
static inline struct pool_replica *
REPN(const struct pool_set *set, unsigned r)
{
	unsigned idx = REPNidx(set, r);
	return set->replica[idx];
}
/* get the (r - 1)th replica */
static inline struct pool_replica *
REPP(const struct pool_set *set, unsigned r)
{
	unsigned idx = REPPidx(set, r);
	return set->replica[idx];
}
/* get the (p)th part */
static inline struct pool_set_part *
PART(struct pool_replica *rep, unsigned p)
{
	unsigned idx = PARTidx(rep, p);
	return &rep->part[idx];
}
/* get the (p + 1)th part */
static inline struct pool_set_part *
PARTN(struct pool_replica *rep, unsigned p)
{
	unsigned idx = PARTNidx(rep, p);
	return &rep->part[idx];
}
/* get the (p - 1)th part */
static inline struct pool_set_part *
PARTP(struct pool_replica *rep, unsigned p)
{
	unsigned idx = PARTPidx(rep, p);
	return &rep->part[idx];
}
/* get the (p)th pool header */
static inline struct pool_hdr *
HDR(struct pool_replica *rep, unsigned p)
{
	unsigned idx = HDRidx(rep, p);
	return (struct pool_hdr *)(rep->part[idx].hdr);
}
/* get the (p + 1)th pool header */
static inline struct pool_hdr *
HDRN(struct pool_replica *rep, unsigned p)
{
	unsigned idx = HDRNidx(rep, p);
	return (struct pool_hdr *)(rep->part[idx].hdr);
}
/* get the (p - 1)th pool header */
static inline struct pool_hdr *
HDRP(struct pool_replica *rep, unsigned p)
{
	unsigned idx = HDRPidx(rep, p);
	return (struct pool_hdr *)(rep->part[idx].hdr);
}
/*
 * Runtime knobs: when nonzero, pool mappings are prefaulted at
 * open/create time.  NOTE(review): presumably toggled through the
 * ctl interface -- confirm against the implementation in set.c.
 */
extern int Prefault_at_open;
extern int Prefault_at_create;
/* poolset descriptor parsing and pool set lifecycle */
int util_poolset_parse(struct pool_set **setp, const char *path, int fd);
int util_poolset_read(struct pool_set **setp, const char *path);
int util_poolset_create_set(struct pool_set **setp, const char *path,
	size_t poolsize, size_t minsize, int ignore_sds);
int util_poolset_open(struct pool_set *set);
void util_poolset_close(struct pool_set *set, enum del_parts_mode del);
void util_poolset_free(struct pool_set *set);
int util_poolset_chmod(struct pool_set *set, mode_t mode);
void util_poolset_fdclose(struct pool_set *set);
void util_poolset_fdclose_always(struct pool_set *set);
int util_is_poolset_file(const char *path);
int util_poolset_foreach_part_struct(struct pool_set *set,
	int (*cb)(struct part_file *pf, void *arg), void *arg);
int util_poolset_foreach_part(const char *path,
	int (*cb)(struct part_file *pf, void *arg), void *arg);
size_t util_poolset_size(const char *path);
/* deep flush/persist for a range within one replica */
int util_replica_deep_common(const void *addr, size_t len,
	struct pool_set *set, unsigned replica_id, int flush);
int util_replica_deep_persist(const void *addr, size_t len,
	struct pool_set *set, unsigned replica_id);
int util_replica_deep_drain(const void *addr, size_t len,
	struct pool_set *set, unsigned replica_id);
/* pool creation and opening */
int util_pool_create(struct pool_set **setp, const char *path, size_t poolsize,
	size_t minsize, size_t minpartsize, const struct pool_attr *attr,
	unsigned *nlanes, int can_have_rep);
int util_pool_create_uuids(struct pool_set **setp, const char *path,
	size_t poolsize, size_t minsize, size_t minpartsize,
	const struct pool_attr *attr, unsigned *nlanes, int can_have_rep,
	int remote);
/* part/replica open, close and mapping helpers */
int util_part_open(struct pool_set_part *part, size_t minsize, int create);
void util_part_fdclose(struct pool_set_part *part);
int util_replica_open(struct pool_set *set, unsigned repidx, int flags);
int util_replica_set_attr(struct pool_replica *rep,
	const struct rpmem_pool_attr *rattr);
void util_pool_hdr2attr(struct pool_attr *attr, struct pool_hdr *hdr);
void util_pool_attr2hdr(struct pool_hdr *hdr,
	const struct pool_attr *attr);
int util_replica_close(struct pool_set *set, unsigned repidx);
int util_map_part(struct pool_set_part *part, void *addr, size_t size,
	size_t offset, int flags, int rdonly);
int util_unmap_part(struct pool_set_part *part);
int util_unmap_parts(struct pool_replica *rep, unsigned start_index,
	unsigned end_index);
int util_header_create(struct pool_set *set, unsigned repidx, unsigned partidx,
	const struct pool_attr *attr, int overwrite);
int util_map_hdr(struct pool_set_part *part, int flags, int rdonly);
int util_unmap_hdr(struct pool_set_part *part);
int util_pool_has_device_dax(struct pool_set *set);
int util_pool_open_nocheck(struct pool_set *set, unsigned flags);
int util_pool_open(struct pool_set **setp, const char *path, size_t minpartsize,
	const struct pool_attr *attr, unsigned *nlanes, void *addr,
	unsigned flags);
int util_pool_open_remote(struct pool_set **setp, const char *path, int cow,
	size_t minpartsize, struct rpmem_pool_attr *rattr);
void *util_pool_extend(struct pool_set *set, size_t *size, size_t minpartsize);
/* remote replica (librpmem) support */
void util_remote_init(void);
void util_remote_fini(void);
int util_update_remote_header(struct pool_set *set, unsigned repn);
void util_remote_init_lock(void);
void util_remote_destroy_lock(void);
int util_pool_close_remote(RPMEMpool *rpp);
void util_remote_unload(void);
void util_replica_fdclose(struct pool_replica *rep);
int util_poolset_remote_open(struct pool_replica *rep, unsigned repidx,
	size_t minsize, int create, void *pool_addr,
	size_t pool_size, unsigned *nlanes);
int util_remote_load(void);
int util_replica_open_remote(struct pool_set *set, unsigned repidx, int flags);
int util_poolset_remote_replica_open(struct pool_set *set, unsigned repidx,
	size_t minsize, int create, unsigned *nlanes);
int util_replica_close_local(struct pool_replica *rep, unsigned repn,
	enum del_parts_mode del);
int util_replica_close_remote(struct pool_replica *rep, unsigned repn,
	enum del_parts_mode del);
/* librpmem entry points, resolved at runtime when the lib is loaded */
extern int (*Rpmem_persist)(RPMEMpool *rpp, size_t offset, size_t length,
	unsigned lane, unsigned flags);
extern int (*Rpmem_deep_persist)(RPMEMpool *rpp, size_t offset, size_t length,
	unsigned lane);
extern int (*Rpmem_read)(RPMEMpool *rpp, void *buff, size_t offset,
	size_t length, unsigned lane);
extern int (*Rpmem_close)(RPMEMpool *rpp);
extern int (*Rpmem_remove)(const char *target,
	const char *pool_set_name, int flags);
extern int (*Rpmem_set_attr)(RPMEMpool *rpp,
	const struct rpmem_pool_attr *rattr);
#ifdef __cplusplus
}
#endif
#endif
| 14,162 | 31.261959 | 80 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/dlsym.h
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* dlsym.h -- dynamic linking utilities with library-specific implementation
*/
#ifndef PMDK_DLSYM_H
#define PMDK_DLSYM_H 1
#include "out.h"
#if defined(USE_LIBDL) && !defined(_WIN32)
#include <dlfcn.h>
/*
 * util_dlopen -- calls real dlopen()
 *
 * Loads 'filename' with immediate symbol resolution (RTLD_NOW).
 * Returns the library handle, or NULL on failure (use util_dlerror()
 * for the reason).
 */
static inline void *
util_dlopen(const char *filename)
{
	LOG(3, "filename %s", filename);
	return dlopen(filename, RTLD_NOW);
}
/*
 * util_dlerror -- calls real dlerror()
 *
 * Returns a human-readable description of the most recent dl* error,
 * or NULL if none occurred since the last call.
 */
static inline char *
util_dlerror(void)
{
	return dlerror();
}
/*
 * util_dlsym -- calls real dlsym()
 *
 * Looks up 'symbol' in the library referenced by 'handle'; returns
 * the symbol's address or NULL if not found.
 */
static inline void *
util_dlsym(void *handle, const char *symbol)
{
	LOG(3, "handle %p symbol %s", handle, symbol);
	return dlsym(handle, symbol);
}
/*
 * util_dlclose -- calls real dlclose()
 *
 * Drops the reference on 'handle'; returns 0 on success, nonzero on
 * error.
 */
static inline int
util_dlclose(void *handle)
{
	LOG(3, "handle %p", handle);
	return dlclose(handle);
}
#else /* empty functions */
/*
 * util_dlopen -- empty function
 *
 * Stub used when the build has no libdl (or on Windows): always
 * fails with errno = ENOSYS so callers can report "not supported".
 */
static inline void *
util_dlopen(const char *filename)
{
	/* silence unused-parameter warnings in this stub build */
	(void) filename;
	errno = ENOSYS;
	return NULL;
}
/*
 * util_dlerror -- empty function
 *
 * Always reports "no message" (NULL) and sets errno = ENOSYS.
 */
static inline char *
util_dlerror(void)
{
	errno = ENOSYS;
	return NULL;
}
/*
 * util_dlsym -- empty function
 *
 * Symbol lookup is unavailable: sets errno = ENOSYS, returns NULL.
 */
static inline void *
util_dlsym(void *handle, const char *symbol)
{
	/* silence unused-parameter warnings in this stub build */
	(void) handle;
	(void) symbol;
	errno = ENOSYS;
	return NULL;
}
/*
 * util_dlclose -- empty function
 *
 * Nothing to close: sets errno = ENOSYS but still returns 0
 * (success), matching the original stub's contract.
 */
static inline int
util_dlclose(void *handle)
{
	/* silence unused-parameter warnings in this stub build */
	(void) handle;
	errno = ENOSYS;
	return 0;
}
#endif
#endif
| 3,000 | 21.56391 | 76 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/sys_util.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys_util.h -- internal utility wrappers around system functions
*/
#ifndef PMDK_SYS_UTIL_H
#define PMDK_SYS_UTIL_H 1
#include <errno.h>
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* util_mutex_init -- os_mutex_init variant that never fails from
* caller perspective. If os_mutex_init failed, this function aborts
* the program.
*/
static inline void
util_mutex_init(os_mutex_t *m)
{
int tmp = os_mutex_init(m);
if (tmp) {
errno = tmp;
FATAL("!os_mutex_init");
}
}
/*
 * util_mutex_destroy -- os_mutex_destroy wrapper that cannot fail from the
 * caller's point of view; any os_mutex_destroy error aborts the program.
 */
static inline void
util_mutex_destroy(os_mutex_t *m)
{
	int err = os_mutex_destroy(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_mutex_destroy");
}
/*
 * util_mutex_lock -- os_mutex_lock wrapper that cannot fail from the
 * caller's point of view; any os_mutex_lock error aborts the program.
 */
static inline void
util_mutex_lock(os_mutex_t *m)
{
	int err = os_mutex_lock(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_mutex_lock");
}
/*
 * util_mutex_trylock -- os_mutex_trylock wrapper that cannot fail from the
 * caller's point of view, except for the expected EBUSY case.  Any other
 * os_mutex_trylock error aborts the program.
 * Returns 0 when the lock was taken, EBUSY otherwise.
 */
static inline int
util_mutex_trylock(os_mutex_t *m)
{
	int err = os_mutex_trylock(m);

	if (err != 0 && err != EBUSY) {
		errno = err;
		FATAL("!os_mutex_trylock");
	}

	return err;
}
/*
 * util_mutex_unlock -- os_mutex_unlock wrapper that cannot fail from the
 * caller's point of view; any os_mutex_unlock error aborts the program.
 */
static inline void
util_mutex_unlock(os_mutex_t *m)
{
	int err = os_mutex_unlock(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_mutex_unlock");
}
/*
 * util_rwlock_init -- os_rwlock_init wrapper that cannot fail from the
 * caller's point of view; any os_rwlock_init error aborts the program.
 */
static inline void
util_rwlock_init(os_rwlock_t *m)
{
	int err = os_rwlock_init(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_rwlock_init");
}
/*
 * util_rwlock_rdlock -- os_rwlock_rdlock wrapper that cannot fail from the
 * caller's point of view; any os_rwlock_rdlock error aborts the program.
 */
static inline void
util_rwlock_rdlock(os_rwlock_t *m)
{
	int err = os_rwlock_rdlock(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_rwlock_rdlock");
}
/*
 * util_rwlock_wrlock -- os_rwlock_wrlock wrapper that cannot fail from the
 * caller's point of view; any os_rwlock_wrlock error aborts the program.
 */
static inline void
util_rwlock_wrlock(os_rwlock_t *m)
{
	int err = os_rwlock_wrlock(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_rwlock_wrlock");
}
/*
 * util_rwlock_unlock -- os_rwlock_unlock wrapper that cannot fail from the
 * caller's point of view; any os_rwlock_unlock error aborts the program.
 */
static inline void
util_rwlock_unlock(os_rwlock_t *m)
{
	int err = os_rwlock_unlock(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_rwlock_unlock");
}
/*
 * util_rwlock_destroy -- os_rwlock_destroy wrapper that cannot fail from the
 * caller's point of view; any os_rwlock_destroy error aborts the program.
 */
static inline void
util_rwlock_destroy(os_rwlock_t *m)
{
	int err = os_rwlock_destroy(m);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_rwlock_destroy");
}
/*
 * util_spin_init -- os_spin_init wrapper that logs and sets errno on
 * failure; unlike the other wrappers it does not abort, and returns the
 * os_spin_init result to the caller.
 */
static inline int
util_spin_init(os_spinlock_t *lock, int pshared)
{
	int err = os_spin_init(lock, pshared);

	if (err != 0) {
		errno = err;
		ERR("!os_spin_init");
	}

	return err;
}
/*
 * util_spin_destroy -- os_spin_destroy wrapper that cannot fail from the
 * caller's point of view; any os_spin_destroy error aborts the program.
 */
static inline void
util_spin_destroy(os_spinlock_t *lock)
{
	int err = os_spin_destroy(lock);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_spin_destroy");
}
/*
 * util_spin_lock -- os_spin_lock wrapper that cannot fail from the
 * caller's point of view; any os_spin_lock error aborts the program.
 */
static inline void
util_spin_lock(os_spinlock_t *lock)
{
	int err = os_spin_lock(lock);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_spin_lock");
}
/*
 * util_spin_unlock -- os_spin_unlock wrapper that cannot fail from the
 * caller's point of view; any os_spin_unlock error aborts the program.
 */
static inline void
util_spin_unlock(os_spinlock_t *lock)
{
	int err = os_spin_unlock(lock);

	if (err == 0)
		return;

	errno = err;
	FATAL("!os_spin_unlock");
}
/*
 * util_semaphore_init -- os_semaphore_init wrapper that cannot fail from
 * the caller's point of view; any os_semaphore_init error aborts the
 * program.
 */
static inline void
util_semaphore_init(os_semaphore_t *sem, unsigned value)
{
	int err = os_semaphore_init(sem, value);
	if (err != 0)
		FATAL("!os_semaphore_init");
}
/*
 * util_semaphore_destroy -- deletes a semaphore instance; aborts the
 * program on failure.
 */
static inline void
util_semaphore_destroy(os_semaphore_t *sem)
{
	int err = os_semaphore_destroy(sem);
	if (err != 0)
		FATAL("!os_semaphore_destroy");
}
/*
 * util_semaphore_wait -- decreases the value of the semaphore
 *
 * Retries the wait when it is interrupted by a signal (EINTR); any other
 * failure aborts the program.
 */
static inline void
util_semaphore_wait(os_semaphore_t *sem)
{
	errno = 0;
	int ret;
	do {
		ret = os_semaphore_wait(sem);
		/*
		 * Retry only when the call itself failed with EINTR.
		 * Checking errno alone is not enough: a successful wait
		 * does not reset errno, so a success right after an EINTR
		 * failure would loop again and decrement the semaphore a
		 * second time.
		 */
	} while (ret != 0 && errno == EINTR); /* signal interrupt */
	if (ret != 0)
		FATAL("!os_semaphore_wait");
}
/*
 * util_semaphore_trywait -- tries to decrease the value of the semaphore
 *
 * Retries when interrupted by a signal (EINTR).  Returns 0 on success,
 * or the non-zero result when the semaphore was already at zero (EAGAIN);
 * any other failure aborts the program.
 */
static inline int
util_semaphore_trywait(os_semaphore_t *sem)
{
	errno = 0;
	int ret;
	do {
		ret = os_semaphore_trywait(sem);
		/*
		 * Retry only when the call itself failed with EINTR; errno
		 * is not cleared by a successful call, so testing errno
		 * alone could spuriously repeat the trywait after success.
		 */
	} while (ret != 0 && errno == EINTR); /* signal interrupt */
	if (ret != 0 && errno != EAGAIN)
		FATAL("!os_semaphore_trywait");
	return ret;
}
/*
 * util_semaphore_post -- increases the value of the semaphore; aborts the
 * program on failure.
 */
static inline void
util_semaphore_post(os_semaphore_t *sem)
{
	int err = os_semaphore_post(sem);
	if (err != 0)
		FATAL("!os_semaphore_post");
}
#ifdef __cplusplus
}
#endif
#endif
| 7,640 | 22.154545 | 76 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/os.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * os.h -- os abstraction layer
*/
#ifndef PMDK_OS_H
#define PMDK_OS_H 1
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>
#include "errno_freebsd.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _WIN32
#define OS_DIR_SEPARATOR '/'
#define OS_DIR_SEP_STR "/"
#else
#define OS_DIR_SEPARATOR '\\'
#define OS_DIR_SEP_STR "\\"
#endif
#ifndef _WIN32
/* madvise() */
#ifdef __FreeBSD__
#define os_madvise minherit
#define MADV_DONTFORK INHERIT_NONE
#else
#define os_madvise madvise
#endif
/* dlopen() */
#ifdef __FreeBSD__
#define RTLD_DEEPBIND 0 /* XXX */
#endif
/* major(), minor() */
#ifdef __FreeBSD__
#define os_major (unsigned)major
#define os_minor (unsigned)minor
#else
#define os_major major
#define os_minor minor
#endif
#endif /* #ifndef _WIN32 */
struct iovec;
/* os_flock */
#define OS_LOCK_SH 1
#define OS_LOCK_EX 2
#define OS_LOCK_NB 4
#define OS_LOCK_UN 8
#ifndef _WIN32
typedef struct stat os_stat_t;
#define os_fstat fstat
#define os_lseek lseek
#else
typedef struct _stat64 os_stat_t;
#define os_fstat _fstat64
#define os_lseek _lseeki64
#endif
#define os_close close
#define os_fclose fclose
#ifndef _WIN32
typedef off_t os_off_t;
#else
/* XXX: os_off_t defined in platform.h */
#endif
int os_open(const char *pathname, int flags, ...);
int os_fsync(int fd);
int os_fsync_dir(const char *dir_name);
int os_stat(const char *pathname, os_stat_t *buf);
int os_unlink(const char *pathname);
int os_access(const char *pathname, int mode);
FILE *os_fopen(const char *pathname, const char *mode);
FILE *os_fdopen(int fd, const char *mode);
int os_chmod(const char *pathname, mode_t mode);
int os_mkstemp(char *temp);
int os_posix_fallocate(int fd, os_off_t offset, os_off_t len);
int os_ftruncate(int fd, os_off_t length);
int os_flock(int fd, int operation);
ssize_t os_writev(int fd, const struct iovec *iov, int iovcnt);
int os_clock_gettime(int id, struct timespec *ts);
unsigned os_rand_r(unsigned *seedp);
int os_unsetenv(const char *name);
int os_setenv(const char *name, const char *value, int overwrite);
char *os_getenv(const char *name);
const char *os_strsignal(int sig);
int os_execv(const char *path, char *const argv[]);
/*
 * XXX: missing APIs (used in ut_file.c)
*
* rename
* read
* write
*/
#ifdef __cplusplus
}
#endif
#endif /* os.h */
| 3,902 | 25.917241 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/mmap.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mmap.h -- internal definitions for mmap module
*/
#ifndef PMDK_MMAP_H
#define PMDK_MMAP_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include "out.h"
#include "queue.h"
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
extern int Mmap_no_random;
extern void *Mmap_hint;
extern char *Mmap_mapfile;
void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
os_off_t offset, int *map_sync);
void *util_map(int fd, size_t len, int flags, int rdonly,
size_t req_align, int *map_sync);
int util_unmap(void *addr, size_t len);
void *util_map_tmpfile(const char *dir, size_t size, size_t req_align);
#ifdef __FreeBSD__
#define MAP_NORESERVE 0
#define OS_MAPFILE "/proc/curproc/map"
#else
#define OS_MAPFILE "/proc/self/maps"
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
/*
* macros for micromanaging range protections for the debug version
*/
#ifdef DEBUG
#define RANGE(addr, len, is_dev_dax, type) do {\
if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\
} while (0)
#else
#define RANGE(addr, len, is_dev_dax, type) do {} while (0)
#endif
#define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro)
#define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw)
#define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none)
/* pmem mapping type */
enum pmem_map_type {
PMEM_DEV_DAX, /* device dax */
PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */
MAX_PMEM_TYPE
};
/*
* this structure tracks the file mappings outstanding per file handle
*/
struct map_tracker {
SORTEDQ_ENTRY(map_tracker) entry;
uintptr_t base_addr;
uintptr_t end_addr;
int region_id;
enum pmem_map_type type;
#ifdef _WIN32
/* Windows-specific data */
HANDLE FileHandle;
HANDLE FileMappingHandle;
DWORD Access;
os_off_t Offset;
size_t FileLen;
#endif
};
void util_mmap_init(void);
void util_mmap_fini(void);
int util_range_ro(void *addr, size_t len);
int util_range_rw(void *addr, size_t len);
int util_range_none(void *addr, size_t len);
char *util_map_hint_unused(void *minaddr, size_t len, size_t align);
char *util_map_hint(size_t len, size_t req_align);
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/*
 * util_map_hint_align -- choose the desired mapping alignment
 *
 * An explicitly requested alignment always wins.  Otherwise the smallest
 * supported alignment is 2 megabytes because of the object alignment
 * requirements; changing this value to 4 kilobytes constitutes a layout
 * change.  A 1GB page alignment is used only when the mapping length is
 * at least twice as big as the huge page size.
 */
static inline size_t
util_map_hint_align(size_t len, size_t req_align)
{
	if (req_align)
		return req_align;

	if (len >= 2 * GIGABYTE)
		return GIGABYTE;

	return 2 * MEGABYTE;
}
int util_range_register(const void *addr, size_t len, const char *path,
enum pmem_map_type type);
int util_range_unregister(const void *addr, size_t len);
struct map_tracker *util_range_find(uintptr_t addr, size_t len);
int util_range_is_pmem(const void *addr, size_t len);
#ifdef __cplusplus
}
#endif
#endif
| 4,854 | 27.063584 | 76 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/os_dimm.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* os_dimm.h -- DIMMs API based on the ndctl library
*/
#ifndef PMDK_OS_DIMM_H
#define PMDK_OS_DIMM_H 1
#include <string.h>
#include <stdint.h>
#include "os_badblock.h"
#ifdef __cplusplus
extern "C" {
#endif
int os_dimm_uid(const char *path, char *uid, size_t *len);
int os_dimm_usc(const char *path, uint64_t *usc);
int os_dimm_files_namespace_badblocks(const char *path, struct badblocks *bbs);
int os_dimm_devdax_clear_badblocks_all(const char *path);
int os_dimm_devdax_clear_badblocks(const char *path, struct badblocks *bbs);
#ifdef __cplusplus
}
#endif
#endif
| 2,180 | 35.35 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/util.h
|
/*
* Copyright 2014-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util.h -- internal definitions for util module
*/
#ifndef PMDK_UTIL_H
#define PMDK_UTIL_H 1
#include <string.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <ctype.h>
#ifdef _MSC_VER
#include <intrin.h> /* popcnt, bitscan */
#endif
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
extern unsigned long long Pagesize;
extern unsigned long long Mmap_align;
#define CACHELINE_SIZE 64ULL
#define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1))
#define PAGE_ALIGNED_UP_SIZE(size)\
PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1))
#define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0)
#define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr)))
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))
#define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m)
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
/*
* overridable names for malloc & friends used by this library
*/
typedef void *(*Malloc_func)(size_t size);
typedef void (*Free_func)(void *ptr);
typedef void *(*Realloc_func)(void *ptr, size_t size);
typedef char *(*Strdup_func)(const char *s);
extern Malloc_func Malloc;
extern Free_func Free;
extern Realloc_func Realloc;
extern Strdup_func Strdup;
extern void *Zalloc(size_t sz);
void util_init(void);
int util_is_zeroed(const void *addr, size_t len);
int util_checksum(void *addr, size_t len, uint64_t *csump,
int insert, size_t skip_off);
uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum);
int util_parse_size(const char *str, size_t *sizep);
char *util_fgets(char *buffer, int max, FILE *stream);
char *util_getexecname(char *path, size_t pathlen);
char *util_part_realpath(const char *path);
int util_compare_file_inodes(const char *path1, const char *path2);
void *util_aligned_malloc(size_t alignment, size_t size);
void util_aligned_free(void *ptr);
struct tm *util_localtime(const time_t *timep);
int util_safe_strcpy(char *dst, const char *src, size_t max_length);
#ifdef _WIN32
char *util_toUTF8(const wchar_t *wstr);
wchar_t *util_toUTF16(const char *wstr);
void util_free_UTF8(char *str);
void util_free_UTF16(wchar_t *str);
int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size);
int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size);
#endif
#define UTIL_MAX_ERR_MSG 128
void util_strerror(int errnum, char *buff, size_t bufflen);
void util_set_alloc_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
/*
* Macro calculates number of elements in given table
*/
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#ifdef _MSC_VER
#define force_inline inline __forceinline
#define NORETURN __declspec(noreturn)
#else
#define force_inline __attribute__((always_inline)) inline
#define NORETURN __attribute__((noreturn))
#endif
#define util_get_not_masked_bits(x, mask) ((x) & ~(mask))
/*
 * util_setbit -- setbit macro substitution which properly deals with types
 */
static inline void
util_setbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(1 << (i % 8));
	b[i / 8] = (uint8_t)(b[i / 8] | mask);
}
/*
 * util_clrbit -- clrbit macro substitution which properly deals with types
 */
static inline void
util_clrbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(~(1 << (i % 8)));
	b[i / 8] = (uint8_t)(b[i / 8] & mask);
}
#define util_isset(a, i) isset(a, i)
#define util_isclr(a, i) isclr(a, i)
#define util_flag_isset(a, f) ((a) & (f))
#define util_flag_isclr(a, f) (((a) & (f)) == 0)
/*
 * util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise
 */
static force_inline int
util_is_pow2(uint64_t v)
{
	if (v == 0)
		return 0;

	/* a power of two shares no bits with its predecessor */
	return (v & (v - 1)) == 0;
}
/*
 * util_div_ceil -- divides a by b and rounds up the result
 */
static force_inline unsigned
util_div_ceil(unsigned a, unsigned b)
{
	/* widen before adding so a + b - 1 cannot overflow unsigned */
	unsigned long total = (unsigned long)a + b - 1;
	return (unsigned)(total / b);
}
/*
* util_bool_compare_and_swap -- perform an atomic compare and swap
* util_fetch_and_* -- perform an operation atomically, return old value
* util_synchronize -- issue a full memory barrier
* util_popcount -- count number of set bits
* util_lssb_index -- return index of least significant set bit,
* undefined on zero
* util_mssb_index -- return index of most significant set bit
* undefined on zero
*
* XXX assertions needed on (value != 0) in both versions of bitscans
*
*/
#ifndef _MSC_VER
/*
* ISO C11 -- 7.17.1.4
* memory_order - an enumerated type whose enumerators identify memory ordering
* constraints.
*/
typedef enum {
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
*
*
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address -- see comments at the MSVC version.
*
* Also, instead of generic functions, two versions are available:
* for 32 bit fundamental integers, and for 64 bit ones.
*/
#define util_atomic_load_explicit32 __atomic_load
#define util_atomic_load_explicit64 __atomic_load
/*
* ISO C11 -- 7.17.7.1 The atomic_store generic functions
* Integer width specific versions as supplement for:
*
* #include <stdatomic.h>
* void atomic_store(volatile A *object, C desired);
* void atomic_store_explicit(volatile A *object, C desired,
* memory_order order);
*/
#define util_atomic_store_explicit32 __atomic_store_n
#define util_atomic_store_explicit64 __atomic_store_n
/*
* https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
* https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions
*/
#define util_bool_compare_and_swap32 __sync_bool_compare_and_swap
#define util_bool_compare_and_swap64 __sync_bool_compare_and_swap
#define util_fetch_and_add32 __sync_fetch_and_add
#define util_fetch_and_add64 __sync_fetch_and_add
#define util_fetch_and_sub32 __sync_fetch_and_sub
#define util_fetch_and_sub64 __sync_fetch_and_sub
#define util_fetch_and_and32 __sync_fetch_and_and
#define util_fetch_and_and64 __sync_fetch_and_and
#define util_fetch_and_or32 __sync_fetch_and_or
#define util_fetch_and_or64 __sync_fetch_and_or
#define util_synchronize __sync_synchronize
#define util_popcount(value) ((unsigned char)__builtin_popcount(value))
#define util_popcount64(value) ((unsigned char)__builtin_popcountll(value))
#define util_lssb_index(value) ((unsigned char)__builtin_ctz(value))
#define util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value))
#define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value)))
#define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value)))
#else
/* ISO C11 -- 7.17.1.4 */
typedef enum {
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
/*
* ISO C11 -- 7.17.7.2 The atomic_load generic functions
* Integer width specific versions as supplement for:
*
*
* #include <stdatomic.h>
* C atomic_load(volatile A *object);
* C atomic_load_explicit(volatile A *object, memory_order order);
*
* The atomic_load interface doesn't return the loaded value, but instead
* copies it to a specified address.
* The MSVC specific implementation needs to trigger a barrier (at least
* compiler barrier) after the load from the volatile value. The actual load
* from the volatile value itself is expected to be atomic.
*
 * The actual interface here:
* #include <util.h>
* void util_atomic_load32(volatile A *object, A *destination);
* void util_atomic_load64(volatile A *object, A *destination);
* void util_atomic_load_explicit32(volatile A *object, A *destination,
* memory_order order);
* void util_atomic_load_explicit64(volatile A *object, A *destination,
* memory_order order);
*/
#ifndef _M_X64
#error MSVC ports of util_atomic_ only work on X86_64
#endif
#if _MSC_VER >= 2000
#error util_atomic_ utility functions not tested with this version of VC++
#error These utility functions are not future proof, as they are not
#error based on publicly available documentation.
#endif
#define util_atomic_load_explicit(object, dest, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_consume &&\
order != memory_order_acquire &&\
order != memory_order_relaxed);\
*dest = *object;\
if (order == memory_order_seq_cst ||\
order == memory_order_consume ||\
order == memory_order_acquire)\
_ReadWriteBarrier();\
} while (0)
#define util_atomic_load_explicit32 util_atomic_load_explicit
#define util_atomic_load_explicit64 util_atomic_load_explicit
/* ISO C11 -- 7.17.7.1 The atomic_store generic functions */
#define util_atomic_store_explicit64(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange64(\
(volatile long long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
#define util_atomic_store_explicit32(object, desired, order)\
do {\
COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
order != memory_order_release &&\
order != memory_order_relaxed);\
if (order == memory_order_seq_cst) {\
_InterlockedExchange(\
(volatile long *)object, desired);\
} else {\
if (order == memory_order_release)\
_ReadWriteBarrier();\
*object = desired;\
}\
} while (0)
/*
 * https://msdn.microsoft.com/en-us/library/hh977022.aspx
 *
 * MSVC compare-and-swap helpers: return nonzero iff *ptr was equal to
 * oldval and has been replaced with newval.
 */
static __inline int
bool_compare_and_swap32_VC(volatile LONG *ptr,
	LONG oldval, LONG newval)
{
	return InterlockedCompareExchange(ptr, newval, oldval) == oldval;
}
static __inline int
bool_compare_and_swap64_VC(volatile LONG64 *ptr,
	LONG64 oldval, LONG64 newval)
{
	return InterlockedCompareExchange64(ptr, newval, oldval) == oldval;
}
#define util_bool_compare_and_swap32(p, o, n)\
bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n))
#define util_bool_compare_and_swap64(p, o, n)\
bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n))
#define util_fetch_and_add32(ptr, value)\
InterlockedExchangeAdd((LONG *)(ptr), value)
#define util_fetch_and_add64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), value)
#define util_fetch_and_sub32(ptr, value)\
InterlockedExchangeSubtract((LONG *)(ptr), value)
#define util_fetch_and_sub64(ptr, value)\
InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value)))
#define util_fetch_and_and32(ptr, value)\
InterlockedAnd((LONG *)(ptr), value)
#define util_fetch_and_and64(ptr, value)\
InterlockedAnd64((LONG64 *)(ptr), value)
#define util_fetch_and_or32(ptr, value)\
InterlockedOr((LONG *)(ptr), value)
#define util_fetch_and_or64(ptr, value)\
InterlockedOr64((LONG64 *)(ptr), value)
/* util_synchronize -- full memory barrier (MSVC counterpart of
 * __sync_synchronize) */
static __inline void
util_synchronize(void)
{
	MemoryBarrier();
}
#define util_popcount(value) (unsigned char)__popcnt(value)
#define util_popcount64(value) (unsigned char)__popcnt64(value)
/* util_lssb_index -- index of least significant set bit; undefined for 0 */
static __inline unsigned char
util_lssb_index(int value)
{
	unsigned long ret;
	_BitScanForward(&ret, value);
	return (unsigned char)ret;
}
/* util_lssb_index64 -- 64-bit variant of util_lssb_index */
static __inline unsigned char
util_lssb_index64(long long value)
{
	unsigned long ret;
	_BitScanForward64(&ret, value);
	return (unsigned char)ret;
}
/* util_mssb_index -- index of most significant set bit; undefined for 0 */
static __inline unsigned char
util_mssb_index(int value)
{
	unsigned long ret;
	_BitScanReverse(&ret, value);
	return (unsigned char)ret;
}
/* util_mssb_index64 -- 64-bit variant of util_mssb_index */
static __inline unsigned char
util_mssb_index64(long long value)
{
	unsigned long ret;
	_BitScanReverse64(&ret, value);
	return (unsigned char)ret;
}
#endif
/* ISO C11 -- 7.17.7 Operations on atomic types */
#define util_atomic_load32(object, dest)\
util_atomic_load_explicit32(object, dest, memory_order_seqcst)
#define util_atomic_load64(object, dest)\
util_atomic_load_explicit64(object, dest, memory_order_seqcst)
#define util_atomic_store32(object, desired)\
util_atomic_store_explicit32(object, desired, memory_order_seqcst)
#define util_atomic_store64(object, desired)\
util_atomic_store_explicit64(object, desired, memory_order_seqcst)
/*
 * util_get_printable_ascii -- convert non-printable ascii to dot '.'
 */
static inline char
util_get_printable_ascii(char c)
{
	if (isprint((unsigned char)c))
		return c;

	return '.';
}
char *util_concat_str(const char *s1, const char *s2);
#if !defined(likely)
#if defined(__GNUC__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (!!(x))
#define unlikely(x) (!!(x))
#endif
#endif
#if defined(__CHECKER__)
#define COMPILE_ERROR_ON(cond)
#define ASSERT_COMPILE_ERROR_ON(cond)
#elif defined(_MSC_VER)
#define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond))
/* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */
#define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0)
#else
#define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
#define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond)
#endif
#ifndef _MSC_VER
#define ATTR_CONSTRUCTOR __attribute__((constructor)) static
#define ATTR_DESTRUCTOR __attribute__((destructor)) static
#else
#define ATTR_CONSTRUCTOR
#define ATTR_DESTRUCTOR
#endif
#ifndef _MSC_VER
#define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR
#else
#ifdef __cplusplus
#define CONSTRUCTOR(fun) \
void fun(); \
struct _##fun { \
_##fun() { \
fun(); \
} \
}; static _##fun foo; \
static
#else
#define CONSTRUCTOR(fun) \
MSVC_CONSTR(fun) \
static
#endif
#endif
#ifdef __GNUC__
#define CHECK_FUNC_COMPATIBLE(func1, func2)\
COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\
typeof(func2)))
#else
#define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0)
#endif /* __GNUC__ */
#ifdef __cplusplus
}
#endif
#endif /* util.h */
| 16,304 | 29.880682 | 79 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/valgrind_internal.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* valgrind_internal.h -- internal definitions for valgrind macros
*/
#ifndef PMDK_VALGRIND_INTERNAL_H
#define PMDK_VALGRIND_INTERNAL_H 1
#ifndef _WIN32
#ifndef VALGRIND_ENABLED
#define VALGRIND_ENABLED 1
#endif
#endif
#if VALGRIND_ENABLED
#define VG_PMEMCHECK_ENABLED 1
#define VG_HELGRIND_ENABLED 1
#define VG_MEMCHECK_ENABLED 1
#define VG_DRD_ENABLED 1
#endif
#if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \
VG_DRD_ENABLED
#define ANY_VG_TOOL_ENABLED 1
#else
#define ANY_VG_TOOL_ENABLED 0
#endif
#if ANY_VG_TOOL_ENABLED
extern unsigned _On_valgrind;
#define On_valgrind __builtin_expect(_On_valgrind, 0)
#include "valgrind/valgrind.h"
#else
#define On_valgrind (0)
#endif
#if VG_HELGRIND_ENABLED
#include "valgrind/helgrind.h"
#endif
#if VG_DRD_ENABLED
#include "valgrind/drd.h"
#endif
#if VG_HELGRIND_ENABLED || VG_DRD_ENABLED
/*
 * Happens-before/after ordering and ignore-section annotations for the
 * data-race detectors (helgrind/drd); each forwards to the tool only when
 * the process actually runs under valgrind.
 */
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\
	if (On_valgrind) \
		ANNOTATE_HAPPENS_BEFORE((obj));\
} while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\
	if (On_valgrind) \
		ANNOTATE_HAPPENS_AFTER((obj));\
} while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
	if (On_valgrind) \
		ANNOTATE_NEW_MEMORY((addr), (size));\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\
	if (On_valgrind) \
		ANNOTATE_IGNORE_READS_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\
	if (On_valgrind) \
		ANNOTATE_IGNORE_READS_END();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\
	if (On_valgrind) \
		ANNOTATE_IGNORE_WRITES_BEGIN();\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\
	if (On_valgrind) \
		ANNOTATE_IGNORE_WRITES_END();\
} while (0)
#else
/* race detectors disabled -- no-op stubs that still consume their args */
#define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0)
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\
	(void) (addr);\
	(void) (size);\
} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0)
#define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0)
#endif
#if VG_PMEMCHECK_ENABLED
#include "valgrind/pmemcheck.h"
/* emits a library/function marker into the pmemcheck store log (pmreorder) */
extern void util_emit_log(const char *lib, const char *func, int order);
extern int _Pmreorder_emit;
#define Pmreorder_emit __builtin_expect(_Pmreorder_emit, 0)
/*
 * pmem mapping registration, flush/fence and store-logging hooks --
 * forwarded to pmemcheck only when running under valgrind.
 */
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \
		(offset));\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {\
	if (On_valgrind)\
		VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\
} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_DO_FLUSH((addr), (len));\
} while (0)
#define VALGRIND_DO_FENCE do {\
	if (On_valgrind)\
		VALGRIND_PMC_DO_FENCE;\
} while (0)
/* persist = flush + fence, as a single annotation */
#define VALGRIND_DO_PERSIST(addr, len) do {\
	if (On_valgrind) {\
		VALGRIND_PMC_DO_FLUSH((addr), (len));\
		VALGRIND_PMC_DO_FENCE;\
	}\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_SET_CLEAN(addr, len);\
} while (0)
#define VALGRIND_WRITE_STATS do {\
	if (On_valgrind)\
		VALGRIND_PMC_WRITE_STATS;\
} while (0)
#define VALGRIND_LOG_STORES do {\
	if (On_valgrind)\
		VALGRIND_PMC_LOG_STORES;\
} while (0)
#define VALGRIND_NO_LOG_STORES do {\
	if (On_valgrind)\
		VALGRIND_PMC_NO_LOG_STORES;\
} while (0)
/*
 * VALGRIND_ADD_LOG_REGION / VALGRIND_REMOVE_LOG_REGION -- include/exclude
 * the [addr, addr + len) range from pmemcheck store logging;
 * VALGRIND_EMIT_LOG -- emit a user message into the store log.
 *
 * Fix: VALGRIND_REMOVE_LOG_REGION's "if" line carried a stray extra
 * backslash-plus-space before the line-continuation backslash, which both
 * breaks the continuation (the first backslash is not at end-of-line) and
 * injects an invalid token into every expansion; reduced to a single
 * trailing continuation backslash, matching the sibling macros.
 */
#define VALGRIND_ADD_LOG_REGION(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_ADD_LOG_REGION((addr), (len));\
} while (0)
#define VALGRIND_REMOVE_LOG_REGION(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REMOVE_LOG_REGION((addr), (len));\
} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
	if (On_valgrind)\
		VALGRIND_PMC_EMIT_LOG((emit_log));\
} while (0)
/*
 * Transaction boundary and tracked-range annotations for pmemcheck; the
 * *_N variants name an explicit transaction id "txn".
 */
#define VALGRIND_START_TX do {\
	if (On_valgrind)\
		VALGRIND_PMC_START_TX;\
} while (0)
#define VALGRIND_START_TX_N(txn) do {\
	if (On_valgrind)\
		VALGRIND_PMC_START_TX_N(txn);\
} while (0)
#define VALGRIND_END_TX do {\
	if (On_valgrind)\
		VALGRIND_PMC_END_TX;\
} while (0)
#define VALGRIND_END_TX_N(txn) do {\
	if (On_valgrind)\
		VALGRIND_PMC_END_TX_N(txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_ADD_TO_TX(addr, len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\
} while (0)
/*
 * Logs library and function name with proper suffix
 * to pmemcheck store log file.
 */
#define PMEMOBJ_API_START()\
	if (Pmreorder_emit)\
		util_emit_log("libpmemobj", __func__, 0);
#define PMEMOBJ_API_END()\
	if (Pmreorder_emit)\
		util_emit_log("libpmemobj", __func__, 1);
#else
/*
 * pmemcheck disabled -- every macro degrades to a no-op that still
 * void-casts each argument to silence unused-variable warnings.
 */
#define Pmreorder_emit (0)
#define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\
	(void) (desc);\
	(void) (base_addr);\
	(void) (size);\
	(void) (offset);\
} while (0)
#define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0)
#define VALGRIND_DO_FLUSH(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_DO_FENCE do {} while (0)
#define VALGRIND_DO_PERSIST(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_SET_CLEAN(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_WRITE_STATS do {} while (0)
#define VALGRIND_LOG_STORES do {} while (0)
#define VALGRIND_NO_LOG_STORES do {} while (0)
#define VALGRIND_ADD_LOG_REGION(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_REMOVE_LOG_REGION(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_EMIT_LOG(emit_log) do {\
	(void) (emit_log);\
} while (0)
#define VALGRIND_START_TX do {} while (0)
#define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0)
#define VALGRIND_END_TX do {} while (0)
#define VALGRIND_END_TX_N(txn) do {\
	(void) (txn);\
} while (0)
#define VALGRIND_ADD_TO_TX(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\
	(void) (txn);\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\
	(void) (txn);\
	(void) (addr);\
	(void) (len);\
} while (0)
#define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\
	(void) (addr);\
	(void) (len);\
} while (0)
#define PMEMOBJ_API_START() do {} while (0)
#define PMEMOBJ_API_END() do {} while (0)
#endif
#if VG_MEMCHECK_ENABLED
#include "valgrind/memcheck.h"
/*
 * memcheck wrappers: error-reporting toggles, mempool bookkeeping and
 * memory-state (defined/undefined/noaccess) transitions; each forwards to
 * memcheck only when running under valgrind.
 */
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\
	if (On_valgrind)\
		VALGRIND_DISABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\
	if (On_valgrind)\
		VALGRIND_ENABLE_ERROR_REPORTING;\
} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\
	if (On_valgrind)\
		VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\
} while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\
	if (On_valgrind)\
		VALGRIND_DESTROY_MEMPOOL(heap);\
} while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\
	if (On_valgrind)\
		VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\
} while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\
	if (On_valgrind)\
		VALGRIND_MEMPOOL_FREE(heap, addr);\
} while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\
	if (On_valgrind)\
		VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_MAKE_MEM_DEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\
} while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_MAKE_MEM_NOACCESS(addr, len);\
} while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\
	if (On_valgrind)\
		VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\
} while (0)
#else
/* memcheck disabled -- no-op stubs that still consume their arguments */
#define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0)
#define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\
	do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0)
#define VALGRIND_DO_DESTROY_MEMPOOL(heap)\
	do { (void) (heap); } while (0)
#define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\
	do { (void) (heap); (void) (addr); (void) (size); } while (0)
#define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\
	do { (void) (heap); (void) (addr); } while (0)
#define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\
	do {\
		(void) (heap); (void) (addrA); (void) (addrB); (void) (size);\
	} while (0)
#define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\
	do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\
	do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\
	do { (void) (addr); (void) (len); } while (0)
#define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\
	do { (void) (addr); (void) (len); } while (0)
#endif
#endif
| 11,923 | 23.738589 | 75 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/vecq.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vecq.h -- vector queue (FIFO) interface
*/
#ifndef PMDK_VECQ_H
#define PMDK_VECQ_H 1
#include <stddef.h>
#include "util.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
/* initial queue capacity; a power of two (required by the position masks) */
#define VECQ_INIT_SIZE (64)
/*
 * VECQ -- declares a ring-buffer FIFO.  "front" and "back" are monotonically
 * increasing counters masked with (capacity - 1) to obtain buffer positions,
 * so the capacity must stay a power of two (initial 64, doubled by
 * vecq_grow()).
 */
#define VECQ(name, type)\
struct name {\
	type *buffer;\
	size_t capacity;\
	size_t front;\
	size_t back;\
}
#define VECQ_INIT(vec) do {\
	(vec)->buffer = NULL;\
	(vec)->capacity = 0;\
	(vec)->front = 0;\
	(vec)->back = 0;\
} while (0)
/* re-arm a queue whose memory was recycled; informs the race detectors */
#define VECQ_REINIT(vec) do {\
	VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\
	VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\
		(sizeof(*(vec)->buffer) * ((vec)->capacity)));\
	(vec)->front = 0;\
	(vec)->back = 0;\
} while (0)
/* buffer positions of the oldest (front) and newest (back) slot */
#define VECQ_FRONT_POS(vec)\
	((vec)->front & ((vec)->capacity - 1))
#define VECQ_BACK_POS(vec)\
	((vec)->back & ((vec)->capacity - 1))
#define VECQ_FRONT(vec)\
	(vec)->buffer[VECQ_FRONT_POS(vec)]
#define VECQ_BACK(vec)\
	(vec)->buffer[VECQ_BACK_POS(vec)]
/* pops and yields the oldest element (advances front) */
#define VECQ_DEQUEUE(vec)\
	((vec)->buffer[(((vec)->front++) & ((vec)->capacity - 1))])
#define VECQ_SIZE(vec)\
	((vec)->back - (vec)->front)
static inline int
vecq_grow(void *vec, size_t s)
{
VECQ(vvec, void) *vecp = (struct vvec *)vec;
size_t ncapacity = vecp->capacity == 0 ?
VECQ_INIT_SIZE : vecp->capacity * 2;
void *tbuf = Realloc(vecp->buffer, s * ncapacity);
if (tbuf == NULL) {
ERR("!Realloc");
return -1;
}
vecp->buffer = tbuf;
vecp->capacity = ncapacity;
return 0;
}
/* grow the backing buffer, passing the single-element size to vecq_grow() */
#define VECQ_GROW(vec)\
	vecq_grow((void *)vec, sizeof(*(vec)->buffer))
#define VECQ_INSERT(vec, element)\
	(VECQ_BACK(vec) = element, (vec)->back += 1, 0)
/* enqueue, growing first when full; evaluates to 0 on success, -1 on OOM */
#define VECQ_ENQUEUE(vec, element)\
	((vec)->capacity == VECQ_SIZE(vec) ?\
	(VECQ_GROW(vec) == 0 ? VECQ_INSERT(vec, element) : -1) :\
	VECQ_INSERT(vec, element))
#define VECQ_CAPACITY(vec)\
	((vec)->capacity)
/*
 * NOTE(review): both FOREACH variants index from raw position 0 rather than
 * from VECQ_FRONT_POS(), unlike VECQ_DEQUEUE -- confirm callers only
 * iterate queues whose front counter never wrapped/advanced.
 */
#define VECQ_FOREACH(el, vec)\
	for (size_t _vec_i = 0;\
		_vec_i < VECQ_SIZE(vec) &&\
		(((el) = (vec)->buffer[_vec_i & ((vec)->capacity - 1)]), 1);\
		++_vec_i)
#define VECQ_FOREACH_REVERSE(el, vec)\
	for (size_t _vec_i = VECQ_SIZE(vec);\
		_vec_i > 0 &&\
		(((el) = (vec)->buffer[(_vec_i - 1) & ((vec)->capacity - 1)]), 1);\
		--_vec_i)
/* forget the contents but keep the allocation */
#define VECQ_CLEAR(vec) do {\
	(vec)->front = 0;\
	(vec)->back = 0;\
} while (0)
/* free the buffer and reset to the freshly-initialized state */
#define VECQ_DELETE(vec) do {\
	Free((vec)->buffer);\
	(vec)->buffer = NULL;\
	(vec)->capacity = 0;\
	(vec)->front = 0;\
	(vec)->back = 0;\
} while (0)
#ifdef __cplusplus
}
#endif
#endif /* PMDK_VECQ_H */
| 4,023 | 25.473684 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/fs.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fs.h -- file system traversal abstraction layer
*/
#ifndef PMDK_FS_H
#define PMDK_FS_H 1
#include <unistd.h>
#ifdef __cplusplus
extern "C" {
#endif
/* opaque traversal handle, created with fs_new() */
struct fs;
/* classification of a visited directory entry */
enum fs_entry_type {
	FS_ENTRY_FILE,
	FS_ENTRY_DIRECTORY,
	FS_ENTRY_SYMLINK,
	FS_ENTRY_OTHER,
	MAX_FS_ENTRY_TYPES
};
/* one entry yielded by fs_read(); strings are owned by the traversal */
struct fs_entry {
	enum fs_entry_type type;
	const char *name;	/* entry name -- presumably the last path component */
	size_t namelen;
	const char *path;	/* full path of the entry */
	size_t pathlen;
	/* the depth of the traversal */
	/* XXX long on FreeBSD. Linux uses short. No harm in it being bigger */
	long level;
};
/* starts a new traversal rooted at "path"; free with fs_delete() */
struct fs *fs_new(const char *path);
void fs_delete(struct fs *f);
/* this call invalidates the previous entry */
struct fs_entry *fs_read(struct fs *f);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_FS_H */
| 2,342 | 27.925926 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/vec.h
|
/*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vec.h -- vector interface
*/
#ifndef PMDK_VEC_H
#define PMDK_VEC_H 1
#include <stddef.h>
#include "valgrind_internal.h"
#include "util.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VEC_INIT_SIZE (64)
#define VEC(name, type)\
struct name {\
type *buffer;\
size_t size;\
size_t capacity;\
}
#define VEC_INITIALIZER {NULL, 0, 0}
#define VEC_INIT(vec) do {\
(vec)->buffer = NULL;\
(vec)->size = 0;\
(vec)->capacity = 0;\
} while (0)
#define VEC_MOVE(vecl, vecr) do {\
(vecl)->buffer = (vecr)->buffer;\
(vecl)->size = (vecr)->size;\
(vecl)->capacity = (vecr)->capacity;\
(vecr)->buffer = NULL;\
(vecr)->size = 0;\
(vecr)->capacity = 0;\
} while (0)
#define VEC_REINIT(vec) do {\
VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\
VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\
(sizeof(*(vec)->buffer) * ((vec)->capacity)));\
(vec)->size = 0;\
} while (0)
static inline int
vec_reserve(void *vec, size_t ncapacity, size_t s)
{
size_t ncap = ncapacity == 0 ? VEC_INIT_SIZE : ncapacity;
VEC(vvec, void) *vecp = (struct vvec *)vec;
void *tbuf = Realloc(vecp->buffer, s * ncap);
if (tbuf == NULL) {
ERR("!Realloc");
return -1;
}
vecp->buffer = tbuf;
vecp->capacity = ncap;
return 0;
}
/*
 * NOTE(review): the guard compares ncapacity against "size", not
 * "capacity", so a request between size and capacity still reallocates --
 * confirm this is intentional (matches historical behavior).
 */
#define VEC_RESERVE(vec, ncapacity)\
	(((vec)->size == 0 || (ncapacity) > (vec)->size) ?\
	vec_reserve((void *)vec, ncapacity, sizeof(*(vec)->buffer)) :\
	0)
#define VEC_POP_BACK(vec) do {\
	(vec)->size -= 1;\
} while (0)
#define VEC_FRONT(vec)\
	(vec)->buffer[0]
#define VEC_BACK(vec)\
	(vec)->buffer[(vec)->size - 1]
/* unordered erase: overwrite the victim with the last element, then pop */
#define VEC_ERASE_BY_POS(vec, pos) do {\
	if ((pos) != ((vec)->size - 1))\
		(vec)->buffer[(pos)] = VEC_BACK(vec);\
	VEC_POP_BACK(vec);\
} while (0)
#define VEC_ERASE_BY_PTR(vec, element) do {\
	if ((element) != &VEC_BACK(vec))\
		*(element) = VEC_BACK(vec);\
	VEC_POP_BACK(vec);\
} while (0)
#define VEC_INSERT(vec, element)\
	((vec)->buffer[(vec)->size - 1] = (element), 0)
#define VEC_INC_SIZE(vec)\
	(((vec)->size++), 0)
/* bump size, doubling the capacity first when full; 0 on success, -1 on OOM */
#define VEC_INC_BACK(vec)\
	((vec)->capacity == (vec)->size ?\
	(VEC_RESERVE((vec), ((vec)->capacity * 2)) == 0 ?\
		VEC_INC_SIZE(vec) : -1) :\
	VEC_INC_SIZE(vec))
#define VEC_PUSH_BACK(vec, element)\
	(VEC_INC_BACK(vec) == 0? VEC_INSERT(vec, element) : -1)
#define VEC_FOREACH(el, vec)\
	for (size_t _vec_i = 0;\
		_vec_i < (vec)->size && (((el) = (vec)->buffer[_vec_i]), 1);\
		++_vec_i)
#define VEC_FOREACH_REVERSE(el, vec)\
	for (size_t _vec_i = ((vec)->size);\
		_vec_i != 0 && (((el) = (vec)->buffer[_vec_i - 1]), 1);\
		--_vec_i)
#define VEC_FOREACH_BY_POS(elpos, vec)\
	for ((elpos) = 0; (elpos) < (vec)->size; ++(elpos))
#define VEC_FOREACH_BY_PTR(el, vec)\
	for (size_t _vec_i = 0;\
		_vec_i < (vec)->size && (((el) = &(vec)->buffer[_vec_i]), 1);\
		++_vec_i)
#define VEC_SIZE(vec)\
	((vec)->size)
#define VEC_CAPACITY(vec)\
	((vec)->capacity)
#define VEC_ARR(vec)\
	((vec)->buffer)
#define VEC_GET(vec, id)\
	(&(vec)->buffer[id])
/* forget the contents but keep the allocation */
#define VEC_CLEAR(vec) do {\
	(vec)->size = 0;\
} while (0)
/* free the buffer and reset to the freshly-initialized state */
#define VEC_DELETE(vec) do {\
	Free((vec)->buffer);\
	(vec)->buffer = NULL;\
	(vec)->size = 0;\
	(vec)->capacity = 0;\
} while (0)
#ifdef __cplusplus
}
#endif
#endif /* PMDK_VEC_H */
| 4,773 | 24.666667 | 74 |
h
|
null |
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/pmdk/src/common/badblock.h
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* badblock.h - common part of bad blocks API
*/
#ifndef PMDK_BADBLOCK_POOLSET_H
#define PMDK_BADBLOCK_POOLSET_H 1
#include "set.h"
#ifdef __cplusplus
extern "C" {
#endif
/* allocate / free an empty bad-blocks descriptor */
struct badblocks *badblocks_new(void);
void badblocks_delete(struct badblocks *bbs);
/* presumably detect / clear bad blocks across the pool set's part files --
 * see badblock.c for exact semantics of "create" (TODO confirm) */
int badblocks_check_poolset(struct pool_set *set, int create);
int badblocks_clear_poolset(struct pool_set *set, int create);
/* build the recovery-file path for replica "rep", part "part"
 * (caller presumably frees the returned string -- see implementation) */
char *badblocks_recovery_file_alloc(const char *file,
	unsigned rep, unsigned part);
/* check whether any recovery file exists for the pool set */
int badblocks_recovery_file_exists(struct pool_set *set);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_BADBLOCK_POOLSET_H */
| 2,203 | 35.131148 | 74 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.